/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"

static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);

static const char kfd_dev_name[] = "kfd";

static const struct file_operations kfd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = kfd_ioctl,
	.compat_ioctl = kfd_ioctl,
	.open = kfd_open,
	.mmap = kfd_mmap,
};

static int kfd_char_dev_major = -1;
static struct class *kfd_class;
struct device *kfd_device;

int kfd_chardev_init(void)
{
	int err = 0;

	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
	err = kfd_char_dev_major;
	if (err < 0)
		goto err_register_chrdev;

	kfd_class = class_create(THIS_MODULE, kfd_dev_name);
	err = PTR_ERR(kfd_class);
	if (IS_ERR(kfd_class))
		goto err_class_create;

	kfd_device = device_create(kfd_class, NULL,
					MKDEV(kfd_char_dev_major, 0),
					NULL, kfd_dev_name);
	err = PTR_ERR(kfd_device);
	if (IS_ERR(kfd_device))
		goto err_device_create;

	return 0;

err_device_create:
	class_destroy(kfd_class);
err_class_create:
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
	return err;
}

void kfd_chardev_exit(void)
{
	device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
	class_destroy(kfd_class);
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
}

struct device *kfd_chardev(void)
{
	return kfd_device;
}

static int kfd_open(struct inode *inode, struct file *filep)
{
	struct kfd_process *process;
	bool is_32bit_user_mode;

	if (iminor(inode) != 0)
		return -ENODEV;

	is_32bit_user_mode = is_compat_task();

	if (is_32bit_user_mode) {
		dev_warn(kfd_device,
			"Process %d (32-bit) failed to open /dev/kfd\n"
			"32-bit processes are not supported by amdkfd\n",
			current->pid);
		return -EPERM;
	}

	process = kfd_create_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
		process->pasid, process->is_32bit_user_mode);

	return 0;
}
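
/*
 * Illustration (not part of the driver): a minimal userspace sketch of how a
 * client is expected to open the character node created above and query the
 * interface version before issuing other ioctls.  The file descriptor name
 * and error handling are assumptions made only for this example.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kfd_ioctl.h>
 *
 *	int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *	struct kfd_ioctl_get_version_args ver = {0};
 *
 *	if (fd >= 0 && ioctl(fd, AMDKFD_IOC_GET_VERSION, &ver) == 0)
 *		printf("KFD ioctl interface %u.%u\n",
 *		       ver.major_version, ver.minor_version);
 */
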
static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_get_version_args *args = data;
	int err = 0;

	args->major_version = KFD_IOCTL_MAJOR_VERSION;
	args->minor_version = KFD_IOCTL_MINOR_VERSION;

	return err;
}

static int set_queue_properties_from_user(struct queue_properties *q_properties,
				struct kfd_ioctl_create_queue_args *args)
{
	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("kfd: queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("kfd: queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok(VERIFY_WRITE,
			(const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("kfd: can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("kfd: ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	if (!access_ok(VERIFY_WRITE,
			(const void __user *) args->read_pointer_address,
			sizeof(uint32_t))) {
		pr_err("kfd: can't access read pointer\n");
		return -EFAULT;
	}

	if (!access_ok(VERIFY_WRITE,
			(const void __user *) args->write_pointer_address,
			sizeof(uint32_t))) {
		pr_err("kfd: can't access write pointer\n");
		return -EFAULT;
	}

	if (args->eop_buffer_address &&
		!access_ok(VERIFY_WRITE,
			(const void __user *) args->eop_buffer_address,
			sizeof(uint32_t))) {
		pr_debug("kfd: can't access eop buffer\n");
		return -EFAULT;
	}

	if (args->ctx_save_restore_address &&
		!access_ok(VERIFY_WRITE,
			(const void __user *) args->ctx_save_restore_address,
			sizeof(uint32_t))) {
		pr_debug("kfd: can't access ctx save restore buffer\n");
		return -EFAULT;
	}

	q_properties->is_interop = false;
	q_properties->queue_percent = args->queue_percentage;
	q_properties->priority = args->queue_priority;
	q_properties->queue_address = args->ring_base_address;
	q_properties->queue_size = args->ring_size;
	q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
	q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
	q_properties->ctx_save_restore_area_address =
			args->ctx_save_restore_address;
	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
		args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
		q_properties->type = KFD_QUEUE_TYPE_SDMA;
	else
		return -ENOTSUPP;

	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->format = KFD_QUEUE_FORMAT_AQL;
	else
		q_properties->format = KFD_QUEUE_FORMAT_PM4;

	pr_debug("Queue Percentage (%d, %d)\n",
			q_properties->queue_percent, args->queue_percentage);

	pr_debug("Queue Priority (%d, %d)\n",
			q_properties->priority, args->queue_priority);

	pr_debug("Queue Address (0x%llX, 0x%llX)\n",
			q_properties->queue_address, args->ring_base_address);

	pr_debug("Queue Size (0x%llX, %u)\n",
			q_properties->queue_size, args->ring_size);

	pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
			(uint64_t) q_properties->read_ptr,
			(uint64_t) q_properties->write_ptr);

	pr_debug("Queue Format (%d)\n", q_properties->format);

	pr_debug("Queue EOP (0x%llX)\n", q_properties->eop_ring_buffer_address);

	pr_debug("Queue CTX save area (0x%llX)\n",
			q_properties->ctx_save_restore_area_address);

	return 0;
}
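
/*
 * Illustration (not part of the driver): a hedged userspace sketch of queue
 * creation against the ioctl handled below.  The buffer allocations, the
 * gpu_id value and the chosen percentage/priority are assumptions made only
 * for the example; only fields the handler actually reads or writes are
 * shown.
 *
 *	struct kfd_ioctl_create_queue_args args = {0};
 *
 *	args.gpu_id = gpu_id;                          // e.g. from topology
 *	args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE;
 *	args.queue_percentage = 100;
 *	args.queue_priority = 7;
 *	args.ring_base_address = (uintptr_t)ring_buf;  // power-of-2 size
 *	args.ring_size = ring_size;
 *	args.read_pointer_address = (uintptr_t)&rptr;
 *	args.write_pointer_address = (uintptr_t)&wptr;
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args) == 0) {
 *		// args.queue_id identifies the queue for update/destroy;
 *		// args.doorbell_offset is the offset to pass to mmap() on
 *		// /dev/kfd to map this queue's doorbell page.
 *	}
 */
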
static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_queue_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	unsigned int queue_id;
	struct kfd_process_device *pdd;
	struct queue_properties q_properties;

	memset(&q_properties, 0, sizeof(struct queue_properties));

	pr_debug("kfd: creating queue ioctl\n");

	err = set_queue_properties_from_user(&q_properties, args);
	if (err)
		return err;

	pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id);
	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL) {
		pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id);
		return -EINVAL;
	}

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto err_bind_process;
	}

	pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
			p->pasid,
			dev->id);

	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties,
				0, q_properties.type, &queue_id);
	if (err != 0)
		goto err_create_queue;

	args->queue_id = queue_id;

	/* Return gpu_id as doorbell offset for mmap usage */
	args->doorbell_offset = (KFD_MMAP_DOORBELL_MASK | args->gpu_id);
	args->doorbell_offset <<= PAGE_SHIFT;

	mutex_unlock(&p->mutex);

	pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);

	pr_debug("ring buffer address == 0x%016llX\n",
			args->ring_base_address);

	pr_debug("read ptr address == 0x%016llX\n",
			args->read_pointer_address);

	pr_debug("write ptr address == 0x%016llX\n",
			args->write_pointer_address);

	return 0;

err_create_queue:
err_bind_process:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_destroy_queue_args *args = data;

	pr_debug("kfd: destroying queue id %d for PASID %d\n",
			args->queue_id,
			p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_destroy_queue(&p->pqm, args->queue_id);

	mutex_unlock(&p->mutex);
	return retval;
}

static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_update_queue_args *args = data;
	struct queue_properties properties;

	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("kfd: queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("kfd: queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok(VERIFY_WRITE,
			(const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("kfd: can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("kfd: ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	properties.queue_address = args->ring_base_address;
	properties.queue_size = args->ring_size;
	properties.queue_percent = args->queue_percentage;
	properties.priority = args->queue_priority;

	pr_debug("kfd: updating queue id %d for PASID %d\n",
			args->queue_id, p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);

	mutex_unlock(&p->mutex);

	return retval;
}
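
/*
 * AMDKFD_IOC_SET_MEMORY_POLICY: translate the default and alternate cache
 * policies supplied by userspace (coherent / non-coherent) into the internal
 * cache_policy enum and hand them, together with the alternate aperture
 * base and size, to the device queue manager for this process's per-device
 * queue data.
 */
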
static int kfd_ioctl_set_memory_policy(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_memory_policy_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	struct kfd_process_device *pdd;
	enum cache_policy default_policy, alternate_policy;

	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
			 ? cache_policy_coherent : cache_policy_noncoherent;

	alternate_policy =
		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
		   ? cache_policy_coherent : cache_policy_noncoherent;

	if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
				&pdd->qpd,
				default_policy,
				alternate_policy,
				(void __user *)args->alternate_aperture_base,
				args->alternate_aperture_size))
		err = -EINVAL;

out:
	mutex_unlock(&p->mutex);

	return err;
}
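
/*
 * Debugger registration: a single kfd_dbgmgr instance is kept per device
 * (dev->dbgmgr).  It is created on the first successful register call and
 * torn down on unregister; both paths, as well as the address-watch and
 * wave-control ioctls, run under the global dbgmgr mutex so they cannot
 * race with each other.
 */
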
static int kfd_ioctl_dbg_register(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_register_args *args = data;
	struct kfd_dev *dev;
	struct kfd_dbgmgr *dbgmgr_ptr;
	struct kfd_process_device *pdd;
	bool create_ok;
	long status = 0;

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
		return -EINVAL;
	}

	mutex_lock(kfd_get_dbgmgr_mutex());
	mutex_lock(&p->mutex);

	/*
	 * make sure that we have a pdd, in case this is the first queue
	 * created for this process
	 */
	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		mutex_unlock(&p->mutex);
		mutex_unlock(kfd_get_dbgmgr_mutex());
		return PTR_ERR(pdd);
	}

	if (dev->dbgmgr == NULL) {
		/* In case of a legal call, we have no dbgmgr yet */
		create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
		if (create_ok) {
			status = kfd_dbgmgr_register(dbgmgr_ptr, p);
			if (status != 0)
				kfd_dbgmgr_destroy(dbgmgr_ptr);
			else
				dev->dbgmgr = dbgmgr_ptr;
		}
	} else {
		pr_debug("debugger already registered\n");
		status = -EINVAL;
	}

	mutex_unlock(&p->mutex);
	mutex_unlock(kfd_get_dbgmgr_mutex());

	return status;
}

static int kfd_ioctl_dbg_unregister(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_unregister_args *args = data;
	struct kfd_dev *dev;
	long status;

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
		return -EINVAL;
	}

	mutex_lock(kfd_get_dbgmgr_mutex());

	status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
	if (status == 0) {
		kfd_dbgmgr_destroy(dev->dbgmgr);
		dev->dbgmgr = NULL;
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	return status;
}

/*
 * Parse and generate a variable size data structure for address watch.
 * The total size of the buffer and the number of watch points are limited
 * in order to prevent kernel abuse. (This has no bearing on the much smaller
 * HW limitation, which is enforced by the dbgdev module.)
 * Please also note that the watch addresses themselves are not copied from
 * user, since they are set into the HW in user mode as values.
 */
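
/*
 * Expected layout of the payload that follows struct
 * kfd_ioctl_dbg_address_watch_args in the buffer at content_ptr, as parsed
 * below (this mirrors the existing parsing code; it does not define a new
 * ABI):
 *
 *	uint32_t                num_watch_points;
 *	enum HSA_DBG_WATCH_MODE watch_mode[num_watch_points];
 *	uint64_t                watch_address[num_watch_points];
 *	uint64_t                watch_mask[num_watch_points];
 *	                        (or a single zero entry when no masks
 *	                         are supplied)
 */
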
static int kfd_ioctl_dbg_address_watch(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_address_watch_args *args = data;
	struct kfd_dev *dev;
	struct dbg_address_watch_info aw_info;
	unsigned char *args_buff;
	long status;
	void __user *cmd_from_user;
	uint64_t watch_mask_value = 0;
	unsigned int args_idx = 0;

	memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_address_watch not supported on CZ\n");
		return -EINVAL;
	}

	cmd_from_user = (void __user *) args->content_ptr;

	/* Validate arguments */

	if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) ||
		(args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) ||
		(cmd_from_user == NULL))
		return -EINVAL;

	/* this is the actual buffer to work with */

	args_buff = kmalloc(args->buf_size_in_bytes -
					sizeof(*args), GFP_KERNEL);
	if (args_buff == NULL)
		return -ENOMEM;

	status = copy_from_user(args_buff, cmd_from_user,
				args->buf_size_in_bytes - sizeof(*args));

	if (status != 0) {
		pr_debug("Failed to copy address watch user data\n");
		kfree(args_buff);
		return -EINVAL;
	}

	aw_info.process = p;

	aw_info.num_watch_points = *((uint32_t *)(&args_buff[args_idx]));
	args_idx += sizeof(aw_info.num_watch_points);

	aw_info.watch_mode = (enum HSA_DBG_WATCH_MODE *) &args_buff[args_idx];
	args_idx += sizeof(enum HSA_DBG_WATCH_MODE) * aw_info.num_watch_points;

	/*
	 * set watch address base pointer to point on the array base
	 * within args_buff
	 */
	aw_info.watch_address = (uint64_t *) &args_buff[args_idx];

	/* skip over the addresses buffer */
	args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;

	if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
		kfree(args_buff);
		return -EINVAL;
	}

	watch_mask_value = (uint64_t) args_buff[args_idx];

	if (watch_mask_value > 0) {
		/*
		 * There is an array of masks.
		 * set watch mask base pointer to point on the array base
		 * within args_buff
		 */
		aw_info.watch_mask = (uint64_t *) &args_buff[args_idx];

		/* skip over the masks buffer */
		args_idx += sizeof(aw_info.watch_mask) *
				aw_info.num_watch_points;
	} else {
		/* just the NULL mask, set to NULL and skip over it */
		aw_info.watch_mask = NULL;
		args_idx += sizeof(aw_info.watch_mask);
	}

	if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
		kfree(args_buff);
		return -EINVAL;
	}

	/* Currently HSA Event is not supported for DBG */
	aw_info.watch_event = NULL;

	mutex_lock(kfd_get_dbgmgr_mutex());

	status = kfd_dbgmgr_address_watch(dev->dbgmgr, &aw_info);

	mutex_unlock(kfd_get_dbgmgr_mutex());

	kfree(args_buff);

	return status;
}
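
/*
 * Expected "compact" layout of the payload that follows struct
 * kfd_ioctl_dbg_wave_control_args at content_ptr, matching computed_buff_size
 * and the parsing order below (documentation of the existing format, not a
 * new ABI):
 *
 *	enum HSA_DBG_WAVEOP   operand;
 *	enum HSA_DBG_WAVEMODE mode;
 *	uint32_t              trapId;
 *	uint32_t              DbgWaveMsg value (WaveMsgInfoGen2);
 *
 * The buffer is also sized to hold the MemoryVA pointer, but the handler
 * never reads it from userspace and forces it to NULL.
 */
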
/* Parse and generate fixed size data structure for wave control */
static int kfd_ioctl_dbg_wave_control(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_wave_control_args *args = data;
	struct kfd_dev *dev;
	struct dbg_wave_control_info wac_info;
	unsigned char *args_buff;
	uint32_t computed_buff_size;
	long status;
	void __user *cmd_from_user;
	unsigned int args_idx = 0;

	memset((void *) &wac_info, 0, sizeof(struct dbg_wave_control_info));

	/* we use compact form, independent of the packing attribute value */
	computed_buff_size = sizeof(*args) +
				sizeof(wac_info.mode) +
				sizeof(wac_info.operand) +
				sizeof(wac_info.dbgWave_msg.DbgWaveMsg) +
				sizeof(wac_info.dbgWave_msg.MemoryVA) +
				sizeof(wac_info.trapId);

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
		return -EINVAL;
	}

	/* input size must match the computed "compact" size */
	if (args->buf_size_in_bytes != computed_buff_size) {
		pr_debug("size mismatch, actual : computed %u : %u\n",
				args->buf_size_in_bytes, computed_buff_size);
		return -EINVAL;
	}

	cmd_from_user = (void __user *) args->content_ptr;

	if (cmd_from_user == NULL)
		return -EINVAL;

	/* this is the actual buffer to work with */

	args_buff = kmalloc(args->buf_size_in_bytes - sizeof(*args),
			GFP_KERNEL);

	if (args_buff == NULL)
		return -ENOMEM;

	/* Now copy the entire buffer from user */
	status = copy_from_user(args_buff, cmd_from_user,
			args->buf_size_in_bytes - sizeof(*args));
	if (status != 0) {
		pr_debug("Failed to copy wave control user data\n");
		kfree(args_buff);
		return -EINVAL;
	}

	/* move ptr to the start of the "payload" area */
	wac_info.process = p;

	wac_info.operand = *((enum HSA_DBG_WAVEOP *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.operand);

	wac_info.mode = *((enum HSA_DBG_WAVEMODE *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.mode);

	wac_info.trapId = *((uint32_t *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.trapId);

	wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value =
					*((uint32_t *)(&args_buff[args_idx]));
	wac_info.dbgWave_msg.MemoryVA = NULL;

	mutex_lock(kfd_get_dbgmgr_mutex());

	pr_debug("Calling dbg manager process %p, operand %u, mode %u, trapId %u, message %u\n",
			wac_info.process, wac_info.operand,
			wac_info.mode, wac_info.trapId,
			wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);

	status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info);

	pr_debug("Returned status of dbg manager is %ld\n", status);

	mutex_unlock(kfd_get_dbgmgr_mutex());

	kfree(args_buff);

	return status;
}
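
/*
 * AMDKFD_IOC_GET_CLOCK_COUNTERS returns one GPU counter (read through KGD)
 * and two CPU-side counters (raw monotonic and boottime), which lets
 * userspace correlate GPU and CPU timestamps.  The CPU values are reported
 * in nanoseconds, which is why system_clock_freq is fixed at 1 GHz.
 */
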
static int kfd_ioctl_get_clock_counters(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_clock_counters_args *args = data;
	struct kfd_dev *dev;
	struct timespec64 time;

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	/* Reading GPU clock counter from KGD */
	args->gpu_clock_counter =
		dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);

	/* No access to rdtsc. Using raw monotonic time */
	getrawmonotonic64(&time);
	args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);

	get_monotonic_boottime64(&time);
	args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);

	/* Since the counter is in nanoseconds we use 1GHz as the frequency */
	args->system_clock_freq = 1000000000;

	return 0;
}

static int kfd_ioctl_get_process_apertures(struct file *filp,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_args *args = data;
	struct kfd_process_device_apertures *pAperture;
	struct kfd_process_device *pdd;

	dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);

	args->num_of_nodes = 0;

	mutex_lock(&p->mutex);

	/* if the process-device list isn't empty */
	if (kfd_has_process_device_data(p)) {
		/* Run over all pdd of the process */
		pdd = kfd_get_first_process_device_data(p);
		do {
			pAperture =
				&args->process_apertures[args->num_of_nodes];
			pAperture->gpu_id = pdd->dev->id;
			pAperture->lds_base = pdd->lds_base;
			pAperture->lds_limit = pdd->lds_limit;
			pAperture->gpuvm_base = pdd->gpuvm_base;
			pAperture->gpuvm_limit = pdd->gpuvm_limit;
			pAperture->scratch_base = pdd->scratch_base;
			pAperture->scratch_limit = pdd->scratch_limit;

			dev_dbg(kfd_device,
				"node id %u\n", args->num_of_nodes);
			dev_dbg(kfd_device,
				"gpu id %u\n", pdd->dev->id);
			dev_dbg(kfd_device,
				"lds_base %llX\n", pdd->lds_base);
			dev_dbg(kfd_device,
				"lds_limit %llX\n", pdd->lds_limit);
			dev_dbg(kfd_device,
				"gpuvm_base %llX\n", pdd->gpuvm_base);
			dev_dbg(kfd_device,
				"gpuvm_limit %llX\n", pdd->gpuvm_limit);
			dev_dbg(kfd_device,
				"scratch_base %llX\n", pdd->scratch_base);
			dev_dbg(kfd_device,
				"scratch_limit %llX\n", pdd->scratch_limit);

			args->num_of_nodes++;
		} while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
				(args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
	}

	mutex_unlock(&p->mutex);

	return 0;
}

static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_event_args *args = data;
	int err;

	err = kfd_event_create(filp, p, args->event_type,
				args->auto_reset != 0, args->node_id,
				&args->event_id, &args->event_trigger_data,
				&args->event_page_offset,
				&args->event_slot_index);

	return err;
}

static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_destroy_event_args *args = data;

	return kfd_event_destroy(p, args->event_id);
}

static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_set_event_args *args = data;

	return kfd_set_event(p, args->event_id);
}

static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_reset_event_args *args = data;

	return kfd_reset_event(p, args->event_id);
}

static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_wait_events_args *args = data;
	enum kfd_event_wait_result wait_result;
	int err;

	err = kfd_wait_on_events(p, args->num_events,
			(void __user *)args->events_ptr,
			(args->wait_for_all != 0),
			args->timeout, &wait_result);

	args->wait_result = wait_result;

	return err;
}
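
/*
 * Ioctl dispatch table.  Entries are indexed by _IOC_NR() of the command so
 * that kfd_ioctl() can look up the handler directly; the .cmd stored here is
 * treated as the authoritative definition of the command's size and
 * direction bits, rather than the command word passed in from userspace.
 */
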
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}

/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
			kfd_ioctl_get_version, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
			kfd_ioctl_create_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
			kfd_ioctl_destroy_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
			kfd_ioctl_set_memory_policy, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
			kfd_ioctl_get_clock_counters, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
			kfd_ioctl_get_process_apertures, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
			kfd_ioctl_update_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
			kfd_ioctl_create_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
			kfd_ioctl_destroy_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
			kfd_ioctl_set_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
			kfd_ioctl_reset_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
			kfd_ioctl_wait_events, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER,
			kfd_ioctl_dbg_register, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER,
			kfd_ioctl_dbg_unregister, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH,
			kfd_ioctl_dbg_address_watch, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
			kfd_ioctl_dbg_wave_control, 0),
};

#define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)

static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kfd_process *process;
	amdkfd_ioctl_t *func;
	const struct amdkfd_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128];
	char *kdata = NULL;
	unsigned int usize, asize;
	int retcode = -EINVAL;

	if (nr >= AMDKFD_CORE_IOCTL_COUNT)
		goto err_i1;

	if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
		u32 amdkfd_size;

		ioctl = &amdkfd_ioctls[nr];

		amdkfd_size = _IOC_SIZE(ioctl->cmd);
		usize = asize = _IOC_SIZE(cmd);
		if (amdkfd_size > asize)
			asize = amdkfd_size;

		cmd = ioctl->cmd;
	} else
		goto err_i1;

	dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);

	process = kfd_get_process(current);
	if (IS_ERR(process)) {
		dev_dbg(kfd_device, "no process\n");
		goto err_i1;
	}

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(kfd_device, "no function\n");
		retcode = -EINVAL;
		goto err_i1;
	}

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kmalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto err_i1;
			}
		}
		if (asize > usize)
			memset(kdata + usize, 0, asize - usize);
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
			retcode = -EFAULT;
			goto err_i1;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	retcode = func(filep, process, kdata);

	if (cmd & IOC_OUT)
		if (copy_to_user((void __user *)arg, kdata, usize) != 0)
			retcode = -EFAULT;

err_i1:
	if (!ioctl)
		dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			  task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	if (retcode)
		dev_dbg(kfd_device, "ret = %d\n", retcode);

	return retcode;
}
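
/*
 * mmap() on /dev/kfd: the mmap offset (vm_pgoff) selects what is being
 * mapped.  Offsets tagged with KFD_MMAP_DOORBELL_MASK (as returned in
 * doorbell_offset by the create-queue ioctl above) map a doorbell page,
 * while offsets tagged with KFD_MMAP_EVENTS_MASK map the events page; the
 * tag is cleared before the vma is handed to the respective mapping helper.
 */
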
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct kfd_process *process;

	process = kfd_get_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	if ((vma->vm_pgoff & KFD_MMAP_DOORBELL_MASK) ==
			KFD_MMAP_DOORBELL_MASK) {
		vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_DOORBELL_MASK;
		return kfd_doorbell_mmap(process, vma);
	} else if ((vma->vm_pgoff & KFD_MMAP_EVENTS_MASK) ==
			KFD_MMAP_EVENTS_MASK) {
		vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_EVENTS_MASK;
		return kfd_event_mmap(process, vma);
	}

	return -EFAULT;
}