/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
 * persistent objects that contain an optional fence. The fence can be
 * updated with a new fence, or be NULL.
 *
 * syncobjs can be waited upon, in which case the wait is on the underlying
 * fence.
 *
 * syncobjs can be exported to fds and back; these fds are opaque and have
 * no other use case except passing the syncobj between processes.
 *
 * Their primary use-case is to implement Vulkan fences and semaphores.
 *
 * A syncobj has a kref reference count, but also an optional file. The file
 * is only created once the syncobj is exported, and it takes a reference on
 * the kref.
 */
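
/*
 * Illustrative sketch (not part of the original file): the driver-side
 * lifecycle described above, using only functions defined below. The
 * "job_fence" variable stands in for a hypothetical fence produced by a
 * driver's submission path.
 *
 *	struct drm_syncobj *syncobj;
 *	int ret;
 *
 *	ret = drm_syncobj_create(&syncobj, 0, NULL);
 *	if (ret)
 *		return ret;
 *
 *	drm_syncobj_replace_fence(syncobj, job_fence);
 *	drm_syncobj_put(syncobj);
 *
 * The put only drops the local reference; userspace handles created with
 * drm_syncobj_get_handle() keep the object alive.
 */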

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>

#include "drm_internal.h"

struct syncobj_wait_entry {
	struct list_head node;
	struct task_struct *task;
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	u64    point;
};

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait);

/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
				       struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	if (wait->fence)
		return;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed. Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		dma_fence_put(fence);
		list_add_tail(&wait->node, &syncobj->cb_list);
	} else if (!fence) {
		/* Not dead code: dma_fence_chain_find_seqno() may set fence
		 * to NULL when the requested point is already signaled, so
		 * this branch tests the updated fence, not the one checked
		 * before the call.
		 */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}
	spin_unlock(&syncobj->lock);
}

static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
				    struct syncobj_wait_entry *wait)
{
	if (!wait->node.next)
		return;

	spin_lock(&syncobj->lock);
	list_del_init(&wait->node);
	spin_unlock(&syncobj->lock);
}

/**
 * drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add timeline point to
 * @chain: chain node to use to add the point
 * @fence: fence to encapsulate in the chain node
 * @point: sequence number to use for the point
 *
 * Add the chain node as new timeline point to the syncobj.
 */
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
			   struct dma_fence_chain *chain,
			   struct dma_fence *fence,
			   uint64_t point)
{
	struct syncobj_wait_entry *cur, *tmp;
	struct dma_fence *prev;

	dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	prev = drm_syncobj_fence_get(syncobj);
	/* Adding an unordered point to the timeline could cause the payload
	 * returned from query_ioctl to be 0!
	 */
	if (prev && prev->seqno >= point)
		DRM_ERROR("You are adding an unordered point to the timeline!\n");
	dma_fence_chain_init(chain, prev, fence, point);
	rcu_assign_pointer(syncobj->fence, &chain->base);

	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
		syncobj_wait_syncobj_func(syncobj, cur);
	spin_unlock(&syncobj->lock);

	/* Walk the chain once to trigger garbage collection */
	dma_fence_chain_for_each(fence, prev);
	dma_fence_put(prev);
}
EXPORT_SYMBOL(drm_syncobj_add_point);
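
/*
 * Illustrative sketch (not part of the original file): adding point 42 to a
 * timeline syncobj from a driver. The chain node is pre-allocated because
 * drm_syncobj_add_point() itself cannot fail; this mirrors what
 * drm_syncobj_transfer_to_timeline() below does. "job_fence" is again a
 * hypothetical driver fence.
 *
 *	struct dma_fence_chain *chain;
 *
 *	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 *	if (!chain)
 *		return -ENOMEM;
 *	drm_syncobj_add_point(syncobj, chain, job_fence, 42);
 *
 * The timeline now owns the chain node; it is freed when the chain is
 * garbage collected.
 */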

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct syncobj_wait_entry *cur, *tmp;

	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
			syncobj_wait_syncobj_func(syncobj, cur);
	}

	spin_unlock(&syncobj->lock);

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);

/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign an already signaled stub fence to the sync object.
 */
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct dma_fence *fence = dma_fence_get_stub();

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
}

/* 5s default for wait submission */
#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @point: timeline point
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle, u64 point, u64 flags,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	struct syncobj_wait_entry wait;
	u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
	int ret;

	if (!syncobj)
		return -ENOENT;

	*fence = drm_syncobj_fence_get(syncobj);

	if (*fence) {
		ret = dma_fence_chain_find_seqno(fence, point);
		if (!ret)
			goto out;
		dma_fence_put(*fence);
	} else {
		ret = -EINVAL;
	}

	if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		goto out;

	memset(&wait, 0, sizeof(wait));
	wait.task = current;
	wait.point = point;
	drm_syncobj_fence_add_wait(syncobj, &wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		if (wait.fence) {
			ret = 0;
			break;
		}
		if (timeout == 0) {
			ret = -ETIME;
			break;
		}

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

	__set_current_state(TASK_RUNNING);
	*fence = wait.fence;

	if (wait.node.next)
		drm_syncobj_remove_wait(syncobj, &wait);

out:
	/* The reference taken by drm_syncobj_find() must be held until here;
	 * dropping it right after drm_syncobj_fence_get() would allow the
	 * syncobj to be freed while drm_syncobj_fence_add_wait() and
	 * drm_syncobj_remove_wait() still use it.
	 */
	drm_syncobj_put(syncobj);

	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
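
/*
 * Illustrative sketch (not part of the original file): a driver's submission
 * path resolving a wait semaphore into a fence dependency. "handle" and
 * "point" would come from a hypothetical driver ioctl; passing
 * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT makes the lookup block (up to the
 * 5s timeout above) until a fence has been attached.
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = drm_syncobj_find_fence(file_private, handle, point,
 *				     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
 *				     &fence);
 *	if (ret)
 *		return ret;
 *
 * The job then waits on "fence" before running, and the caller drops the
 * reference with dma_fence_put() once the dependency is recorded.
 */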

/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	drm_syncobj_replace_fence(syncobj, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the first function to create a sync object. After creating, drivers
 * probably want to make it available to userspace, either through
 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->lock);

	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
		drm_syncobj_assign_null_handle(syncobj);

	if (fence)
		drm_syncobj_replace_fence(syncobj, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);

/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
					u32 *handle, uint32_t flags)
{
	int ret;
	struct drm_syncobj *syncobj;

	ret = drm_syncobj_create(&syncobj, flags, NULL);
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	drm_syncobj_put(syncobj);
	return 0;
}
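
/*
 * Illustrative sketch (not part of the original file): how userspace reaches
 * the create/destroy paths above through the ioctls defined later in this
 * file. "drm_fd" is a hypothetical open DRM device fd.
 *
 *	struct drm_syncobj_create create = {
 *		.flags = DRM_SYNCOBJ_CREATE_SIGNALED,
 *	};
 *	struct drm_syncobj_destroy destroy = {};
 *
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
 *	destroy.handle = create.handle;
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
 */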

static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};

/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
				    u32 handle, int *p_fd)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret;

	if (!syncobj)
		return -EINVAL;

	ret = drm_syncobj_get_fd(syncobj, p_fd);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct fd f = fdget(fd);
	int ret;

	if (!f.file)
		return -EINVAL;

	if (f.file->f_op != &drm_syncobj_file_fops) {
		fdput(f);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = f.file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fdput(f);
	return ret;
}

static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
					      int fd, int handle)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	struct drm_syncobj *syncobj;

	if (!fence)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj) {
		dma_fence_put(fence);
		return -ENOENT;
	}

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
}
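
/*
 * Illustrative sketch (not part of the original file): userspace exporting
 * the current fence of a syncobj as a sync_file fd, then importing a
 * sync_file fence into another syncobj, via the ioctls below. The handles
 * and "drm_fd" are hypothetical.
 *
 *	struct drm_syncobj_handle args = {
 *		.handle = handle,
 *		.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
 *
 * args.fd is now a sync_file that can be polled or passed to another
 * process. Importing it into a second syncobj:
 *
 *	args.handle = other_handle;
 *	args.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
 */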

/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* no valid flags yet */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}

static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
					    struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *timeline_syncobj = NULL;
	struct dma_fence *fence;
	struct dma_fence_chain *chain;
	int ret;

	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!timeline_syncobj) {
		return -ENOENT;
	}
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags,
				     &fence);
	if (ret)
		goto err;
	chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
	if (!chain) {
		ret = -ENOMEM;
		goto err1;
	}
	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
err1:
	dma_fence_put(fence);
err:
	drm_syncobj_put(timeline_syncobj);

	return ret;
}

static int
drm_syncobj_transfer_to_binary(struct drm_file *file_private,
			       struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *binary_syncobj = NULL;
	struct dma_fence *fence;
	int ret;

	binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!binary_syncobj)
		return -ENOENT;
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags, &fence);
	if (ret)
		goto err;
	drm_syncobj_replace_fence(binary_syncobj, fence);
	dma_fence_put(fence);
err:
	drm_syncobj_put(binary_syncobj);

	return ret;
}

int
drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_private)
{
	struct drm_syncobj_transfer *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->dst_point)
		ret = drm_syncobj_transfer_to_timeline(file_private, args);
	else
		ret = drm_syncobj_transfer_to_binary(file_private, args);

	return ret;
}
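
/*
 * Illustrative sketch (not part of the original file): userspace copying the
 * fence behind a binary syncobj into point 3 of a timeline syncobj with the
 * transfer ioctl above; a zero dst_point would select the timeline-to-binary
 * direction instead. The handles and "drm_fd" are hypothetical.
 *
 *	struct drm_syncobj_transfer args = {
 *		.src_handle = binary_handle,
 *		.dst_handle = timeline_handle,
 *		.src_point = 0,
 *		.dst_point = 3,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &args);
 */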

static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	/* This happens inside the syncobj lock */
	fence = rcu_dereference_protected(syncobj->fence,
					  lockdep_is_held(&syncobj->lock));
	dma_fence_get(fence);
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		dma_fence_put(fence);
		return;
	} else if (!fence) {
		/* As in drm_syncobj_fence_add_wait(): the chain walk may have
		 * replaced fence with NULL because the point is already
		 * signaled.
		 */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}

	wake_up_process(wait->task);
	list_del_init(&wait->node);
}

static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  void __user *user_points,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint64_t *points;
	uint32_t signaled_count, i;

	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
	if (points == NULL)
		return -ENOMEM;

	if (!user_points) {
		memset(points, 0, count * sizeof(uint64_t));

	} else if (copy_from_user(points, user_points,
				  sizeof(uint64_t) * count)) {
		timeout = -EFAULT;
		goto err_free_points;
	}

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries) {
		timeout = -ENOMEM;
		goto err_free_points;
	}
	/* Walk the list of sync objects and initialize entries. We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		struct dma_fence *fence;

		entries[i].task = current;
		entries[i].point = points[i];
		fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
			dma_fence_put(fence);
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (fence)
			entries[i].fence = fence;
		else
			entries[i].fence = dma_fence_get_stub();

		if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
		    dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called. So here if we fail to match signaled_count, we need to
	 * fall through and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i)
			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
			    dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

err_free_points:
	kfree(points);

	return timeout;
}

/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: absolute timeout in nanoseconds, 0 means poll
 *
 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
 */
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* treat a 0 timeout as poll - absolute 0 doesn't seem valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/* clamp timeout to avoid infinite timeout */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies64 + 1;
}
EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
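
/*
 * Worked example (illustrative, assuming CONFIG_HZ=1000 so one jiffy is 1ms):
 * if "now" is at 100ms and @timeout_nsec names an absolute time of 102.5ms,
 * the delta is 2.5ms, nsecs_to_jiffies64() truncates it to 2 jiffies, and the
 * +1 rounds up so the wait cannot end early, giving 3 jiffies. An absolute
 * time already in the past yields 0, i.e. a poll.
 */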

static int drm_syncobj_array_wait(struct drm_device *dev,
				  struct drm_file *file_private,
				  struct drm_syncobj_wait *wait,
				  struct drm_syncobj_timeline_wait *timeline_wait,
				  struct drm_syncobj **syncobjs, bool timeline)
{
	signed long timeout = 0;
	uint32_t first = ~0;

	if (!timeline) {
		timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
		timeout = drm_syncobj_array_wait_timeout(syncobjs,
							 NULL,
							 wait->count_handles,
							 wait->flags,
							 timeout, &first);
		if (timeout < 0)
			return timeout;
		wait->first_signaled = first;
	} else {
		timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
		timeout = drm_syncobj_array_wait_timeout(syncobjs,
							 u64_to_user_ptr(timeline_wait->points),
							 timeline_wait->count_handles,
							 timeline_wait->flags,
							 timeout, &first);
		if (timeout < 0)
			return timeout;
		timeline_wait->first_signaled = first;
	}
	return 0;
}

static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
				   uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
}

int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_private)
{
	struct drm_syncobj_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     args, NULL, syncobjs, false);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_private)
{
	struct drm_syncobj_timeline_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     NULL, args, syncobjs, true);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_replace_fence(syncobjs[i], NULL);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_assign_null_handle(syncobjs[i]);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
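
/*
 * Illustrative sketch (not part of the original file): userspace waiting up
 * to one second for point 5 on each of two timeline syncobjs. timeout_nsec
 * is an absolute CLOCK_MONOTONIC time, see drm_timeout_abs_to_jiffies()
 * above. "h0", "h1" and "drm_fd" are hypothetical.
 *
 *	uint32_t handles[2] = { h0, h1 };
 *	uint64_t points[2] = { 5, 5 };
 *	struct timespec ts;
 *	struct drm_syncobj_timeline_wait args = {};
 *
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	args.handles = (uintptr_t)handles;
 *	args.points = (uintptr_t)points;
 *	args.count_handles = 2;
 *	args.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
 *	args.timeout_nsec = ts.tv_sec * 1000000000ull + ts.tv_nsec +
 *			    1000000000ull;
 *
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);
 */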

int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	struct dma_fence_chain **chains;
	uint64_t *points;
	uint32_t i, j;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	points = kmalloc_array(args->count_handles, sizeof(*points),
			       GFP_KERNEL);
	if (!points) {
		ret = -ENOMEM;
		goto out;
	}
	if (!u64_to_user_ptr(args->points)) {
		memset(points, 0, args->count_handles * sizeof(uint64_t));
	} else if (copy_from_user(points, u64_to_user_ptr(args->points),
				  sizeof(uint64_t) * args->count_handles)) {
		ret = -EFAULT;
		goto err_points;
	}

	chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
	if (!chains) {
		ret = -ENOMEM;
		goto err_points;
	}
	for (i = 0; i < args->count_handles; i++) {
		chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
		if (!chains[i]) {
			for (j = 0; j < i; j++)
				kfree(chains[j]);
			ret = -ENOMEM;
			goto err_chains;
		}
	}

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence *fence = dma_fence_get_stub();

		drm_syncobj_add_point(syncobjs[i], chains[i],
				      fence, points[i]);
		dma_fence_put(fence);
	}
err_chains:
	kfree(chains);
err_points:
	kfree(points);
out:
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	uint64_t __user *points = u64_to_user_ptr(args->points);
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence_chain *chain;
		struct dma_fence *fence;
		uint64_t point;

		fence = drm_syncobj_fence_get(syncobjs[i]);
		chain = to_dma_fence_chain(fence);
		if (chain) {
			struct dma_fence *iter, *last_signaled = NULL;

			dma_fence_chain_for_each(iter, fence) {
				if (!iter)
					break;
				dma_fence_put(last_signaled);
				last_signaled = dma_fence_get(iter);
				if (!to_dma_fence_chain(last_signaled)->prev_seqno)
					/* It is most likely that the timeline
					 * has unordered points.
					 */
					break;
			}
			point = dma_fence_is_signaled(last_signaled) ?
				last_signaled->seqno :
				to_dma_fence_chain(last_signaled)->prev_seqno;
			dma_fence_put(last_signaled);
		} else {
			point = 0;
		}
		/* Drop the reference taken by drm_syncobj_fence_get(); the
		 * chain walk above takes its own references.
		 */
		dma_fence_put(fence);
		ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
		ret = ret ? -EFAULT : 0;
		if (ret)
			break;
	}
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}