/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
 * persistent objects that contain an optional fence. The fence can be
 * replaced with a new fence, or set to NULL.
 *
 * A syncobj can be waited upon, in which case the waiter blocks on the
 * underlying fence.
 *
 * A syncobj can be exported to a file descriptor and imported back. These
 * fds are opaque and have no use other than passing the syncobj between
 * processes.
 *
 * Their primary use-case is to implement Vulkan fences and semaphores.
 *
 * A syncobj has a kref reference count, but also an optional file. The
 * file is only created once the syncobj is exported, and it takes a
 * reference on the kref.
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>

#include "drm_internal.h"

struct syncobj_wait_entry {
        struct list_head node;
        struct task_struct *task;
        struct dma_fence *fence;
        struct dma_fence_cb fence_cb;
        u64 point;
};

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
                                      struct syncobj_wait_entry *wait);
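/*
 * Illustrative sketch only (not part of the original file): the core
 * semantics described in the overview above, as seen from kernel code.
 * "job_fence" is a hypothetical fence produced by a driver's submission.
 *
 *      struct dma_fence *fence;
 *
 *      drm_syncobj_replace_fence(syncobj, job_fence); // attach a fence
 *      fence = drm_syncobj_fence_get(syncobj);        // read it back
 *      dma_fence_put(fence);
 *      drm_syncobj_replace_fence(syncobj, NULL);      // reset to "no fence"
 */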
/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
                                     u32 handle)
{
        struct drm_syncobj *syncobj;

        spin_lock(&file_private->syncobj_table_lock);

        /* Check if we currently have a reference on the object */
        syncobj = idr_find(&file_private->syncobj_idr, handle);
        if (syncobj)
                drm_syncobj_get(syncobj);

        spin_unlock(&file_private->syncobj_table_lock);

        return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
                                       struct syncobj_wait_entry *wait)
{
        struct dma_fence *fence;

        if (wait->fence)
                return;

        spin_lock(&syncobj->lock);
        /* We've already tried once to get a fence and failed. Now that we
         * have the lock, try one more time just to be sure we don't add a
         * callback when a fence has already been set.
         */
        fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
        if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
                dma_fence_put(fence);
                list_add_tail(&wait->node, &syncobj->cb_list);
        } else if (!fence) {
                /* dma_fence_chain_find_seqno() walked past the end of the
                 * chain: the point is already signaled, wait on a stub.
                 */
                wait->fence = dma_fence_get_stub();
        } else {
                wait->fence = fence;
        }
        spin_unlock(&syncobj->lock);
}

static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
                                    struct syncobj_wait_entry *wait)
{
        if (!wait->node.next)
                return;

        spin_lock(&syncobj->lock);
        list_del_init(&wait->node);
        spin_unlock(&syncobj->lock);
}

/**
 * drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add the timeline point to
 * @chain: chain node to use to add the point
 * @fence: fence to encapsulate in the chain node
 * @point: sequence number to use for the point
 *
 * Add the chain node as new timeline point to the syncobj.
 */
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
                           struct dma_fence_chain *chain,
                           struct dma_fence *fence,
                           uint64_t point)
{
        struct syncobj_wait_entry *cur, *tmp;
        struct dma_fence *prev;

        dma_fence_get(fence);

        spin_lock(&syncobj->lock);

        prev = drm_syncobj_fence_get(syncobj);
        /* Adding an unordered point to the timeline can cause the payload
         * returned from the query ioctl to be 0!
         */
        if (prev && prev->seqno >= point)
                DRM_ERROR("You are adding an unordered point to timeline!\n");
        dma_fence_chain_init(chain, prev, fence, point);
        rcu_assign_pointer(syncobj->fence, &chain->base);

        list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
                syncobj_wait_syncobj_func(syncobj, cur);
        spin_unlock(&syncobj->lock);

        /* Walk the chain once to trigger garbage collection */
        dma_fence_chain_for_each(fence, prev);
        dma_fence_put(prev);
}
EXPORT_SYMBOL(drm_syncobj_add_point);
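/*
 * Illustrative sketch only: a driver signals timeline point N by allocating
 * a chain node and handing it over together with the fence of the job that
 * will signal the point. "job_fence" and "point" are hypothetical here.
 *
 *      struct dma_fence_chain *chain;
 *
 *      chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 *      if (!chain)
 *              return -ENOMEM;
 *      drm_syncobj_add_point(syncobj, chain, job_fence, point);
 *
 * drm_syncobj_add_point() takes its own reference on the fence; ownership
 * of the chain node passes to the syncobj.
 */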
/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
                               struct dma_fence *fence)
{
        struct dma_fence *old_fence;
        struct syncobj_wait_entry *cur, *tmp;

        if (fence)
                dma_fence_get(fence);

        spin_lock(&syncobj->lock);

        old_fence = rcu_dereference_protected(syncobj->fence,
                                              lockdep_is_held(&syncobj->lock));
        rcu_assign_pointer(syncobj->fence, fence);

        if (fence != old_fence) {
                list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
                        syncobj_wait_syncobj_func(syncobj, cur);
        }

        spin_unlock(&syncobj->lock);

        dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);

/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign an already signaled stub fence to the sync object.
 */
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
        struct dma_fence *fence = dma_fence_get_stub();

        drm_syncobj_replace_fence(syncobj, fence);
        dma_fence_put(fence);
}

/* 5s default for wait submission */
#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @point: timeline point
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
                           u32 handle, u64 point, u64 flags,
                           struct dma_fence **fence)
{
        struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
        struct syncobj_wait_entry wait;
        u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
        int ret;

        if (!syncobj)
                return -ENOENT;

        *fence = drm_syncobj_fence_get(syncobj);

        if (*fence) {
                ret = dma_fence_chain_find_seqno(fence, point);
                if (!ret) {
                        /* If the requested seqno is already signaled,
                         * dma_fence_chain_find_seqno() hands back a NULL
                         * fence; return a stub fence instead.
                         */
                        if (!*fence)
                                *fence = dma_fence_get_stub();
                        goto out;
                }
                dma_fence_put(*fence);
        } else {
                ret = -EINVAL;
        }

        if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
                goto out;

        memset(&wait, 0, sizeof(wait));
        wait.task = current;
        wait.point = point;
        drm_syncobj_fence_add_wait(syncobj, &wait);

        do {
                set_current_state(TASK_INTERRUPTIBLE);
                if (wait.fence) {
                        ret = 0;
                        break;
                }
                if (timeout == 0) {
                        ret = -ETIME;
                        break;
                }

                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                timeout = schedule_timeout(timeout);
        } while (1);

        __set_current_state(TASK_RUNNING);
        *fence = wait.fence;

        if (wait.node.next)
                drm_syncobj_remove_wait(syncobj, &wait);

out:
        /* Hold the syncobj reference until the wait entry is off the
         * cb_list; dropping it earlier would let a concurrent handle
         * destroy free the syncobj underneath us.
         */
        drm_syncobj_put(syncobj);
        return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
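/*
 * Illustrative sketch only: a driver resolving a syncobj dependency at
 * command submission time. "args" stands in for the driver's ioctl
 * arguments and my_driver_job_add_dependency() is hypothetical.
 *
 *      struct dma_fence *fence;
 *      int ret;
 *
 *      ret = drm_syncobj_find_fence(file_private, args->syncobj_handle,
 *                                   args->point, 0, &fence);
 *      if (ret)
 *              return ret;
 *      my_driver_job_add_dependency(job, fence);       // hypothetical
 *      dma_fence_put(fence);
 */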
/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
        struct drm_syncobj *syncobj = container_of(kref,
                                                   struct drm_syncobj,
                                                   refcount);
        drm_syncobj_replace_fence(syncobj, NULL);
        kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the function to call to create a sync object. After creating,
 * drivers probably want to make it available to userspace, either through
 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
                       struct dma_fence *fence)
{
        struct drm_syncobj *syncobj;

        syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
        if (!syncobj)
                return -ENOMEM;

        kref_init(&syncobj->refcount);
        INIT_LIST_HEAD(&syncobj->cb_list);
        spin_lock_init(&syncobj->lock);

        if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
                drm_syncobj_assign_null_handle(syncobj);

        if (fence)
                drm_syncobj_replace_fence(syncobj, fence);

        *out_syncobj = syncobj;
        return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);

/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
                           struct drm_syncobj *syncobj, u32 *handle)
{
        int ret;

        /* take a reference to put in the idr */
        drm_syncobj_get(syncobj);

        idr_preload(GFP_KERNEL);
        spin_lock(&file_private->syncobj_table_lock);
        ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
        spin_unlock(&file_private->syncobj_table_lock);

        idr_preload_end();

        if (ret < 0) {
                drm_syncobj_put(syncobj);
                return ret;
        }

        *handle = ret;
        return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
                                        u32 *handle, uint32_t flags)
{
        int ret;
        struct drm_syncobj *syncobj;

        ret = drm_syncobj_create(&syncobj, flags, NULL);
        if (ret)
                return ret;

        ret = drm_syncobj_get_handle(file_private, syncobj, handle);
        drm_syncobj_put(syncobj);
        return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
                               u32 handle)
{
        struct drm_syncobj *syncobj;

        spin_lock(&file_private->syncobj_table_lock);
        syncobj = idr_remove(&file_private->syncobj_idr, handle);
        spin_unlock(&file_private->syncobj_table_lock);

        if (!syncobj)
                return -EINVAL;

        drm_syncobj_put(syncobj);
        return 0;
}

static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
        struct drm_syncobj *syncobj = file->private_data;

        drm_syncobj_put(syncobj);
        return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
        .release = drm_syncobj_file_release,
};
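/*
 * Illustrative sketch only: a driver creating a syncobj and publishing it
 * to userspace as a handle. Once the handle is allocated the idr holds its
 * own reference, so the local one can be dropped.
 *
 *      struct drm_syncobj *syncobj;
 *      u32 handle;
 *      int ret;
 *
 *      ret = drm_syncobj_create(&syncobj, DRM_SYNCOBJ_CREATE_SIGNALED, NULL);
 *      if (ret)
 *              return ret;
 *      ret = drm_syncobj_get_handle(file_private, syncobj, &handle);
 *      drm_syncobj_put(syncobj);
 */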
/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
        struct file *file;
        int fd;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        file = anon_inode_getfile("syncobj_file",
                                  &drm_syncobj_file_fops,
                                  syncobj, 0);
        if (IS_ERR(file)) {
                put_unused_fd(fd);
                return PTR_ERR(file);
        }

        drm_syncobj_get(syncobj);
        fd_install(fd, file);

        *p_fd = fd;
        return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
                                    u32 handle, int *p_fd)
{
        struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
        int ret;

        if (!syncobj)
                return -EINVAL;

        ret = drm_syncobj_get_fd(syncobj, p_fd);
        drm_syncobj_put(syncobj);
        return ret;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
                                    int fd, u32 *handle)
{
        struct drm_syncobj *syncobj;
        struct fd f = fdget(fd);
        int ret;

        if (!f.file)
                return -EINVAL;

        if (f.file->f_op != &drm_syncobj_file_fops) {
                fdput(f);
                return -EINVAL;
        }

        /* take a reference to put in the idr */
        syncobj = f.file->private_data;
        drm_syncobj_get(syncobj);

        idr_preload(GFP_KERNEL);
        spin_lock(&file_private->syncobj_table_lock);
        ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
        spin_unlock(&file_private->syncobj_table_lock);
        idr_preload_end();

        if (ret > 0) {
                *handle = ret;
                ret = 0;
        } else
                drm_syncobj_put(syncobj);

        fdput(f);
        return ret;
}

static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
                                              int fd, int handle)
{
        struct dma_fence *fence = sync_file_get_fence(fd);
        struct drm_syncobj *syncobj;

        if (!fence)
                return -EINVAL;

        syncobj = drm_syncobj_find(file_private, handle);
        if (!syncobj) {
                dma_fence_put(fence);
                return -ENOENT;
        }

        drm_syncobj_replace_fence(syncobj, fence);
        dma_fence_put(fence);
        drm_syncobj_put(syncobj);
        return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
                                        int handle, int *p_fd)
{
        int ret;
        struct dma_fence *fence;
        struct sync_file *sync_file;
        int fd = get_unused_fd_flags(O_CLOEXEC);

        if (fd < 0)
                return fd;

        ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
        if (ret)
                goto err_put_fd;

        sync_file = sync_file_create(fence);

        dma_fence_put(fence);

        if (!sync_file) {
                ret = -EINVAL;
                goto err_put_fd;
        }

        fd_install(fd, sync_file->file);

        *p_fd = fd;
        return 0;
err_put_fd:
        put_unused_fd(fd);
        return ret;
}
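/*
 * Illustrative sketch only, from the userspace side: exporting the current
 * fence of a syncobj as a sync_file fd (e.g. to hand to a KMS IN_FENCE_FD
 * property). "drm_fd" and "handle" are assumed to already exist.
 *
 *      struct drm_syncobj_handle args = {
 *              .handle = handle,
 *              .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *      };
 *
 *      if (drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args))
 *              return -errno;
 *      // args.fd now holds the sync_file descriptor
 */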
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
        idr_init_base(&file_private->syncobj_idr, 1);
        spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
        struct drm_syncobj *syncobj = ptr;

        drm_syncobj_put(syncobj);
        return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
        idr_for_each(&file_private->syncobj_idr,
                     &drm_syncobj_release_handle, file_private);
        idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_private)
{
        struct drm_syncobj_create *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        /* no valid flags yet */
        if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
                return -EINVAL;

        return drm_syncobj_create_as_handle(file_private,
                                            &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_private)
{
        struct drm_syncobj_destroy *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        /* make sure padding is empty */
        if (args->pad)
                return -EINVAL;
        return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_private)
{
        struct drm_syncobj_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->pad)
                return -EINVAL;

        if (args->flags != 0 &&
            args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
                return -EINVAL;

        if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
                return drm_syncobj_export_sync_file(file_private, args->handle,
                                                    &args->fd);

        return drm_syncobj_handle_to_fd(file_private, args->handle,
                                        &args->fd);
}

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_private)
{
        struct drm_syncobj_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->pad)
                return -EINVAL;

        if (args->flags != 0 &&
            args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
                return -EINVAL;

        if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
                return drm_syncobj_import_sync_file_fence(file_private,
                                                          args->fd,
                                                          args->handle);

        return drm_syncobj_fd_to_handle(file_private, args->fd,
                                        &args->handle);
}
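/*
 * The helpers below implement the TRANSFER ioctl, which copies a fence from
 * one syncobj into another. Illustrative userspace sketch (handles assumed
 * to exist): copying binary syncobj "src" into point 3 of timeline "dst":
 *
 *      struct drm_syncobj_transfer args = {
 *              .src_handle = src,
 *              .dst_handle = dst,
 *              .src_point = 0,         // 0 = binary syncobj
 *              .dst_point = 3,
 *      };
 *
 *      drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &args);
 */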
static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
                                            struct drm_syncobj_transfer *args)
{
        struct drm_syncobj *timeline_syncobj = NULL;
        struct dma_fence *fence;
        struct dma_fence_chain *chain;
        int ret;

        timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
        if (!timeline_syncobj) {
                return -ENOENT;
        }
        ret = drm_syncobj_find_fence(file_private, args->src_handle,
                                     args->src_point, args->flags,
                                     &fence);
        if (ret)
                goto err;
        chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
        if (!chain) {
                ret = -ENOMEM;
                goto err1;
        }
        drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
err1:
        dma_fence_put(fence);
err:
        drm_syncobj_put(timeline_syncobj);

        return ret;
}

static int
drm_syncobj_transfer_to_binary(struct drm_file *file_private,
                               struct drm_syncobj_transfer *args)
{
        struct drm_syncobj *binary_syncobj = NULL;
        struct dma_fence *fence;
        int ret;

        binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
        if (!binary_syncobj)
                return -ENOENT;
        ret = drm_syncobj_find_fence(file_private, args->src_handle,
                                     args->src_point, args->flags, &fence);
        if (ret)
                goto err;
        drm_syncobj_replace_fence(binary_syncobj, fence);
        dma_fence_put(fence);
err:
        drm_syncobj_put(binary_syncobj);

        return ret;
}

int
drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_private)
{
        struct drm_syncobj_transfer *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
                return -EOPNOTSUPP;

        if (args->pad)
                return -EINVAL;

        if (args->dst_point)
                ret = drm_syncobj_transfer_to_timeline(file_private, args);
        else
                ret = drm_syncobj_transfer_to_binary(file_private, args);

        return ret;
}

static void syncobj_wait_fence_func(struct dma_fence *fence,
                                    struct dma_fence_cb *cb)
{
        struct syncobj_wait_entry *wait =
                container_of(cb, struct syncobj_wait_entry, fence_cb);

        wake_up_process(wait->task);
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
                                      struct syncobj_wait_entry *wait)
{
        struct dma_fence *fence;

        /* This happens inside the syncobj lock */
        fence = rcu_dereference_protected(syncobj->fence,
                                          lockdep_is_held(&syncobj->lock));
        dma_fence_get(fence);
        if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
                dma_fence_put(fence);
                return;
        } else if (!fence) {
                /* The chain walk found the point already signaled */
                wait->fence = dma_fence_get_stub();
        } else {
                wait->fence = fence;
        }

        wake_up_process(wait->task);
        list_del_init(&wait->node);
}
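/*
 * The machinery below implements the WAIT ioctls. Illustrative userspace
 * sketch (handles array assumed): wait for all listed syncobjs, blocking
 * until fences are attached rather than failing with -EINVAL:
 *
 *      struct drm_syncobj_wait args = {
 *              .handles = (__u64)(uintptr_t)handles,
 *              .timeout_nsec = deadline_ns,    // absolute CLOCK_MONOTONIC time
 *              .count_handles = n,
 *              .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
 *                       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
 *      };
 *
 *      drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
 */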
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
                                                  void __user *user_points,
                                                  uint32_t count,
                                                  uint32_t flags,
                                                  signed long timeout,
                                                  uint32_t *idx)
{
        struct syncobj_wait_entry *entries;
        struct dma_fence *fence;
        uint64_t *points;
        uint32_t signaled_count, i;

        points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
        if (points == NULL)
                return -ENOMEM;

        if (!user_points) {
                memset(points, 0, count * sizeof(uint64_t));

        } else if (copy_from_user(points, user_points,
                                  sizeof(uint64_t) * count)) {
                timeout = -EFAULT;
                goto err_free_points;
        }

        entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
        if (!entries) {
                timeout = -ENOMEM;
                goto err_free_points;
        }
        /* Walk the list of sync objects and initialize entries. We do
         * this up-front so that we can properly return -EINVAL if there is
         * a syncobj with a missing fence and then never have the chance of
         * returning -EINVAL again.
         */
        signaled_count = 0;
        for (i = 0; i < count; ++i) {
                struct dma_fence *fence;

                entries[i].task = current;
                entries[i].point = points[i];
                fence = drm_syncobj_fence_get(syncobjs[i]);
                if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
                        dma_fence_put(fence);
                        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                                continue;
                        } else {
                                timeout = -EINVAL;
                                goto cleanup_entries;
                        }
                }

                if (fence)
                        entries[i].fence = fence;
                else
                        entries[i].fence = dma_fence_get_stub();

                if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
                    dma_fence_is_signaled(entries[i].fence)) {
                        if (signaled_count == 0 && idx)
                                *idx = i;
                        signaled_count++;
                }
        }

        if (signaled_count == count ||
            (signaled_count > 0 &&
             !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
                goto cleanup_entries;

        /* There's a very annoying laxness in the dma_fence API here, in
         * that backends are not required to automatically report when a
         * fence is signaled prior to fence->ops->enable_signaling() being
         * called. So here if we fail to match signaled_count, we need to
         * fall through and try a 0 timeout wait!
         */

        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                for (i = 0; i < count; ++i)
                        drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
        }

        do {
                set_current_state(TASK_INTERRUPTIBLE);

                signaled_count = 0;
                for (i = 0; i < count; ++i) {
                        fence = entries[i].fence;
                        if (!fence)
                                continue;

                        if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
                            dma_fence_is_signaled(fence) ||
                            (!entries[i].fence_cb.func &&
                             dma_fence_add_callback(fence,
                                                    &entries[i].fence_cb,
                                                    syncobj_wait_fence_func))) {
                                /* The fence has been signaled */
                                if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
                                        signaled_count++;
                                } else {
                                        if (idx)
                                                *idx = i;
                                        goto done_waiting;
                                }
                        }
                }

                if (signaled_count == count)
                        goto done_waiting;

                if (timeout == 0) {
                        timeout = -ETIME;
                        goto done_waiting;
                }

                if (signal_pending(current)) {
                        timeout = -ERESTARTSYS;
                        goto done_waiting;
                }

                timeout = schedule_timeout(timeout);
        } while (1);

done_waiting:
        __set_current_state(TASK_RUNNING);

cleanup_entries:
        for (i = 0; i < count; ++i) {
                drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
                if (entries[i].fence_cb.func)
                        dma_fence_remove_callback(entries[i].fence,
                                                  &entries[i].fence_cb);
                dma_fence_put(entries[i].fence);
        }
        kfree(entries);

err_free_points:
        kfree(points);

        return timeout;
}

/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: absolute timeout in nanoseconds, 0 for poll
 *
 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
 */
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
        ktime_t abs_timeout, now;
        u64 timeout_ns, timeout_jiffies64;

        /* a 0 timeout means poll - an absolute timeout of 0 doesn't seem valid */
        if (timeout_nsec == 0)
                return 0;

        abs_timeout = ns_to_ktime(timeout_nsec);
        now = ktime_get();

        if (!ktime_after(abs_timeout, now))
                return 0;

        timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

        timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
        /* clamp timeout to avoid infinite timeout */
        if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies64 + 1;
}
EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
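/*
 * Worked example (illustrative, assuming CONFIG_HZ=250, i.e. 4 ms per
 * jiffy): if the absolute timeout lies 4 ms in the future,
 * nsecs_to_jiffies64(4000000) yields 1 jiffy and the +1 above rounds up,
 * so the caller never sleeps shorter than requested:
 *
 *      drm_timeout_abs_to_jiffies(ktime_get_ns() + 4000000)  -> 2 jiffies
 *      drm_timeout_abs_to_jiffies(0)                         -> 0 (poll)
 *      drm_timeout_abs_to_jiffies(1)                         -> 0 (in the past)
 */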
static int drm_syncobj_array_wait(struct drm_device *dev,
                                  struct drm_file *file_private,
                                  struct drm_syncobj_wait *wait,
                                  struct drm_syncobj_timeline_wait *timeline_wait,
                                  struct drm_syncobj **syncobjs, bool timeline)
{
        signed long timeout = 0;
        uint32_t first = ~0;

        if (!timeline) {
                timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
                timeout = drm_syncobj_array_wait_timeout(syncobjs,
                                                         NULL,
                                                         wait->count_handles,
                                                         wait->flags,
                                                         timeout, &first);
                if (timeout < 0)
                        return timeout;
                wait->first_signaled = first;
        } else {
                timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
                timeout = drm_syncobj_array_wait_timeout(syncobjs,
                                                         u64_to_user_ptr(timeline_wait->points),
                                                         timeline_wait->count_handles,
                                                         timeline_wait->flags,
                                                         timeout, &first);
                if (timeout < 0)
                        return timeout;
                timeline_wait->first_signaled = first;
        }
        return 0;
}

static int drm_syncobj_array_find(struct drm_file *file_private,
                                  void __user *user_handles,
                                  uint32_t count_handles,
                                  struct drm_syncobj ***syncobjs_out)
{
        uint32_t i, *handles;
        struct drm_syncobj **syncobjs;
        int ret;

        handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
        if (handles == NULL)
                return -ENOMEM;

        if (copy_from_user(handles, user_handles,
                           sizeof(uint32_t) * count_handles)) {
                ret = -EFAULT;
                goto err_free_handles;
        }

        syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
        if (syncobjs == NULL) {
                ret = -ENOMEM;
                goto err_free_handles;
        }

        for (i = 0; i < count_handles; i++) {
                syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
                if (!syncobjs[i]) {
                        ret = -ENOENT;
                        goto err_put_syncobjs;
                }
        }

        kfree(handles);
        *syncobjs_out = syncobjs;
        return 0;

err_put_syncobjs:
        while (i-- > 0)
                drm_syncobj_put(syncobjs[i]);
        kfree(syncobjs);
err_free_handles:
        kfree(handles);

        return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
                                   uint32_t count)
{
        uint32_t i;

        for (i = 0; i < count; i++)
                drm_syncobj_put(syncobjs[i]);
        kfree(syncobjs);
}
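/*
 * Illustrative userspace sketch for the timeline variant below: wait until
 * point 8 of a single timeline syncobj has a fence attached (is available),
 * without waiting for it to actually signal ("handle" assumed to exist):
 *
 *      uint64_t point = 8;
 *      struct drm_syncobj_timeline_wait args = {
 *              .handles = (__u64)(uintptr_t)&handle,
 *              .points = (__u64)(uintptr_t)&point,
 *              .timeout_nsec = deadline_ns,
 *              .count_handles = 1,
 *              .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
 *      };
 *
 *      drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);
 */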
int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_private)
{
        struct drm_syncobj_wait *args = data;
        struct drm_syncobj **syncobjs;
        int ret = 0;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
                            DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        ret = drm_syncobj_array_wait(dev, file_private,
                                     args, NULL, syncobjs, false);

        drm_syncobj_array_free(syncobjs, args->count_handles);

        return ret;
}

int
drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_private)
{
        struct drm_syncobj_timeline_wait *args = data;
        struct drm_syncobj **syncobjs;
        int ret = 0;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
                return -EOPNOTSUPP;

        if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
                            DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
                            DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        ret = drm_syncobj_array_wait(dev, file_private,
                                     NULL, args, syncobjs, true);

        drm_syncobj_array_free(syncobjs, args->count_handles);

        return ret;
}

int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_private)
{
        struct drm_syncobj_array *args = data;
        struct drm_syncobj **syncobjs;
        uint32_t i;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->pad != 0)
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        for (i = 0; i < args->count_handles; i++)
                drm_syncobj_replace_fence(syncobjs[i], NULL);

        drm_syncobj_array_free(syncobjs, args->count_handles);

        return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_private)
{
        struct drm_syncobj_array *args = data;
        struct drm_syncobj **syncobjs;
        uint32_t i;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->pad != 0)
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        for (i = 0; i < args->count_handles; i++)
                drm_syncobj_assign_null_handle(syncobjs[i]);

        drm_syncobj_array_free(syncobjs, args->count_handles);

        return ret;
}
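/*
 * Illustrative userspace sketch for the timeline variant below: signaling
 * point 5 of one timeline syncobj with an already-signaled stub fence
 * ("handle" assumed to exist):
 *
 *      uint64_t point = 5;
 *      struct drm_syncobj_timeline_array args = {
 *              .handles = (__u64)(uintptr_t)&handle,
 *              .points = (__u64)(uintptr_t)&point,
 *              .count_handles = 1,
 *      };
 *
 *      drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
 */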
int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_private)
{
        struct drm_syncobj_timeline_array *args = data;
        struct drm_syncobj **syncobjs;
        struct dma_fence_chain **chains;
        uint64_t *points;
        uint32_t i, j;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
                return -EOPNOTSUPP;

        if (args->pad != 0)
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        points = kmalloc_array(args->count_handles, sizeof(*points),
                               GFP_KERNEL);
        if (!points) {
                ret = -ENOMEM;
                goto out;
        }
        if (!u64_to_user_ptr(args->points)) {
                memset(points, 0, args->count_handles * sizeof(uint64_t));
        } else if (copy_from_user(points, u64_to_user_ptr(args->points),
                                  sizeof(uint64_t) * args->count_handles)) {
                ret = -EFAULT;
                goto err_points;
        }

        chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
        if (!chains) {
                ret = -ENOMEM;
                goto err_points;
        }
        for (i = 0; i < args->count_handles; i++) {
                chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
                if (!chains[i]) {
                        for (j = 0; j < i; j++)
                                kfree(chains[j]);
                        ret = -ENOMEM;
                        goto err_chains;
                }
        }

        for (i = 0; i < args->count_handles; i++) {
                struct dma_fence *fence = dma_fence_get_stub();

                drm_syncobj_add_point(syncobjs[i], chains[i],
                                      fence, points[i]);
                dma_fence_put(fence);
        }
err_chains:
        kfree(chains);
err_points:
        kfree(points);
out:
        drm_syncobj_array_free(syncobjs, args->count_handles);

        return ret;
}

int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_private)
{
        struct drm_syncobj_timeline_array *args = data;
        struct drm_syncobj **syncobjs;
        uint64_t __user *points = u64_to_user_ptr(args->points);
        uint32_t i;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
                return -EOPNOTSUPP;

        if (args->pad != 0)
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        for (i = 0; i < args->count_handles; i++) {
                struct dma_fence_chain *chain;
                struct dma_fence *fence;
                uint64_t point;

                fence = drm_syncobj_fence_get(syncobjs[i]);
                chain = to_dma_fence_chain(fence);
                if (chain) {
                        struct dma_fence *iter, *last_signaled = NULL;

                        dma_fence_chain_for_each(iter, fence) {
                                if (iter->context != fence->context) {
                                        dma_fence_put(iter);
                                        /* It is most likely that the timeline
                                         * has unordered points.
                                         */
                                        break;
                                }
                                dma_fence_put(last_signaled);
                                last_signaled = dma_fence_get(iter);
                        }
                        point = dma_fence_is_signaled(last_signaled) ?
                                last_signaled->seqno :
                                to_dma_fence_chain(last_signaled)->prev_seqno;
                        dma_fence_put(last_signaled);
                } else {
                        point = 0;
                }
                /* Drop the reference taken by drm_syncobj_fence_get();
                 * dma_fence_chain_for_each() holds its own references.
                 */
                dma_fence_put(fence);
                ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
                ret = ret ? -EFAULT : 0;
                if (ret)
                        break;
        }
        drm_syncobj_array_free(syncobjs, args->count_handles);

        return ret;
}
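/*
 * Illustrative userspace sketch: reading back the last signaled point of a
 * timeline syncobj with the QUERY ioctl ("handle" assumed to exist):
 *
 *      uint64_t point;
 *      struct drm_syncobj_timeline_array args = {
 *              .handles = (__u64)(uintptr_t)&handle,
 *              .points = (__u64)(uintptr_t)&point,
 *              .count_handles = 1,
 *      };
 *
 *      drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
 *      // "point" now holds the timeline payload (0 for a binary syncobj)
 */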