1 /* 2 * Copyright 2017 Red Hat 3 * Parts ported from amdgpu (fence wait code). 4 * Copyright 2016 Advanced Micro Devices, Inc. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the next 14 * paragraph) shall be included in all copies or substantial portions of the 15 * Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 23 * IN THE SOFTWARE. 24 * 25 * Authors: 26 * 27 */ 28 29 /** 30 * DOC: Overview 31 * 32 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are 33 * persistent objects that contain an optional fence. The fence can be updated 34 * with a new fence, or be NULL. 35 * 36 * syncobj's can be waited upon, where it will wait for the underlying 37 * fence. 38 * 39 * syncobj's can be export to fd's and back, these fd's are opaque and 40 * have no other use case, except passing the syncobj between processes. 41 * 42 * Their primary use-case is to implement Vulkan fences and semaphores. 43 * 44 * syncobj have a kref reference count, but also have an optional file. 45 * The file is only created once the syncobj is exported. 
 * The file takes a reference on the kref.
 */

#include <drm/drmP.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
#include <linux/sched/signal.h>

#include "drm_internal.h"
#include <drm/drm_syncobj.h>

/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	/* The table lock serialises lookup against concurrent handle
	 * destruction, so the kref is taken before the entry can vanish.
	 */
	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

/* Append @cb to the syncobj's callback list.  Caller must hold
 * syncobj->lock (see the locked wrappers below).
 */
static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
					    struct drm_syncobj_cb *cb,
					    drm_syncobj_func_t func)
{
	cb->func = func;
	list_add_tail(&cb->node, &syncobj->cb_list);
}

/* Either grab a reference to the syncobj's current fence, or, if there is
 * none yet, register @cb to be called once a fence is installed.
 *
 * Returns 1 with *fence holding a reference when a fence was present,
 * 0 with *fence == NULL after registering the callback otherwise.
 */
static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
						 struct dma_fence **fence,
						 struct drm_syncobj_cb *cb,
						 drm_syncobj_func_t func)
{
	int ret;

	/* Fast path: lock-free fence lookup. */
	*fence = drm_syncobj_fence_get(syncobj);
	if (*fence)
		return 1;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	if (syncobj->fence) {
		*fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
								 lockdep_is_held(&syncobj->lock)));
		ret = 1;
	} else {
		*fence = NULL;
		drm_syncobj_add_callback_locked(syncobj, cb, func);
		ret = 0;
	}
	spin_unlock(&syncobj->lock);

	return ret;
}

/* Register @cb unconditionally; it fires the next time the fence is
 * replaced (see drm_syncobj_replace_fence()).
 */
void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
			      struct drm_syncobj_cb *cb,
			      drm_syncobj_func_t func)
{
	spin_lock(&syncobj->lock);
	drm_syncobj_add_callback_locked(syncobj, cb, func);
	spin_unlock(&syncobj->lock);
}

/* Unregister a callback added via drm_syncobj_add_callback() or
 * drm_syncobj_fence_get_or_add_callback().  Safe to call even if the
 * callback already fired, as firing uses list_del_init() too.
 */
void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
				 struct drm_syncobj_cb *cb)
{
	spin_lock(&syncobj->lock);
	list_del_init(&cb->node);
	spin_unlock(&syncobj->lock);
}

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in sync file.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct drm_syncobj_cb *cur, *tmp;

	/* Take our reference on the new fence before publishing it. */
	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	/* Fire all registered callbacks; each is removed from the list
	 * before its func runs, so a callback may safely re-add itself.
	 */
	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(syncobj, cur);
		}
	}

	spin_unlock(&syncobj->lock);

	/* Drop the reference that syncobj->fence held on the old fence. */
	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);

/* A trivial already-signaled fence, used to back syncobjs created with
 * DRM_SYNCOBJ_CREATE_SIGNALED and the SIGNAL ioctl.
 */
struct drm_syncobj_null_fence {
	struct dma_fence base;
	spinlock_t lock;
};

static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
{
	return "syncobjnull";
}

/* NOTE(review): this is called back from dma_fence_enable_sw_signaling()
 * itself, so the inner call below looks redundant (the enable-signaling
 * bit is already set by then) — confirm against the dma-fence core.  In
 * practice the fence is signaled before it is ever published, so this
 * path should not be reached.
 */
static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
{
	dma_fence_enable_sw_signaling(fence);
	return !dma_fence_is_signaled(fence);
}

static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
	.get_driver_name = drm_syncobj_null_fence_get_name,
	.get_timeline_name = drm_syncobj_null_fence_get_name,
	.enable_signaling = drm_syncobj_null_fence_enable_signaling,
	.release = NULL,
};

/* Install a freshly-allocated, already-signaled fence into @syncobj.
 * Returns 0 on success or -ENOMEM.
 */
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct drm_syncobj_null_fence *fence;
	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops,
		       &fence->lock, 0, 0);
	/* Signal before publishing so waiters see it as complete. */
	dma_fence_signal(&fence->base);

	drm_syncobj_replace_fence(syncobj, &fence->base);

	/* The syncobj now holds the only reference we need. */
	dma_fence_put(&fence->base);

	return 0;
}

/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret = 0;

	if (!syncobj)
		return -ENOENT;

	*fence = drm_syncobj_fence_get(syncobj);
	if (!*fence) {
		/* Syncobj exists but currently has no fence attached. */
		ret = -EINVAL;
	}
	drm_syncobj_put(syncobj);
	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);

/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	/* Drop the fence reference (and fire any straggler callbacks). */
	drm_syncobj_replace_fence(syncobj, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the first function to create a sync object. After creating, drivers
 * probably want to make it available to userspace, either through
 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
276 */ 277 int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, 278 struct dma_fence *fence) 279 { 280 int ret; 281 struct drm_syncobj *syncobj; 282 283 syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL); 284 if (!syncobj) 285 return -ENOMEM; 286 287 kref_init(&syncobj->refcount); 288 INIT_LIST_HEAD(&syncobj->cb_list); 289 spin_lock_init(&syncobj->lock); 290 291 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) { 292 ret = drm_syncobj_assign_null_handle(syncobj); 293 if (ret < 0) { 294 drm_syncobj_put(syncobj); 295 return ret; 296 } 297 } 298 299 if (fence) 300 drm_syncobj_replace_fence(syncobj, fence); 301 302 *out_syncobj = syncobj; 303 return 0; 304 } 305 EXPORT_SYMBOL(drm_syncobj_create); 306 307 /** 308 * drm_syncobj_get_handle - get a handle from a syncobj 309 * @file_private: drm file private pointer 310 * @syncobj: Sync object to export 311 * @handle: out parameter with the new handle 312 * 313 * Exports a sync object created with drm_syncobj_create() as a handle on 314 * @file_private to userspace. 315 * 316 * Returns 0 on success or a negative error value on failure. 
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	/* idr_preload + GFP_NOWAIT lets the allocation happen under the
	 * non-sleeping table spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		/* Allocation failed; drop the reference meant for the idr. */
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

/* Create a new syncobj and immediately expose it as a handle on
 * @file_private.  On success the idr holds the only extra reference.
 */
static int drm_syncobj_create_as_handle(struct drm_file *file_private,
					u32 *handle, uint32_t flags)
{
	int ret;
	struct drm_syncobj *syncobj;

	ret = drm_syncobj_create(&syncobj, flags, NULL);
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
	/* Drop the creation reference; the idr keeps its own (if any). */
	drm_syncobj_put(syncobj);
	return ret;
}

/* Remove @handle from the file's idr and drop the idr's reference.
 * Returns -EINVAL if the handle was not allocated.
 */
static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	drm_syncobj_put(syncobj);
	return 0;
}

/* Release for the anon-inode file backing an exported syncobj fd;
 * drops the reference taken in drm_syncobj_get_fd().
 */
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};

/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	/* Reference owned by the file; released in
	 * drm_syncobj_file_release().  Taken before fd_install() publishes
	 * the fd to userspace.
	 */
	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

/* Resolve @handle on @file_private and export it as an fd. */
static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
				    u32 handle, int *p_fd)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret;

	if (!syncobj)
		return -EINVAL;

	ret = drm_syncobj_get_fd(syncobj, p_fd);
	drm_syncobj_put(syncobj);
	return ret;
}

/* Import a syncobj fd (created by drm_syncobj_get_fd()) as a handle on
 * @file_private.
 */
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct file *file;
	int ret;

	file = fget(fd);
	if (!file)
		return -EINVAL;

	/* The fops pointer identifies genuine syncobj fds; reject
	 * anything else before touching private_data.
	 */
	if (file->f_op != &drm_syncobj_file_fops) {
		fput(file);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	/* idr_alloc() starts at 1, so any valid handle is > 0. */
	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fput(file);
	return ret;
}

/* Take the fence out of a sync_file fd and install it into the syncobj
 * identified by @handle.
 */
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
					      int fd, int handle)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	struct drm_syncobj *syncobj;

	if (!fence)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj) {
		dma_fence_put(fence);
		return -ENOENT;
	}

	/* replace_fence takes its own reference; drop ours afterwards. */
	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
	drm_syncobj_put(syncobj);
	return 0;
}

/* Export the syncobj's current fence as a new sync_file fd.  Fails with
 * -EINVAL (via drm_syncobj_find_fence) if no fence is attached yet.
 */
static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	/* sync_file_create() took its own reference on success. */
	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
}
/**
 * drm_syncobj_open - initalizes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}

/* idr_for_each() callback: drop the idr's reference on each remaining
 * syncobj at file close.
 */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}

/* DRM_IOCTL_SYNCOBJ_CREATE: create a syncobj and return its handle. */
int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* no valid flags yet */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}

/* DRM_IOCTL_SYNCOBJ_DESTROY: drop a handle from this file. */
int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}

/* DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD: export a handle either as an opaque
 * syncobj fd or, with the EXPORT_SYNC_FILE flag, as a sync_file fd.
 */
int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}

/* DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE: the import counterpart of the above. */
int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}

/* Per-syncobj wait state for drm_syncobj_array_wait_timeout(). */
struct syncobj_wait_entry {
	struct task_struct *task;	/* waiter to wake */
	struct dma_fence *fence;	/* fence being waited on, may be NULL */
	struct dma_fence_cb fence_cb;	/* signal callback on @fence */
	struct drm_syncobj_cb syncobj_cb; /* fence-arrival callback */
};

/* dma_fence signal callback: wake the waiting task. */
static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}

/* Syncobj callback: a fence was just installed (WAIT_FOR_SUBMIT case).
 * Grab it and wake the waiter so the main loop can pick it up.
 */
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct drm_syncobj_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, syncobj_cb);

	/* This happens inside the syncobj lock */
	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
							      lockdep_is_held(&syncobj->lock)));
	wake_up_process(wait->task);
}

/* Wait on @count syncobjs for up to @timeout jiffies.
 *
 * Returns remaining jiffies (> 0) on success, 0 on timeout, or a
 * negative error (-EINVAL for a missing fence without WAIT_FOR_SUBMIT,
 * -ERESTARTSYS on signal, -ENOMEM).  On success *idx receives the index
 * of the first signaled entry (ANY mode).
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	signed long ret;
	uint32_t signaled_count, i;

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		entries[i].task = current;
		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!entries[i].fence) {
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				ret = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Initialize ret to the max of timeout and 1.  That way, the
	 * default return value indicates a successful wait and not a
	 * timeout.
	 */
	ret = max_t(signed long, timeout, 1);

	/* Already satisfied: all signaled, or any signaled in ANY mode. */
	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called.  So here if we fail to match signaled_count, we need to
	 * fallthough and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i) {
			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
							      &entries[i].fence,
							      &entries[i].syncobj_cb,
							      syncobj_wait_syncobj_func);
		}
	}

	do {
		/* Must go interruptible before re-checking fences so a
		 * wake between check and schedule is not lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			/* dma_fence_add_callback() returning non-zero
			 * means the fence was already signaled.
			 */
			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			/* If we are doing a 0 timeout wait and we got
			 * here, then we just timed out.
			 */
			ret = 0;
			goto done_waiting;
		}

		/* ret doubles as the remaining-jiffies budget. */
		ret = schedule_timeout(ret);

		if (ret > 0 && signal_pending(current))
			ret = -ERESTARTSYS;
	} while (ret > 0);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	/* Tear down any callbacks still registered and drop fence refs. */
	for (i = 0; i < count; ++i) {
		if (entries[i].syncobj_cb.func)
			drm_syncobj_remove_callback(syncobjs[i],
						    &entries[i].syncobj_cb);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return ret;
}

/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: timeout nsec component in ns, 0 for poll
 *
 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
 */
static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* make 0 timeout means poll - absolute 0 doesn't seem valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	/* A deadline already in the past degenerates to a poll. */
	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/* clamp timeout to avoid infinite timeout */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	/* +1 rounds up so we never wait less than requested. */
	return timeout_jiffies64 + 1;
}

/* Translate the ioctl args into a jiffies wait, mapping a timed-out
 * wait (0) to -ETIME and filling in first_signaled on success.
 */
static int drm_syncobj_array_wait(struct drm_device *dev,
				  struct drm_file *file_private,
				  struct drm_syncobj_wait *wait,
				  struct drm_syncobj **syncobjs)
{
	signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
	signed long ret = 0;
	uint32_t first = ~0;

	ret = drm_syncobj_array_wait_timeout(syncobjs,
					     wait->count_handles,
					     wait->flags,
					     timeout, &first);
	if (ret < 0)
		return ret;

	wait->first_signaled = first;
	if (ret == 0)
		return -ETIME;
	return 0;
}

/* Copy an array of handles from userspace and resolve each to a
 * referenced syncobj.  On success the caller owns *syncobjs_out and must
 * release it with drm_syncobj_array_free().
 */
static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	/* i is the index that failed; unwind the references before it. */
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}

/* Release an array built by drm_syncobj_array_find(). */
static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
				   uint32_t count)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
}

/* DRM_IOCTL_SYNCOBJ_WAIT: wait on an array of syncobj handles. */
int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_private)
{
	struct drm_syncobj_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     args, syncobjs);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

/* DRM_IOCTL_SYNCOBJ_RESET: detach the fence from each handle, returning
 * the syncobjs to the unsignaled, fence-less state.
 */
int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_replace_fence(syncobjs[i], NULL);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return 0;
}

/* DRM_IOCTL_SYNCOBJ_SIGNAL: install an already-signaled stub fence into
 * each handle.  Stops at the first allocation failure.
 */
int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		ret = drm_syncobj_assign_null_handle(syncobjs[i]);
		if (ret < 0)
			break;
	}

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}