/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj) are persistent objects that contain
 * an optional fence. The fence can be replaced with a new fence, or set to
 * NULL.
 *
 * A syncobj can be waited upon, in which case the wait completes when the
 * underlying fence signals.
 *
 * Syncobjs can be exported to fd's and back. These fd's are opaque and have
 * no use case other than passing the syncobj between processes.
 *
 * Their primary use-case is to implement Vulkan fences and semaphores.
 *
 * A syncobj has a kref reference count, but also has an optional file.
 * The file is only created once the syncobj is exported, and it takes a
 * reference on the kref.
 */
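/*
 * Illustrative in-kernel usage (a minimal sketch, not lifted from any real
 * driver; error handling is trimmed and "submit_fence" is assumed to come
 * from the caller's job submission):
 *
 *	struct drm_syncobj *syncobj;
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = drm_syncobj_create(&syncobj, 0, NULL);
 *	if (ret)
 *		return ret;
 *
 *	drm_syncobj_replace_fence(syncobj, submit_fence);
 *
 *	fence = drm_syncobj_fence_get(syncobj);
 *	if (fence) {
 *		dma_fence_wait(fence, true);
 *		dma_fence_put(fence);
 *	}
 *	drm_syncobj_put(syncobj);
 */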
#include <drm/drmP.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
#include <linux/sched/signal.h>

#include "drm_internal.h"
#include <drm/drm_syncobj.h>

/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL.
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
					    struct drm_syncobj_cb *cb,
					    drm_syncobj_func_t func)
{
	cb->func = func;
	list_add_tail(&cb->node, &syncobj->cb_list);
}

static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
						 struct dma_fence **fence,
						 struct drm_syncobj_cb *cb,
						 drm_syncobj_func_t func)
{
	int ret;

	*fence = drm_syncobj_fence_get(syncobj);
	if (*fence)
		return 1;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	if (syncobj->fence) {
		*fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
								 lockdep_is_held(&syncobj->lock)));
		ret = 1;
	} else {
		*fence = NULL;
		drm_syncobj_add_callback_locked(syncobj, cb, func);
		ret = 0;
	}
	spin_unlock(&syncobj->lock);

	return ret;
}

/**
 * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
 * @syncobj: Sync object to which to add the callback
 * @cb: Callback to add
 * @func: Func to use when initializing the drm_syncobj_cb struct
 *
 * This adds a callback to be called next time the fence is replaced
 */
void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
			      struct drm_syncobj_cb *cb,
			      drm_syncobj_func_t func)
{
	spin_lock(&syncobj->lock);
	drm_syncobj_add_callback_locked(syncobj, cb, func);
	spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_add_callback);

/**
 * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list
 * @syncobj: Sync object from which to remove the callback
 * @cb: Callback to remove
 */
void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
				 struct drm_syncobj_cb *cb)
{
	spin_lock(&syncobj->lock);
	list_del_init(&cb->node);
	spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_remove_callback);

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct drm_syncobj_cb *cur, *tmp;

	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(syncobj, cur);
		}
	}

	spin_unlock(&syncobj->lock);

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
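/*
 * drm_syncobj_fence_get_or_add_callback() is the building block for
 * "wait for submit" semantics: either a fence is already installed and the
 * caller takes a reference, or the caller's callback is queued to run under
 * syncobj->lock on the next drm_syncobj_replace_fence(). This is how the
 * wait code at the bottom of this file uses it; a condensed sketch (error
 * handling and the sleep/wake loop omitted):
 *
 *	struct syncobj_wait_entry wait = { .task = current };
 *
 *	if (drm_syncobj_fence_get_or_add_callback(syncobj, &wait.fence,
 *						  &wait.syncobj_cb,
 *						  syncobj_wait_syncobj_func))
 *		... wait.fence holds a reference now ...
 *	else
 *		... syncobj_wait_syncobj_func() will set wait.fence and
 *		    wake the task once a fence is installed ...
 */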
struct drm_syncobj_null_fence {
	struct dma_fence base;
	spinlock_t lock;
};

static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
{
	return "syncobjnull";
}

static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
{
	return !dma_fence_is_signaled(fence);
}

static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
	.get_driver_name = drm_syncobj_null_fence_get_name,
	.get_timeline_name = drm_syncobj_null_fence_get_name,
	.enable_signaling = drm_syncobj_null_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = NULL,
};

static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct drm_syncobj_null_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops,
		       &fence->lock, 0, 0);
	dma_fence_signal(&fence->base);

	drm_syncobj_replace_fence(syncobj, &fence->base);

	dma_fence_put(&fence->base);

	return 0;
}

/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @fence: out parameter for the fence
 *
 * Returns 0 on success, -ENOENT if the handle is invalid, or -EINVAL if the
 * sync object currently has no fence.
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret = 0;

	if (!syncobj)
		return -ENOENT;

	*fence = drm_syncobj_fence_get(syncobj);
	if (!*fence)
		ret = -EINVAL;

	drm_syncobj_put(syncobj);
	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);

/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	drm_syncobj_replace_fence(syncobj, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	int ret;
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->lock);

	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
		ret = drm_syncobj_assign_null_handle(syncobj);
		if (ret < 0) {
			drm_syncobj_put(syncobj);
			return ret;
		}
	}

	if (fence)
		drm_syncobj_replace_fence(syncobj, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);
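/*
 * A minimal sketch of the creation helpers above (illustrative only;
 * "file_private" is assumed to be a valid struct drm_file). A syncobj
 * created with DRM_SYNCOBJ_CREATE_SIGNALED starts out holding the
 * already-signaled null fence, so an immediate wait on it completes at
 * once:
 *
 *	struct drm_syncobj *syncobj;
 *	u32 handle;
 *	int ret;
 *
 *	ret = drm_syncobj_create(&syncobj, DRM_SYNCOBJ_CREATE_SIGNALED, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_syncobj_get_handle(file_private, syncobj, &handle);
 *	drm_syncobj_put(syncobj);
 */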
/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exposes a sync object previously created with drm_syncobj_create() as a
 * handle on @file_private.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
					u32 *handle, uint32_t flags)
{
	int ret;
	struct drm_syncobj *syncobj;

	ret = drm_syncobj_create(&syncobj, flags, NULL);
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};

static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
{
	struct file *file = anon_inode_getfile("syncobj_file",
					       &drm_syncobj_file_fops,
					       syncobj, 0);

	if (IS_ERR(file))
		return PTR_ERR(file);

	drm_syncobj_get(syncobj);
	if (cmpxchg(&syncobj->file, NULL, file)) {
		/* lost the race */
		fput(file);
	}

	return 0;
}

int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	int ret;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	if (!syncobj->file) {
		ret = drm_syncobj_alloc_file(syncobj);
		if (ret) {
			put_unused_fd(fd);
			return ret;
		}
	}
	fd_install(fd, syncobj->file);
	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);
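/*
 * Note the lock-free publish in drm_syncobj_alloc_file(): two threads can
 * race to export the same syncobj and both allocate a file, but the
 * cmpxchg() lets exactly one of them install it. The loser's fput() runs
 * drm_syncobj_file_release(), dropping the extra reference it took, so the
 * refcount stays balanced either way. The winning file then lives until
 * its last fd is closed.
 */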
static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
				    u32 handle, int *p_fd)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret;

	if (!syncobj)
		return -EINVAL;

	ret = drm_syncobj_get_fd(syncobj, p_fd);
	drm_syncobj_put(syncobj);
	return ret;
}

static struct drm_syncobj *drm_syncobj_fdget(int fd)
{
	struct file *file = fget(fd);

	if (!file)
		return NULL;
	if (file->f_op != &drm_syncobj_file_fops)
		goto err;

	return file->private_data;
err:
	fput(file);
	return NULL;
};

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
	int ret;

	if (!syncobj)
		return -EINVAL;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret < 0) {
		fput(syncobj->file);
		return ret;
	}
	*handle = ret;
	return 0;
}

static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
					      int fd, int handle)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	struct drm_syncobj *syncobj;

	if (!fence)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj) {
		dma_fence_put(fence);
		return -ENOENT;
	}

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
}

/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init(&file_private->syncobj_idr);
	spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}
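/*
 * Two different kinds of fd flow through the ioctls below: the opaque
 * syncobj fd (drm_syncobj_handle_to_fd()/drm_syncobj_fd_to_handle()) shares
 * the whole object, so the importer observes later fence replacements,
 * while a sync_file fd (the *_SYNC_FILE flags) snapshots only the fence
 * installed at export time. A hedged userspace sketch of the latter,
 * assuming a valid drm fd and syncobj handle:
 *
 *	struct drm_syncobj_handle args = {
 *		.handle = handle,
 *		.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args) == 0)
 *		... args.fd now behaves like any other sync_file fd ...
 */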
int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* no valid flags yet */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}

struct syncobj_wait_entry {
	struct task_struct *task;
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	struct drm_syncobj_cb syncobj_cb;
};

static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct drm_syncobj_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, syncobj_cb);

	/* This happens inside the syncobj lock */
	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
							      lockdep_is_held(&syncobj->lock)));
	wake_up_process(wait->task);
}
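/*
 * Each waiter therefore arms up to two callbacks: syncobj_wait_syncobj_func
 * fires when a fence is first installed in the syncobj (the WAIT_FOR_SUBMIT
 * case) and grabs a reference to it, while syncobj_wait_fence_func fires
 * when that fence actually signals. Both simply wake the sleeping task; the
 * loop below re-inspects the entries to decide whether the wait is
 * satisfied.
 */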
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	signed long ret;
	uint32_t signaled_count, i;

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		entries[i].task = current;
		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!entries[i].fence) {
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				ret = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Initialize ret to the max of timeout and 1.  That way, the
	 * default return value indicates a successful wait and not a
	 * timeout.
	 */
	ret = max_t(signed long, timeout, 1);

	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called.  So here if we fail to match signaled_count, we need to
	 * fall through and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i) {
			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
							      &entries[i].fence,
							      &entries[i].syncobj_cb,
							      syncobj_wait_syncobj_func);
		}
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			/* If we are doing a 0 timeout wait and we got
			 * here, then we just timed out.
			 */
			ret = 0;
			goto done_waiting;
		}

		ret = schedule_timeout(ret);

		if (ret > 0 && signal_pending(current))
			ret = -ERESTARTSYS;
	} while (ret > 0);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		if (entries[i].syncobj_cb.func)
			drm_syncobj_remove_callback(syncobjs[i],
						    &entries[i].syncobj_cb);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return ret;
}

/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: absolute timeout in ns, 0 for poll
 *
 * Calculate the remaining timeout in jiffies from an absolute
 * CLOCK_MONOTONIC time in ns.
 */
static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* a 0 timeout means poll - absolute 0 doesn't seem valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/* clamp timeout to avoid infinite timeout */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies64 + 1;
}
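/*
 * The wait ioctls take an absolute timeout on the CLOCK_MONOTONIC clock
 * (compared against ktime_get() above). A hedged userspace sketch for
 * turning a relative timeout into the expected absolute value:
 *
 *	struct timespec ts;
 *	int64_t abs_timeout_nsec;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	abs_timeout_nsec = (int64_t)ts.tv_sec * 1000000000ll +
 *			   ts.tv_nsec + relative_timeout_nsec;
 */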
static int drm_syncobj_array_wait(struct drm_device *dev,
				  struct drm_file *file_private,
				  struct drm_syncobj_wait *wait,
				  struct drm_syncobj **syncobjs)
{
	signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
	signed long ret = 0;
	uint32_t first = ~0;

	ret = drm_syncobj_array_wait_timeout(syncobjs,
					     wait->count_handles,
					     wait->flags,
					     timeout, &first);
	if (ret < 0)
		return ret;

	wait->first_signaled = first;
	if (ret == 0)
		return -ETIME;
	return 0;
}

static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
				   uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
}

int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_private)
{
	struct drm_syncobj_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     args, syncobjs);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_replace_fence(syncobjs[i], NULL);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		ret = drm_syncobj_assign_null_handle(syncobjs[i]);
		if (ret < 0)
			break;
	}

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
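/*
 * A hedged userspace sketch of the wait ioctl above, assuming "handles" is
 * an array of "count" valid syncobj handles on drm_fd and abs_timeout_nsec
 * was computed as described next to drm_timeout_abs_to_jiffies():
 *
 *	struct drm_syncobj_wait wait = {
 *		.handles = (uintptr_t)handles,
 *		.timeout_nsec = abs_timeout_nsec,
 *		.count_handles = count,
 *		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
 *	};
 *	int ret;
 *
 *	ret = ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 *	... ret == -1 && errno == ETIME means the timeout expired;
 *	    without WAIT_ALL, wait.first_signaled names the first
 *	    signaled handle ...
 */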