/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj) are persistent objects that
 * contain an optional fence. The fence can be updated with a new fence,
 * or be NULL.
 *
 * syncobj's can be waited upon, in which case the wait is satisfied by
 * the underlying fence.
 *
 * syncobj's can be exported to fd's and back; these fd's are opaque and
 * have no other use case, except passing the syncobj between processes.
 *
 * Their primary use-case is to implement Vulkan fences and semaphores.
 *
 * syncobj's have a kref reference count, but also have an optional file.
 * The file is only created once the syncobj is exported.
 * The file takes a reference on the kref.
 */
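/*
 * Illustrative userspace flow (a sketch, not part of this file's API
 * surface): a Vulkan driver implementing VkFence on top of a syncobj would
 * roughly do the following with the ioctls defined at the bottom of this
 * file. The drm_fd/deadline_ns names are hypothetical; the ioctls and
 * struct fields are the real UAPI::
 *
 *	struct drm_syncobj_create create = { .flags = 0 };
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
 *
 *	// ... submit work that will eventually install a fence ...
 *
 *	struct drm_syncobj_wait wait = {
 *		.handles = (__u64)(uintptr_t)&create.handle,
 *		.timeout_nsec = deadline_ns, // absolute CLOCK_MONOTONIC time
 *		.count_handles = 1,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */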
#include <drm/drmP.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
#include <linux/sched/signal.h>

#include "drm_internal.h"
#include <drm/drm_syncobj.h>

/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL.
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
					    struct drm_syncobj_cb *cb,
					    drm_syncobj_func_t func)
{
	cb->func = func;
	list_add_tail(&cb->node, &syncobj->cb_list);
}

static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
						 struct dma_fence **fence,
						 struct drm_syncobj_cb *cb,
						 drm_syncobj_func_t func)
{
	int ret;

	*fence = drm_syncobj_fence_get(syncobj);
	if (*fence)
		return 1;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed. Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	if (syncobj->fence) {
		*fence = dma_fence_get(syncobj->fence);
		ret = 1;
	} else {
		*fence = NULL;
		drm_syncobj_add_callback_locked(syncobj, cb, func);
		ret = 0;
	}
	spin_unlock(&syncobj->lock);

	return ret;
}

/**
 * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
 * @syncobj: Sync object to which to add the callback
 * @cb: Callback to add
 * @func: Func to use when initializing the drm_syncobj_cb struct
 *
 * This adds a callback to be called next time the fence is replaced
 */
void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
			      struct drm_syncobj_cb *cb,
			      drm_syncobj_func_t func)
{
	spin_lock(&syncobj->lock);
	drm_syncobj_add_callback_locked(syncobj, cb, func);
	spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_add_callback);

/**
 * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list
 * @syncobj: Sync object from which to remove the callback
 * @cb: Callback to remove
 */
void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
				 struct drm_syncobj_cb *cb)
{
	spin_lock(&syncobj->lock);
	list_del_init(&cb->node);
	spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_remove_callback);

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in sync object.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct drm_syncobj_cb *cur, *tmp;

	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = syncobj->fence;
	syncobj->fence = fence;

	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(syncobj, cur);
		}
	}

	spin_unlock(&syncobj->lock);

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
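/*
 * A minimal sketch (assumed driver-side code, not from this file) of the
 * intended use of the helpers above from a command-submission path: resolve
 * the handle once, then install the job's hardware fence so that the
 * callbacks of any DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT waiters fire.
 * job_fence and the handle source are hypothetical names::
 *
 *	struct drm_syncobj *syncobj;
 *
 *	syncobj = drm_syncobj_find(file_private, out_sync_handle);
 *	if (!syncobj)
 *		return -ENOENT;
 *	drm_syncobj_replace_fence(syncobj, job_fence);
 *	drm_syncobj_put(syncobj);
 */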
struct drm_syncobj_null_fence {
	struct dma_fence base;
	spinlock_t lock;
};

static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
{
	return "syncobjnull";
}

static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
{
	return !dma_fence_is_signaled(fence);
}

static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
	.get_driver_name = drm_syncobj_null_fence_get_name,
	.get_timeline_name = drm_syncobj_null_fence_get_name,
	.enable_signaling = drm_syncobj_null_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = NULL,
};

static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct drm_syncobj_null_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops,
		       &fence->lock, 0, 0);
	dma_fence_signal(&fence->base);

	drm_syncobj_replace_fence(syncobj, &fence->base);

	dma_fence_put(&fence->base);

	return 0;
}

/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @fence: out parameter for the fence
 *
 * Returns 0 on success, -ENOENT if the handle is invalid, or -EINVAL if
 * the sync object has no fence attached.
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret = 0;

	if (!syncobj)
		return -ENOENT;

	*fence = drm_syncobj_fence_get(syncobj);
	if (!*fence)
		ret = -EINVAL;

	drm_syncobj_put(syncobj);
	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
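/*
 * Sketch of the wait side (assumed driver code; variable names are
 * hypothetical): a driver that must wait for a syncobj before running a job
 * resolves the handle to a fence snapshot up front and waits on that::
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = drm_syncobj_find_fence(file_private, handle, &fence);
 *	if (ret)
 *		return ret;
 *	ret = dma_fence_wait(fence, true);
 *	dma_fence_put(fence);
 */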
/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	drm_syncobj_replace_fence(syncobj, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	int ret;
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->lock);

	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
		ret = drm_syncobj_assign_null_handle(syncobj);
		if (ret < 0) {
			drm_syncobj_put(syncobj);
			return ret;
		}
	}

	if (fence)
		drm_syncobj_replace_fence(syncobj, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);

/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
					u32 *handle, uint32_t flags)
{
	int ret;
	struct drm_syncobj *syncobj;

	ret = drm_syncobj_create(&syncobj, flags, NULL);
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};

static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
{
	struct file *file = anon_inode_getfile("syncobj_file",
					       &drm_syncobj_file_fops,
					       syncobj, 0);
	if (IS_ERR(file))
		return PTR_ERR(file);

	drm_syncobj_get(syncobj);
	if (cmpxchg(&syncobj->file, NULL, file)) {
		/* lost the race */
		fput(file);
	}

	return 0;
}

int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	int ret;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	if (!syncobj->file) {
		ret = drm_syncobj_alloc_file(syncobj);
		if (ret) {
			put_unused_fd(fd);
			return ret;
		}
	}
	fd_install(fd, syncobj->file);
	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
				    u32 handle, int *p_fd)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret;

	if (!syncobj)
		return -EINVAL;

	ret = drm_syncobj_get_fd(syncobj, p_fd);
	drm_syncobj_put(syncobj);
	return ret;
}

static struct drm_syncobj *drm_syncobj_fdget(int fd)
{
	struct file *file = fget(fd);

	if (!file)
		return NULL;
	if (file->f_op != &drm_syncobj_file_fops)
		goto err;

	return file->private_data;
err:
	fput(file);
	return NULL;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
	int ret;

	if (!syncobj)
		return -EINVAL;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret < 0) {
		fput(syncobj->file);
		return ret;
	}
	*handle = ret;
	return 0;
}

static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
					      int fd, int handle)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	struct drm_syncobj *syncobj;

	if (!fence)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj) {
		dma_fence_put(fence);
		return -ENOENT;
	}

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
}
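/*
 * Illustrative userspace counterpart (hypothetical wrapper code) for the
 * export path above: snapshot a syncobj's current fence as a sync_file fd,
 * e.g. to hand to a KMS atomic commit as an IN_FENCE_FD::
 *
 *	struct drm_syncobj_handle args = {
 *		.handle = syncobj_handle,
 *		.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
 *	// args.fd now refers to a sync_file wrapping the fence snapshot
 */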
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init(&file_private->syncobj_idr);
	spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* no valid flags yet */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}
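/*
 * And the matching import direction (again a hypothetical userspace
 * wrapper): replace a syncobj's fence with the one carried by a sync_file
 * fd, using the ioctl above::
 *
 *	struct drm_syncobj_handle args = {
 *		.handle = syncobj_handle,
 *		.fd = sync_file_fd,
 *		.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
 */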
struct syncobj_wait_entry {
	struct task_struct *task;
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	struct drm_syncobj_cb syncobj_cb;
};

static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct drm_syncobj_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, syncobj_cb);

	/* This happens inside the syncobj lock */
	wait->fence = dma_fence_get(syncobj->fence);
	wake_up_process(wait->task);
}

static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	signed long ret;
	uint32_t signaled_count, i;

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries. We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		entries[i].task = current;
		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!entries[i].fence) {
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				ret = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Initialize ret to the max of timeout and 1. That way, the
	 * default return value indicates a successful wait and not a
	 * timeout.
	 */
	ret = max_t(signed long, timeout, 1);

	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called. So here if we fail to match signaled_count, we need to
	 * fall through and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i) {
			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
							      &entries[i].fence,
							      &entries[i].syncobj_cb,
							      syncobj_wait_syncobj_func);
		}
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			/* If we are doing a 0 timeout wait and we got
			 * here, then we just timed out.
			 */
			ret = 0;
			goto done_waiting;
		}

		ret = schedule_timeout(ret);

		if (ret > 0 && signal_pending(current))
			ret = -ERESTARTSYS;
	} while (ret > 0);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		if (entries[i].syncobj_cb.func)
			drm_syncobj_remove_callback(syncobjs[i],
						    &entries[i].syncobj_cb);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return ret;
}
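/*
 * Worked example for drm_timeout_abs_to_jiffies() below (illustrative
 * numbers): with HZ=250 a jiffy is 4ms, so an absolute timeout 10ms in the
 * future gives timeout_ns = 10000000, which nsecs_to_jiffies64() truncates
 * to 2 jiffies (8ms). The +1 in the return value compensates for that
 * truncation, so the wait lasts at least as long as requested (12ms here)
 * rather than returning early.
 */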
/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: timeout nsec component in ns, 0 for poll
 *
 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
 */
static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* make 0 timeout means poll - absolute 0 doesn't seem valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/* clamp timeout to avoid infinite timeout */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies64 + 1;
}

static int drm_syncobj_array_wait(struct drm_device *dev,
				  struct drm_file *file_private,
				  struct drm_syncobj_wait *wait,
				  struct drm_syncobj **syncobjs)
{
	signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
	signed long ret = 0;
	uint32_t first = ~0;

	ret = drm_syncobj_array_wait_timeout(syncobjs,
					     wait->count_handles,
					     wait->flags,
					     timeout, &first);
	if (ret < 0)
		return ret;

	wait->first_signaled = first;
	if (ret == 0)
		return -ETIME;
	return 0;
}

static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
				   uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
}
int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_private)
{
	struct drm_syncobj_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     args, syncobjs);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_replace_fence(syncobjs[i], NULL);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		ret = drm_syncobj_assign_null_handle(syncobjs[i]);
		if (ret < 0)
			break;
	}

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}