/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj) are persistent objects that
 * contain an optional fence. The fence can be replaced with a new
 * fence or set to NULL.
 *
 * A syncobj can be waited upon; the wait completes when the
 * underlying fence signals.
 *
 * syncobjs can be exported to and imported from file descriptors.
 * These fds are opaque and have no use other than passing the syncobj
 * between processes.
 *
 * Their primary use-case is to implement Vulkan fences and semaphores.
 *
 * A syncobj has a kref reference count and an optional file. The file
 * is only created once the syncobj is exported, and it takes a
 * reference on the kref.
 */

#include <drm/drmP.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
#include <linux/sched/signal.h>

#include "drm_internal.h"
#include <drm/drm_syncobj.h>

/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL.
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
					    struct drm_syncobj_cb *cb,
					    drm_syncobj_func_t func)
{
	cb->func = func;
	list_add_tail(&cb->node, &syncobj->cb_list);
}
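/*
 * Example (illustrative sketch, not part of this file's build): a driver
 * ioctl that consumes a syncobj handle as an "in" fence would typically
 * pair drm_syncobj_find() with drm_syncobj_put() as below; the
 * args->in_syncobj field is a hypothetical name.
 *
 *	struct drm_syncobj *syncobj;
 *	struct dma_fence *fence;
 *
 *	syncobj = drm_syncobj_find(file_private, args->in_syncobj);
 *	if (!syncobj)
 *		return -ENOENT;
 *	fence = drm_syncobj_fence_get(syncobj);
 *	drm_syncobj_put(syncobj);
 *	if (!fence)
 *		return -EINVAL;
 *	(make the job wait on the fence, then dma_fence_put(fence))
 */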
static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
						 struct dma_fence **fence,
						 struct drm_syncobj_cb *cb,
						 drm_syncobj_func_t func)
{
	int ret;

	*fence = drm_syncobj_fence_get(syncobj);
	if (*fence)
		return 1;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	if (syncobj->fence) {
		*fence = dma_fence_get(syncobj->fence);
		ret = 1;
	} else {
		*fence = NULL;
		drm_syncobj_add_callback_locked(syncobj, cb, func);
		ret = 0;
	}
	spin_unlock(&syncobj->lock);

	return ret;
}

/**
 * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
 * @syncobj: Sync object to which to add the callback
 * @cb: Callback to add
 * @func: Func to use when initializing the drm_syncobj_cb struct
 *
 * This adds a callback to be called next time the fence is replaced.
 */
void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
			      struct drm_syncobj_cb *cb,
			      drm_syncobj_func_t func)
{
	spin_lock(&syncobj->lock);
	drm_syncobj_add_callback_locked(syncobj, cb, func);
	spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_add_callback);

/**
 * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list
 * @syncobj: Sync object from which to remove the callback
 * @cb: Callback to remove
 */
void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
				 struct drm_syncobj_cb *cb)
{
	spin_lock(&syncobj->lock);
	list_del_init(&cb->node);
	spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_remove_callback);

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct drm_syncobj_cb *cur, *tmp;

	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = syncobj->fence;
	syncobj->fence = fence;

	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(syncobj, cur);
		}
	}

	spin_unlock(&syncobj->lock);

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);

struct drm_syncobj_null_fence {
	struct dma_fence base;
	spinlock_t lock;
};

static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
{
	return "syncobjnull";
}

static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
{
	dma_fence_enable_sw_signaling(fence);
	return !dma_fence_is_signaled(fence);
}

static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
	.get_driver_name = drm_syncobj_null_fence_get_name,
	.get_timeline_name = drm_syncobj_null_fence_get_name,
	.enable_signaling = drm_syncobj_null_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = NULL,
};

static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct drm_syncobj_null_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops,
		       &fence->lock, 0, 0);
	dma_fence_signal(&fence->base);

	drm_syncobj_replace_fence(syncobj, &fence->base);

	dma_fence_put(&fence->base);

	return 0;
}
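/*
 * Example (sketch, with a hypothetical job_fence and args->out_syncobj):
 * the "out" fence side of a driver submission installs the job's fence
 * into the syncobj with drm_syncobj_replace_fence(); any callbacks
 * registered on the cb_list then move their wait onto the new fence.
 *
 *	syncobj = drm_syncobj_find(file_private, args->out_syncobj);
 *	if (!syncobj)
 *		return -ENOENT;
 *	drm_syncobj_replace_fence(syncobj, job_fence);
 *	drm_syncobj_put(syncobj);
 */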
/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @fence: out parameter for the found fence
 *
 * Returns 0 on success, with a reference to the fence in @fence, -ENOENT
 * if the handle does not name a valid syncobj, or -EINVAL if the syncobj
 * has no fence attached.
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret = 0;

	if (!syncobj)
		return -ENOENT;

	*fence = drm_syncobj_fence_get(syncobj);
	if (!*fence)
		ret = -EINVAL;

	drm_syncobj_put(syncobj);
	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);

/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	drm_syncobj_replace_fence(syncobj, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * Returns 0 on success, or a negative error code on failure.
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	int ret;
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->lock);

	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
		ret = drm_syncobj_assign_null_handle(syncobj);
		if (ret < 0) {
			drm_syncobj_put(syncobj);
			return ret;
		}
	}

	if (fence)
		drm_syncobj_replace_fence(syncobj, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);

/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: sync object to export
 * @handle: out parameter for the new handle
 *
 * Allocates a handle on @file_private that references @syncobj. Returns 0
 * on success, or a negative error code on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
					u32 *handle, uint32_t flags)
{
	int ret;
	struct drm_syncobj *syncobj;

	ret = drm_syncobj_create(&syncobj, flags, NULL);
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};
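/*
 * Example (userspace sketch, assuming libdrm's drmSyncobj* wrappers for
 * the create/destroy ioctls below): creating and destroying handles from
 * an application, where drm_fd is an open DRM device fd.
 *
 *	uint32_t handle, signaled;
 *
 *	drmSyncobjCreate(drm_fd, 0, &handle);
 *	drmSyncobjCreate(drm_fd, DRM_SYNCOBJ_CREATE_SIGNALED, &signaled);
 *	...
 *	drmSyncobjDestroy(drm_fd, handle);
 */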
/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: sync object to export
 * @p_fd: out parameter for the new file descriptor
 *
 * Exports a sync object as an opaque file descriptor. Returns 0 on
 * success, or a negative error code on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
				    u32 handle, int *p_fd)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret;

	if (!syncobj)
		return -EINVAL;

	ret = drm_syncobj_get_fd(syncobj, p_fd);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct file *file;
	int ret;

	file = fget(fd);
	if (!file)
		return -EINVAL;

	if (file->f_op != &drm_syncobj_file_fops) {
		fput(file);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fput(file);
	return ret;
}

static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
					      int fd, int handle)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	struct drm_syncobj *syncobj;

	if (!fence)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj) {
		dma_fence_put(fence);
		return -ENOENT;
	}

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
}
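/*
 * Example (userspace sketch, assuming libdrm wrappers): the two export
 * paths above. An opaque fd carries the syncobj itself between
 * processes; a sync_file fd carries only the currently attached fence.
 *
 *	int obj_fd, sync_file_fd;
 *
 *	drmSyncobjHandleToFD(drm_fd, handle, &obj_fd);
 *	(send obj_fd over a unix socket; the receiver then calls)
 *	drmSyncobjFDToHandle(drm_fd, obj_fd, &handle);
 *
 *	drmSyncobjExportSyncFile(drm_fd, handle, &sync_file_fd);
 */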
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init(&file_private->syncobj_idr);
	spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects held by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* no other valid flags yet */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}

struct syncobj_wait_entry {
	struct task_struct *task;
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	struct drm_syncobj_cb syncobj_cb;
};

static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct drm_syncobj_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, syncobj_cb);

	/* This happens inside the syncobj lock */
	wait->fence = dma_fence_get(syncobj->fence);
	wake_up_process(wait->task);
}
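/*
 * A summary of the wait machinery below: each waiting task owns one
 * syncobj_wait_entry per handle. If a syncobj has no fence yet and
 * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
 * syncobj_wait_syncobj_func is registered on the syncobj's cb_list and
 * fills in the fence once one is installed; once a fence exists,
 * syncobj_wait_fence_func is registered on the fence itself and wakes
 * the sleeping task when the fence signals.
 */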
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	signed long ret;
	uint32_t signaled_count, i;

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		entries[i].task = current;
		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!entries[i].fence) {
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				ret = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Initialize ret to the max of timeout and 1.  That way, the
	 * default return value indicates a successful wait and not a
	 * timeout.
	 */
	ret = max_t(signed long, timeout, 1);

	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called.  So here if we fail to match signaled_count, we need to
	 * fall through and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i) {
			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
							      &entries[i].fence,
							      &entries[i].syncobj_cb,
							      syncobj_wait_syncobj_func);
		}
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			/* If we are doing a 0 timeout wait and we got
			 * here, then we just timed out.
			 */
			ret = 0;
			goto done_waiting;
		}

		ret = schedule_timeout(ret);

		if (ret > 0 && signal_pending(current))
			ret = -ERESTARTSYS;
	} while (ret > 0);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		if (entries[i].syncobj_cb.func)
			drm_syncobj_remove_callback(syncobjs[i],
						    &entries[i].syncobj_cb);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return ret;
}
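/*
 * Example (userspace sketch): the wait ioctl's timeout_nsec is an
 * absolute CLOCK_MONOTONIC timestamp, matching the ktime_get() based
 * conversion in drm_timeout_abs_to_jiffies() below, so callers build it
 * from a relative timeout roughly like this:
 *
 *	struct timespec ts;
 *	int64_t abs_timeout_ns;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	abs_timeout_ns = (int64_t)ts.tv_sec * 1000000000 +
 *			 ts.tv_nsec + relative_ns;
 */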
/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: absolute timeout in ns, 0 means poll
 *
 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
 */
static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* make a 0 timeout mean poll - an absolute time of 0 doesn't seem valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/* clamp timeout to avoid an infinite timeout */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies64 + 1;
}

static int drm_syncobj_array_wait(struct drm_device *dev,
				  struct drm_file *file_private,
				  struct drm_syncobj_wait *wait,
				  struct drm_syncobj **syncobjs)
{
	signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
	signed long ret = 0;
	uint32_t first = ~0;

	ret = drm_syncobj_array_wait_timeout(syncobjs,
					     wait->count_handles,
					     wait->flags,
					     timeout, &first);
	if (ret < 0)
		return ret;

	wait->first_signaled = first;
	if (ret == 0)
		return -ETIME;
	return 0;
}

static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
				   uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
}

int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_private)
{
	struct drm_syncobj_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     args, syncobjs);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
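/*
 * Example (userspace sketch, assuming libdrm's drmSyncobjWait wrapper
 * for the ioctl above): waiting for both handles to signal, allowing
 * fences to be submitted after the wait has started.
 *
 *	uint32_t handles[2] = { a, b }, first;
 *
 *	ret = drmSyncobjWait(drm_fd, handles, 2, abs_timeout_ns,
 *			     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
 *			     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
 *			     &first);
 */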
int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_replace_fence(syncobjs[i], NULL);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		ret = drm_syncobj_assign_null_handle(syncobjs[i]);
		if (ret < 0)
			break;
	}

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
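/*
 * Example (userspace sketch, assuming libdrm wrappers for the two
 * ioctls above): resetting syncobjs back to the unsignaled state and
 * signaling them from the CPU.
 *
 *	uint32_t handles[2] = { a, b };
 *
 *	drmSyncobjReset(drm_fd, handles, 2);
 *	drmSyncobjSignal(drm_fd, handles, 2);
 */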