// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending event.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

static u32 vmw_fence_goal_read(struct vmw_private *vmw)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
	else
		return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
}

static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
	else
		vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
}

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that IRQ
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT OK when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
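 *
 * Seqno comparisons in this file are done on 32-bit values using the test
 * (a - b < VMW_FENCE_WRAP), so seqno wrap-around is handled as long as
 * outstanding fences stay within 2^31 of the last seqno read from the device.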
 */

static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (!list_empty(&fence->head)) {
		spin_lock(&fman->lock);
		list_del_init(&fence->head);
		spin_unlock(&fman->lock);
	}
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 seqno = vmw_fence_read(dev_priv);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
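 * The worker also switches off the fence goal irq once fman->seqno_valid
 * indicates that no unsignaled fences with attached actions remain.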
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence fman->lock is not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal.
 * It checks to see whether the current fence goal has already passed, and,
 * in that case, scans through all unsignaled fences to get the next fence
 * object with an action attached, and sets the seqno of that fence as a
 * new fence goal.
 *
 * Return: true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	struct vmw_fence_obj *fence, *next_fence;

	if (likely(!fman->seqno_valid))
		return false;

	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_fence_goal_write(fman->dev_priv,
					     fence->base.seqno);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Return: true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;

	seqno = vmw_fence_read(fman->dev_priv);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
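	 * In that case, re-read the device seqno and, if it has moved on,
	 * scan the fence list once more.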
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_fence_read(fman->dev_priv);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);

	ttm_base_object_kfree(ufence, base);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	int ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
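	 * That reference is taken just below with vmw_fence_obj_reference()
	 * and handed over to the ttm base object.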
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);

	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	return ret;
}

/*
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 *
 * Return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division is not available on all 32-bit systems, so
	 * approximate the division by 1000000 with shifts.
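	 * The sum of shifts below evaluates to
	 * x * (2^-20 + 2^-24 - 2^-26), roughly x * 0.9984e-6, which is
	 * within about 0.2% of dividing by 1000000.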
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		       (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence to which @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
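 * It drops the reference on the fence and frees the embedding
 * struct vmw_event_fence_action.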
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should have been allocated
 * using k[mz]alloc and completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
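 *
 * Return: 0 on success, or -ENOMEM if the fence action could not be
 * allocated.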
 */

int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(event->event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
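	 * The ttm base-object reference taken by the lookup is dropped again
	 * below; only the fence reference (and the optional new userspace
	 * ref) is kept.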
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}