/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
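
/*
 * Illustrative note: seqno comparisons in this file use unsigned 32-bit
 * wrap-around arithmetic. A check such as
 *
 *	seqno - fence->base.seqno < VMW_FENCE_WRAP
 *
 * means "the fence seqno has passed", even across a counter wrap: with
 * seqno == 0x00000002 and fence->base.seqno == 0xfffffffe the difference
 * is 4, which is well below VMW_FENCE_WRAP.
 */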

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	unsigned ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @fpriv_head: List head used to link the action into the file private's
 * list of pending events.
 * @event: A pointer to the pending drm event that controls the event
 * delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * the current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;
	struct list_head fpriv_head;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */
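
/**
 * vmw_fence_obj_destroy - The struct fence_ops release callback.
 *
 * @f: The struct fence embedded in a struct vmw_fence_obj.
 *
 * Unlinks the fence from the fence manager's list and hands the object
 * over to its driver-specific destroy function.
 */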
static void vmw_fence_obj_destroy(struct fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct fence *f)
{
	return "svga";
}

static bool vmw_fence_enable_signaling(struct fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

struct vmwgfx_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;
	unsigned long irq_flags;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock_irqsave(f->lock, irq_flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	while (ret > 0) {
		__vmw_fences_update(fman);
		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(f->lock, irq_flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(f->lock, irq_flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(f->lock, irq_flags);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static struct fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};
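
/*
 * Illustrative note: with the ops above, a struct vmw_fence_obj can be
 * handed to the generic struct fence helpers. For example,
 * vmw_fence_obj_wait() below simply does
 *
 *	fence_wait_timeout(&fence->base, interruptible, timeout);
 *
 * and ends up in vmw_fence_wait() through the .wait callback.
 */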

/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock_irq(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock_irq(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list.
		 * hence fman::lock not held.
		 */
		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(fman == NULL))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock_irqsave(&fman->lock, irq_flags);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	unsigned long irq_flags;
	int ret = 0;

	fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		   fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock_irqsave(&fman->lock, irq_flags);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */
		list_add_tail(&action->head, &fman->cleanup_list);
	}
}
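
/*
 * Note on the action callbacks used above: as used throughout this file, a
 * struct vmw_fence_action supplies seq_passed(), which
 * vmw_fences_perform_actions() invokes as soon as the fence is found
 * signaled and which therefore must be safe to run in atomic context, and
 * an optional cleanup(), which is only queued on the cleanup list here and
 * is run later from vmw_fence_work_func() in process context.
 */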

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			iowrite32(fence->base.seqno,
				  fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;

	if (fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	iowrite32(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */
	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	__vmw_fences_update(fman);
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return 1;

	vmw_fences_update(fman);

	return fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(fence == NULL))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}
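
/*
 * Illustrative sketch (not part of the driver): a typical in-kernel user of
 * the API above creates a fence for a submitted seqno, waits for it and
 * drops the reference when done:
 *
 *	struct vmw_fence_obj *fence;
 *	int ret = vmw_fence_create(fman, seqno, &fence);
 *
 *	if (ret == 0) {
 *		ret = vmw_fence_obj_wait(fence, false, true,
 *					 VMW_FENCE_WAIT_TIMEOUT);
 *		vmw_fence_obj_unreference(&fence);
 *	}
 */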

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */
	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(ufence == NULL)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */
	spin_lock_irq(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		fence_get(&fence->base);
		spin_unlock_irq(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		fence_put(&fence->base);
		spin_lock_irq(&fman->lock);
	}
	spin_unlock_irq(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = false;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */
	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		       (wait_timeout >> 26);
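
	/*
	 * Illustrative note on the approximation above:
	 * (x >> 20) + (x >> 24) - (x >> 26) == x * 67 / 2^26, and
	 * 2^26 / 67 is roughly 1001625, so the result stays within about
	 * 0.2% of a true division by 1000000.
	 */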

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Wait invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */
	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Fence signaled invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock_irq(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock_irq(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
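
/*
 * Illustrative sketch (not part of the driver): from user space, the wait
 * ioctl above is normally reached through the usual libdrm wrapper, along
 * the lines of
 *
 *	struct drm_vmw_fence_wait_arg arg = { 0 };
 *
 *	arg.handle = fence_handle;
 *	arg.timeout_us = 1000000;
 *	ret = drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &arg, sizeof(arg));
 *
 * with the exact argument layout defined by the vmwgfx_drm.h uapi header.
 */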

/**
 * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
 *
 * @fman: Pointer to a struct vmw_fence_manager
 * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
 * with pointers to a struct drm_file object about to be closed.
 *
 * This function removes all pending fence events with references to a
 * specific struct drm_file object about to be closed. The caller is required
 * to pass a list of all struct vmw_event_fence_action objects with such
 * events attached. This function is typically called before the
 * struct drm_file object's event management is taken down.
 */
void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
				struct list_head *event_list)
{
	struct vmw_event_fence_action *eaction;
	struct drm_pending_event *event;
	unsigned long irq_flags;

	while (1) {
		spin_lock_irqsave(&fman->lock, irq_flags);
		if (list_empty(event_list))
			goto out_unlock;
		eaction = list_first_entry(event_list,
					   struct vmw_event_fence_action,
					   fpriv_head);
		list_del_init(&eaction->fpriv_head);
		event = eaction->event;
		eaction->event = NULL;
		spin_unlock_irqrestore(&fman->lock, irq_flags);
		event->destroy(event);
	}
out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context, and may be called
 * from irq context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;
	struct drm_file *file_priv;
	unsigned long irq_flags;

	if (unlikely(event == NULL))
		return;

	file_priv = event->file_priv;
	spin_lock_irqsave(&dev->event_lock, irq_flags);

	if (likely(eaction->tv_sec != NULL)) {
		struct timeval tv;

		do_gettimeofday(&tv);
		*eaction->tv_sec = tv.tv_sec;
		*eaction->tv_usec = tv.tv_usec;
	}

	list_del_init(&eaction->fpriv_head);
	list_add_tail(&eaction->event->link, &file_priv->event_list);
	eaction->event = NULL;
	wake_up_all(&file_priv->event_wait);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del(&eaction->fpriv_head);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock_irqsave(&fman->lock, irq_flags);

	fman->pending_actions[action->type]++;
	if (fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock_irqrestore(&fman->lock, irq_flags);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * the current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	unsigned long irq_flags;

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(eaction == NULL))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};
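
/*
 * struct vmw_event_fence_pending pairs the kernel-side bookkeeping
 * (struct drm_pending_event) with the struct drm_vmw_event_fence payload
 * that user-space eventually reads back from the drm file descriptor;
 * vmw_event_fence_action_create() below points base.event at event.base
 * so the drm core can deliver it.
 */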

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	unsigned long irq_flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, irq_flags);

	ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
	if (likely(ret == 0))
		file_priv->event_space -= sizeof(event->event);

	spin_unlock_irqrestore(&dev->event_lock, irq_flags);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		goto out_no_space;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_event;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	event->base.event = &event->event.base;
	event->base.file_priv = file_priv;
	event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	event->base.destroy(&event->base);
out_no_event:
	spin_lock_irqsave(&dev->event_lock, irq_flags);
	file_priv->event_space += sizeof(*event);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_space:
	return ret;
}
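
/*
 * Illustrative sketch (not part of the driver): once the fence signals,
 * user space sees the queued event as a DRM_VMW_EVENT_FENCE_SIGNALED
 * record when it read()s the drm file descriptor, roughly:
 *
 *	char buf[128];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	struct drm_event *e = (struct drm_event *)buf;
 *
 *	if (len > 0 && e->type == DRM_VMW_EVENT_FENCE_SIGNALED) {
 *		struct drm_vmw_event_fence *ef =
 *			(struct drm_vmw_event_fence *)e;
 *		// ef->user_data, plus ef->tv_sec / ef->tv_usec when
 *		// DRM_VMW_FE_FLAG_REQ_TIME was requested.
 *	}
 */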

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			ttm_base_object_lookup_for_ref(dev_priv->tdev,
						       arg->handle);

		if (unlikely(base == NULL)) {
			DRM_ERROR("Fence event invalid fence object handle "
				  "0x%08lx.\n",
				  (unsigned long)arg->handle);
			return -EINVAL;
		}
		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			bool existed;

			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, &existed);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					  handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}