/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct i915_active_fence base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

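/*
 * Reference flow, as implemented below: i915_active_acquire() takes a
 * reference on ref->count, invoking the optional ref->active() callback on
 * the first acquisition. Each slot that starts tracking a fence via
 * i915_active_ref() or i915_active_set_exclusive() takes a further
 * reference, which is dropped from the fence callbacks
 * (node_retire()/excl_retire()) when the last fence tracked by that slot
 * signals. active_retire() releases a reference and, on the final put,
 * either runs __active_retire() inline or defers it to ref->work when
 * I915_ACTIVE_RETIRE_SLEEPS is set, so that ref->retire() may sleep.
 * __active_retire() then detaches the rbtree under ref->tree_lock, calls
 * ref->retire(), wakes any i915_active_wait() sleepers and returns the
 * nodes to the slab cache.
 */
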
static void
__active_retire(struct i915_active *ref)
{
	struct active_node *it, *n;
	struct rb_root root;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	root = ref->tree;
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

static struct i915_active_fence *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;
	u64 idx = tl->fence_context;

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	node = READ_ONCE(ref->cache);
	if (node && node->timeline == idx)
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	ref->cache = node;
	spin_unlock_irq(&ref->tree_lock);

	BUILD_BUG_ON(offsetof(typeof(*node), base));
	return &node->base;
}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	unsigned long bits;

	debug_active_init(ref);

	ref->flags = 0;
	ref->active = active;
	ref->retire = ptr_unpack_bits(retire, &bits, 2);
	if (bits & I915_ACTIVE_MAY_SLEEP)
		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

int i915_active_ref(struct i915_active *ref,
		    struct intel_timeline *tl,
		    struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	lockdep_assert_held(&tl->mutex);

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, tl);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (is_barrier(active)) { /* proto-node used by our idle barrier */
		/*
		 * This request is on the kernel_context timeline, and so
		 * we can use it to substitute for the pending idle-barrier
		 * request that we want to emit on the kernel_context.
		 */
		__active_del_barrier(ref, node_from_active(active));
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		atomic_inc(&ref->count);

out:
	i915_active_release(ref);
	return err;
}

void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	GEM_BUG_ON(i915_active_is_idle(ref));

	if (!__i915_active_fence_set(&ref->excl, f))
		atomic_inc(&ref->count);
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (!atomic_read(&ref->count) && ref->active)
		err = ref->active(ref);
	if (!err) {
		spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */
		debug_active_activate(ref);
		atomic_inc(&ref->count);
		spin_unlock_irq(&ref->tree_lock);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	might_sleep();

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	/* Flush lazy signals */
	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		if (is_barrier(&it->base)) /* unconnected idle barrier */
			continue;

		enable_signaling(&it->base);
	}
	/* Any fence added after the wait begins will not be auto-signaled */

	i915_active_release(ref);
	if (err)
		return err;

	if (wait_var_event_interruptible(ref,
					 i915_active_is_idle(ref)))
		return -EINTR;

	flush_work(&ref->work);
	return 0;
}

int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	int err = 0;

	if (rcu_access_pointer(ref->excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = i915_request_await_dma_fence(rq, fence);
			dma_fence_put(fence);
		}
	}

	/* In the future we may choose to await on all fences */

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	mutex_destroy(&ref->mutex);
}
#endif

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = p->rb_right;
		else
			p = p->rb_left;
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	spin_unlock_irq(&ref->tree_lock);

	return NULL;

match:
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		ref->cache = NULL;
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;
	int err;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		node = reuse_idle_barrier(ref, idx);
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node) {
				err = -ENOMEM;
				goto unwind;
			}

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			atomic_inc(&ref->count);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return err;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put(engine);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}
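
/*
 * Rough usage sketch (illustrative only; "obj", "tl" and "rq" stand in for
 * the caller's own tracker, timeline and request, and initialisation
 * normally goes through the i915_active_init() wrapper declared in
 * i915_active.h rather than calling __i915_active_init() directly):
 *
 *	__i915_active_init(&obj->active, obj_active, obj_retire, &mkey, &wkey);
 *
 *	mutex_lock(&tl->mutex);
 *	err = i915_active_ref(&obj->active, tl, &rq->fence);
 *	mutex_unlock(&tl->mutex);
 *
 *	err = i915_active_wait(&obj->active);	(blocks until the tracked
 *						 fences have signaled)
 *	i915_active_fini(&obj->active);		(CONFIG_DRM_I915_DEBUG_GEM only)
 */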