/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct i915_active_fence base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

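/*
 * A minimal sketch of the expected lifecycle, for orientation only (the
 * real call sites live elsewhere in the driver; i915_active_init() is the
 * macro wrapper around __i915_active_init() declared in i915_active.h, and
 * my_active/my_retire are placeholder callbacks):
 *
 *	i915_active_init(&ref, my_active, my_retire);
 *	...
 *	err = i915_active_ref(&ref, tl, &rq->fence);	// under tl->mutex
 *	...
 *	i915_active_wait(&ref);	// block until all tracked fences signal
 *	i915_active_fini(&ref);
 *
 * __active_retire() below runs once the last reference is dropped: it
 * detaches the rbtree of per-timeline nodes, invokes the optional
 * ref->retire() callback and returns the nodes to the slab cache.
 */
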
static void
__active_retire(struct i915_active *ref)
{
	struct active_node *it, *n;
	struct rb_root root;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	root = ref->tree;
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

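/*
 * Lookup flow for i915_active_ref(), as an illustrative summary: each
 * timeline is identified by its u64 fence_context and maps to at most one
 * active_node in ref->tree. active_instance() below first checks ref->cache
 * (the most recently used node) so that the common case avoids the rbtree
 * walk entirely.
 */
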
static struct i915_active_fence *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;
	u64 idx = tl->fence_context;

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	node = READ_ONCE(ref->cache);
	if (node && node->timeline == idx)
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	ref->cache = node;
	spin_unlock_irq(&ref->tree_lock);

	BUILD_BUG_ON(offsetof(typeof(*node), base));
	return &node->base;
}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	unsigned long bits;

	debug_active_init(ref);

	ref->flags = 0;
	ref->active = active;
	ref->retire = ptr_unpack_bits(retire, &bits, 2);
	if (bits & I915_ACTIVE_MAY_SLEEP)
		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

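/*
 * Note on the barrier "proto-nodes" handled below (descriptive only): an
 * idle barrier is an active_node whose fence slot holds ERR_PTR(-EAGAIN)
 * instead of a real fence (see is_barrier()). While in that state,
 * node->base.cb.node is reused as an llist_node on engine->barrier_tasks
 * and node->base.cb.node.prev stashes the owning engine (see
 * barrier_to_ll() and __barrier_to_engine() above).
 */
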
static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

int i915_active_ref(struct i915_active *ref,
		    struct intel_timeline *tl,
		    struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	lockdep_assert_held(&tl->mutex);

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, tl);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (is_barrier(active)) { /* proto-node used by our idle barrier */
		/*
		 * This request is on the kernel_context timeline, and so
		 * we can use it to substitute for the pending idle-barrier
		 * request that we want to emit on the kernel_context.
		 */
		__active_del_barrier(ref, node_from_active(active));
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		atomic_inc(&ref->count);

out:
	i915_active_release(ref);
	return err;
}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	struct dma_fence *prev;

	/* We expect the caller to manage the exclusive timeline ordering */
	GEM_BUG_ON(i915_active_is_idle(ref));

	rcu_read_lock();
	prev = __i915_active_fence_set(&ref->excl, f);
	if (prev)
		prev = dma_fence_get_rcu(prev);
	else
		atomic_inc(&ref->count);
	rcu_read_unlock();

	return prev;
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		if (ref->active)
			err = ref->active(ref);
		if (!err) {
			spin_lock_irq(&ref->tree_lock); /* __active_retire() */
			debug_active_activate(ref);
			atomic_inc(&ref->count);
			spin_unlock_irq(&ref->tree_lock);
		}
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

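/*
 * Illustrative acquire/release pairing (a sketch only, not a caller in
 * this file; i915_vma.active is one real user of an i915_active):
 *
 *	if (i915_active_acquire_if_busy(&vma->active)) {
 *		... the tracker cannot retire while the count is held ...
 *		i915_active_release(&vma->active);
 *	}
 */
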
static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	might_sleep();

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	/* Flush lazy signals */
	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		if (is_barrier(&it->base)) /* unconnected idle barrier */
			continue;

		enable_signaling(&it->base);
	}

	/* Any fence added after the wait begins will not be auto-signaled */

	i915_active_release(ref);
	if (err)
		return err;

	if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
		return -EINTR;

	flush_work(&ref->work);
	return 0;
}

int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	int err = 0;

	if (rcu_access_pointer(ref->excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = i915_request_await_dma_fence(rq, fence);
			dma_fence_put(fence);
		}
	}

	/* In the future we may choose to await on all fences */

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	mutex_destroy(&ref->mutex);
}
#endif

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = p->rb_right;
		else
			p = p->rb_left;
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	spin_unlock_irq(&ref->tree_lock);

	return NULL;

match:
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		ref->cache = NULL;
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;
	int err;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	GEM_BUG_ON(!mask);
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		node = reuse_idle_barrier(ref, idx);
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node) {
				err = -ENOMEM;
				goto unwind;
			}

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			atomic_inc(&ref->count);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return err;
}

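/*
 * Barrier flow, summarised for orientation (descriptive only):
 *
 *	i915_active_acquire_preallocate_barrier() - allocate (or reuse) one
 *		proto-node per physical engine, marked with ERR_PTR(-EAGAIN)
 *		and stashed on ref->preallocated_barriers;
 *	i915_active_acquire_barrier()             - insert those proto-nodes
 *		into ref->tree and queue them on engine->barrier_tasks;
 *	i915_request_add_active_barriers()        - attach the queued nodes
 *		to the next kernel_context request so that its retirement
 *		drops the extra reference taken during preallocation.
 */
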
void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put(engine);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}