xref: /openbmc/linux/drivers/gpu/drm/i915/i915_active.c (revision 2b77dcc5)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

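/*
 * Editor's illustration (not part of the original source): the dedicated
 * kmem_cache above is created by i915_global_active_init() at the bottom
 * of this file; every active_node allocation and free in between goes
 * through it rather than kmalloc():
 *
 *	node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(global.slab_cache, node);
 */
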
struct active_node {
	struct i915_active_fence base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

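/*
 * Editor's summary of the casts above (not in the original source): while
 * a node is only a proto-barrier, its dma_fence_cb is not attached to any
 * fence, so the embedded list_head is reused verbatim. Its first pointer
 * (next) doubles as the llist_node for the engine's barrier_tasks llist,
 * and its second pointer (prev) stashes the owning engine. is_barrier()
 * recognises this state because an ERR_PTR is stored in place of a real
 * fence pointer, as done in i915_active_acquire_preallocate_barrier():
 *
 *	RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
 *	node->base.cb.node.prev = (void *)engine;
 */
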
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->mutex);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->mutex);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

static void
__active_retire(struct i915_active *ref)
{
	struct active_node *it, *n;
	struct rb_root root;
	bool retire = false;

	lockdep_assert_held(&ref->mutex);
	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (atomic_dec_and_test(&ref->count)) {
		debug_active_deactivate(ref);
		root = ref->tree;
		ref->tree = RB_ROOT;
		ref->cache = NULL;
		retire = true;
	}

	mutex_unlock(&ref->mutex);
	if (!retire)
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	mutex_lock(&ref->mutex);
	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	/* If we are inside interrupt context (fence signaling), defer */
	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS ||
	    !mutex_trylock(&ref->mutex)) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

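/*
 * Editor's note on the drop pattern above (not in the original source):
 * atomic_add_unless(&ref->count, -1, 1) decrements the count unless that
 * would consume the final reference, so all but the last drop complete
 * without taking any lock. Only the final drop falls through; if it
 * happens in fence-signaling (irq) context, or the retire callback may
 * sleep, the release is bounced to process context:
 *
 *	queue_work(system_unbound_wq, &ref->work);	// runs active_work()
 */
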
static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	i915_active_fence_cb(fence, cb);
	active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	i915_active_fence_cb(fence, cb);
	active_retire(container_of(cb, struct i915_active, excl.cb));
}

static struct i915_active_fence *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;
	u64 idx = tl->fence_context;

	/*
	 * We track the most recently used timeline to skip an rbtree search
	 * for the common case; under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	node = READ_ONCE(ref->cache);
	if (node && node->timeline == idx)
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	mutex_lock(&ref->mutex);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	__i915_active_fence_init(&node->base, &tl->mutex, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	ref->cache = node;
	mutex_unlock(&ref->mutex);

	BUILD_BUG_ON(offsetof(typeof(*node), base));
	return &node->base;
}

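/*
 * Editor's note on active_instance() (not in the original source): the
 * replacement node is allocated *before* taking ref->mutex, since
 * kmem_cache_alloc(GFP_KERNEL) may sleep and we prefer not to sleep while
 * holding the lock. The shape is the classic optimistic preallocation:
 *
 *	prealloc = kmem_cache_alloc(cache, GFP_KERNEL);
 *	mutex_lock(&ref->mutex);
 *	if (<tree already has a node for this timeline>)
 *		kmem_cache_free(cache, prealloc);
 *	else
 *		<link prealloc into the rbtree>;
 *	mutex_unlock(&ref->mutex);
 */
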
void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *key)
{
	unsigned long bits;

	debug_active_init(ref);

	ref->flags = 0;
	ref->active = active;
	ref->retire = ptr_unpack_bits(retire, &bits, 2);
	if (bits & I915_ACTIVE_MAY_SLEEP)
		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

	ref->tree = RB_ROOT;
	ref->cache = NULL;
	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", key);
	__i915_active_fence_init(&ref->excl, &ref->mutex, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
}

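/*
 * Minimal usage sketch (editor's addition; "struct foo", foo_active() and
 * foo_retire() are hypothetical, and i915_active_init() is assumed to be
 * the wrapper macro from i915_active.h that supplies the lock_class_key):
 *
 *	struct foo {
 *		struct i915_active active;
 *	};
 *
 *	i915_active_init(&foo->active, foo_active, foo_retire);
 *
 *	err = i915_active_acquire(&foo->active);
 *	if (err)
 *		return err;
 *	err = i915_active_ref(&foo->active, tl, &rq->fence);
 *	i915_active_release(&foo->active);
 *
 * foo_retire() then runs once the last tracked fence has signaled and the
 * last acquire has been released.
 */
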
static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

int i915_active_ref(struct i915_active *ref,
		    struct intel_timeline *tl,
		    struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	lockdep_assert_held(&tl->mutex);

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, tl);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (is_barrier(active)) { /* proto-node used by our idle barrier */
		/*
		 * This request is on the kernel_context timeline, and so
		 * we can use it to substitute for the pending idle-barrier
		 * request that we want to emit on the kernel_context.
		 */
		__active_del_barrier(ref, node_from_active(active));
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		atomic_inc(&ref->count);

out:
	i915_active_release(ref);
	return err;
}

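/*
 * Editor's note on the counting above (an interpretation, not from the
 * original source): ref->count carries one reference per occupied fence
 * slot plus one per acquire. __i915_active_fence_set() returns the fence
 * previously in the slot, so a new reference is taken only when the slot
 * was empty; replacing a fence simply inherits the reference the slot
 * already holds. The barrier path first drops the proto-node's reference
 * before the slot is repopulated as an ordinary tracker.
 */
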
void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * As we don't know which mutex the caller is using, we told a small
	 * lie to the debug code that it is using the i915_active.mutex;
	 * and now we must stick to that lie.
	 */
	mutex_acquire(&ref->mutex.dep_map, 0, 0, _THIS_IP_);
	if (!__i915_active_fence_set(&ref->excl, f))
		atomic_inc(&ref->count);
	mutex_release(&ref->mutex.dep_map, 0, _THIS_IP_);
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (!atomic_read(&ref->count) && ref->active)
		err = ref->active(ref);
	if (!err) {
		debug_active_activate(ref);
		atomic_inc(&ref->count);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	might_sleep();

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	/* Flush lazy signals */
	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		if (is_barrier(&it->base)) /* unconnected idle barrier */
			continue;

		enable_signaling(&it->base);
	}
	/* Any fence added after the wait begins will not be auto-signaled */

	i915_active_release(ref);
	if (err)
		return err;

	if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
		return -EINTR;

	return 0;
}

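/*
 * Illustrative caller-side sketch (editor's addition; "foo" is
 * hypothetical): i915_active_wait() flushes lazy signaling on every
 * tracked fence and then sleeps on the variable woken by wake_up_var(ref)
 * in __active_retire() when the final reference is dropped:
 *
 *	err = i915_active_wait(&foo->active);
 *	if (err)	// -EINTR: interrupted, the tracker may still be busy
 *		return err;
 */
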
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	int err = 0;

	if (rcu_access_pointer(ref->excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = i915_request_await_dma_fence(rq, fence);
			dma_fence_put(fence);
		}
	}

	/* In the future we may choose to await on all fences */

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	mutex_destroy(&ref->mutex);
}
#endif

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	mutex_lock(&ref->mutex);
	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active; due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = p->rb_right;
		else
			p = p->rb_left;
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	mutex_unlock(&ref->mutex);

	return NULL;

match:
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		ref->cache = NULL;
	mutex_unlock(&ref->mutex);

	return rb_entry(p, struct active_node, node);
}

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct intel_gt *gt = engine->gt;
	struct llist_node *pos, *next;
	int err;

	GEM_BUG_ON(i915_active_is_idle(ref));
	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct active_node *node;

		node = reuse_idle_barrier(ref, idx);
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node) {
				err = -ENOMEM;
				goto unwind;
			}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
			node->base.lock =
				&engine->kernel_context->timeline->mutex;
#endif
			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			atomic_inc(&ref->count);
		}

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
		intel_engine_pm_get(engine);
	}

	return 0;

unwind:
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return err;
}

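/*
 * Editor's summary of the barrier lifecycle, stitched together from the
 * functions above and below:
 *
 *	i915_active_acquire_preallocate_barrier()
 *		allocates (or recycles) one proto-node per physical engine
 *		and parks them on ref->preallocated_barriers;
 *	i915_active_acquire_barrier()
 *		moves those nodes into the rbtree and onto each
 *		engine->barrier_tasks llist;
 *	i915_request_add_active_barriers()
 *		attaches them to a kernel_context request, whose retirement
 *		finally drops the references via node_retire().
 */
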
void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put(engine);
	}
	mutex_unlock(&ref->mutex);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		RCU_INIT_POINTER(barrier_from_ll(node)->base.fence, &rq->fence);
		smp_wmb(); /* serialise with reuse_idle_barrier */
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

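/*
 * Editor's note (an interpretation, not from the original source): the
 * smp_wmb() above pairs with the smp_rmb() in reuse_idle_barrier().
 * Publishing rq->fence into node->base.fence is ordered before the
 * list_add_tail() that recycles the very pointers barrier_to_ll() and
 * __barrier_to_engine() alias, so a racing reuse_idle_barrier() that
 * still observes the barrier state can safely interpret those pointers.
 */
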
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define active_is_held(active) lockdep_is_held((active)->lock)
#else
#define active_is_held(active) true
#endif

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * maintained, it must be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	/* NB: must be serialised by an outer timeline mutex (active->lock) */
	spin_lock_irqsave(fence->lock, flags);
	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	prev = rcu_dereference_protected(active->fence, active_is_held(active));
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */

		/*
		 * active->fence is reset by the callback from inside
		 * interrupt context. We need to serialise our list
		 * manipulation with the fence->lock to prevent the prev
		 * being lost inside an interrupt (it can't be replaced as
		 * no other caller is allowed to enter __i915_active_fence_set
		 * as we hold the timeline lock). After serialising with
		 * the callback, we need to double check which ran first,
		 * our list_del() [decoupling prev from the callback] or
		 * the callback...
		 */
		prev = rcu_access_pointer(active->fence);
	}

	rcu_assign_pointer(active->fence, fence);
	list_add_tail(&active->cb.node, &fence->cb_list);

	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

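/*
 * Editor's note on the locking in __i915_active_fence_set() (a summary of
 * the comments above): two locks cooperate here. active->lock, the
 * caller's timeline mutex, serialises concurrent setters and preserves
 * submission order along the timeline; fence->lock, the irqsafe spinlock
 * shared with the dma_fence callback machinery, serialises against
 * node_retire()/excl_retire() firing from the signaling interrupt.
 * Neither lock alone suffices, hence the double check of active->fence
 * after decoupling the previous callback.
 */
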
int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	lockdep_assert_held(active->lock);
#endif

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	i915_active_fence_cb(fence, cb);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}
846