xref: /openbmc/linux/drivers/gpu/drm/i915/i915_active.c (revision 91db9311)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_engine_pm.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

#define BKL(ref) (&(ref)->i915->drm.struct_mutex)

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

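/*
 * An active_node tracks the most recent request submitted along one
 * timeline. The nodes belonging to a single i915_active live in ref->tree,
 * an rbtree keyed by the timeline identifier (typically a fence context id).
 */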
struct active_node {
	struct i915_active_request base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

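/*
 * __active_retire - drop the reference taken when the i915_active became
 * active. If this was the last reference, the tracking tree is torn down
 * under ref->mutex, the retire callback is invoked and the nodes are
 * returned to the slab cache. The mutex, held by the caller, is dropped
 * before the retire callback runs.
 */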
static void
__active_retire(struct i915_active *ref)
{
	struct active_node *it, *n;
	struct rb_root root;
	bool retire = false;

	lockdep_assert_held(&ref->mutex);

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (atomic_dec_and_test(&ref->count)) {
		debug_active_deactivate(ref);
		root = ref->tree;
		ref->tree = RB_ROOT;
		ref->cache = NULL;
		retire = true;
	}

	mutex_unlock(&ref->mutex);
	if (!retire)
		return;

	ref->retire(ref);

	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_request_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

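/*
 * active_retire - fast path for dropping a reference: avoid taking
 * ref->mutex unless this really is the final reference and the
 * i915_active needs to be retired.
 */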
static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	/* One active may be flushed from inside the acquire of another */
	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
	__active_retire(ref);
}

static void
node_retire(struct i915_active_request *base, struct i915_request *rq)
{
	active_retire(container_of(base, struct active_node, base)->ref);
}

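/*
 * active_instance - look up (or allocate) the tracking node for the given
 * timeline. The most recently used node is cached in ref->cache so the
 * common case of repeated submissions along one timeline skips the rbtree
 * walk entirely; the replacement node is preallocated before taking
 * ref->mutex so we never allocate under the lock.
 */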
static struct i915_active_request *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	node = READ_ONCE(ref->cache);
	if (node && node->timeline == idx)
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	mutex_lock(&ref->mutex);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	i915_active_request_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	ref->cache = node;
	mutex_unlock(&ref->mutex);

	return &node->base;
}

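/*
 * __i915_active_init - set up an i915_active embedded in a larger object,
 * with optional callbacks invoked on first use (@active) and on final
 * retirement (@retire).
 *
 * Illustrative sketch only: callers typically go through the
 * i915_active_init() wrapper in i915_active.h, which supplies a static
 * lock_class_key for the embedded mutex, e.g.
 *
 *	i915_active_init(i915, &obj->active, obj_active, obj_retire);
 *
 * where obj_active()/obj_retire() are hypothetical callbacks named only for
 * this example.
 */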
void __i915_active_init(struct drm_i915_private *i915,
			struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *key)
{
	debug_active_init(ref);

	ref->i915 = i915;
	ref->flags = 0;
	ref->active = active;
	ref->retire = retire;
	ref->tree = RB_ROOT;
	ref->cache = NULL;
	init_llist_head(&ref->barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", key);
}

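/*
 * i915_active_ref - track @rq as the most recent activity along @timeline.
 * The i915_active is kept acquired while the tree node is looked up or
 * allocated, so it cannot be reaped midway.
 *
 * A hedged sketch of the calling pattern (names are illustrative, not the
 * exact call sites in the driver):
 *
 *	err = i915_active_ref(&vma->active, tl->fence_context, rq);
 *	if (err)
 *		return err;
 *
 * where tl stands in for the request's timeline.
 */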
int i915_active_ref(struct i915_active *ref,
		    u64 timeline,
		    struct i915_request *rq)
{
	struct i915_active_request *active;
	int err;

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, timeline);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (!i915_active_request_isset(active))
		atomic_inc(&ref->count);
	__i915_active_request_set(active, rq);

out:
	i915_active_release(ref);
	return err;
}

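/*
 * i915_active_acquire / i915_active_release - pin the i915_active in the
 * active state. The first acquire invokes the optional ref->active()
 * callback; the matching release may trigger retirement once all tracked
 * requests have completed.
 */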
int i915_active_acquire(struct i915_active *ref)
{
	int err;

	debug_active_assert(ref);
	if (atomic_add_unless(&ref->count, 1, 0))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (!atomic_read(&ref->count) && ref->active)
		err = ref->active(ref);
	if (!err) {
		debug_active_activate(ref);
		atomic_inc(&ref->count);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

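/*
 * i915_active_trygrab / i915_active_ungrab - opportunistically take a
 * temporary reference on an i915_active that is still in use. The
 * I915_ACTIVE_GRAB_BIT excludes concurrent grabbers and is waited upon by
 * i915_active_wait() before it reports the active as idle.
 */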
static void __active_ungrab(struct i915_active *ref)
{
	clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags);
}

bool i915_active_trygrab(struct i915_active *ref)
{
	debug_active_assert(ref);

	if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags))
		return false;

	if (!atomic_add_unless(&ref->count, 1, 0)) {
		__active_ungrab(ref);
		return false;
	}

	return true;
}

void i915_active_ungrab(struct i915_active *ref)
{
	GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags));

	active_retire(ref);
	__active_ungrab(ref);
}

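/*
 * i915_active_wait - flush and wait for all activity tracked by @ref to be
 * retired. Returns 0 once idle, -EINTR (or another error from retiring the
 * tracked requests) if interrupted, or -EBUSY if the active could not be
 * idled.
 */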
int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err;

	might_sleep();
	might_lock(&ref->mutex);

	if (i915_active_is_idle(ref))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (!atomic_add_unless(&ref->count, 1, 0)) {
		mutex_unlock(&ref->mutex);
		return 0;
	}

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = i915_active_request_retire(&it->base, BKL(ref));
		if (err)
			break;
	}

	__active_retire(ref);
	if (err)
		return err;

	if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE))
		return -EINTR;

	if (!i915_active_is_idle(ref))
		return -EBUSY;

	return 0;
}

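/*
 * i915_request_await_active_request / i915_request_await_active - make @rq
 * wait (via dma-fence dependencies) on the tracked activity, either for a
 * single slot or for every timeline tracked by the i915_active.
 */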
int i915_request_await_active_request(struct i915_request *rq,
				      struct i915_active_request *active)
{
	struct i915_request *barrier =
		i915_active_request_raw(active, &rq->i915->drm.struct_mutex);

	return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
}

int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	struct active_node *it, *n;
	int err;

	if (RB_EMPTY_ROOT(&ref->tree))
		return 0;

	/* await allocates and so we need to avoid hitting the shrinker */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	mutex_lock(&ref->mutex);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = i915_request_await_active_request(rq, &it->base);
		if (err)
			break;
	}
	mutex_unlock(&ref->mutex);

	i915_active_release(ref);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	GEM_BUG_ON(atomic_read(&ref->count));
	mutex_destroy(&ref->mutex);
}
#endif

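/*
 * Idle barriers: i915_active_acquire_preallocate_barrier() allocates one
 * node per engine in @engine->mask ahead of time (stashing the engine
 * pointer where the request would normally live), so that
 * i915_active_acquire_barrier() can later move those nodes into the tree
 * and onto each engine's barrier_tasks list without allocating. The
 * barriers are finally attached to a kernel-context request by
 * i915_request_add_barriers(), keeping the i915_active busy until that
 * request is retired.
 */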
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *pos, *next;
	int err;

	GEM_BUG_ON(!mask);
	for_each_engine_masked(engine, i915, mask, tmp) {
		struct intel_context *kctx = engine->kernel_context;
		struct active_node *node;

		node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
		if (unlikely(!node)) {
			err = -ENOMEM;
			goto unwind;
		}

		i915_active_request_init(&node->base,
					 (void *)engine, node_retire);
		node->timeline = kctx->ring->timeline->fence_context;
		node->ref = ref;
		atomic_inc(&ref->count);

		intel_engine_pm_get(engine);
		llist_add((struct llist_node *)&node->base.link,
			  &ref->barriers);
	}

	return 0;

unwind:
	llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
		struct active_node *node;

		node = container_of((struct list_head *)pos,
				    typeof(*node), base.link);
		engine = (void *)rcu_access_pointer(node->base.request);

		intel_engine_pm_put(engine);
		kmem_cache_free(global.slab_cache, node);
	}
	return err;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;

	GEM_BUG_ON(i915_active_is_idle(ref));

	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
	llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
		struct intel_engine_cs *engine;
		struct active_node *node;
		struct rb_node **p, *parent;

		node = container_of((struct list_head *)pos,
				    typeof(*node), base.link);

		engine = (void *)rcu_access_pointer(node->base.request);
		RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));

		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			parent = *p;
			if (rb_entry(parent,
				     struct active_node,
				     node)->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);

		llist_add((struct llist_node *)&node->base.link,
			  &engine->barrier_tasks);
		intel_engine_pm_put(engine);
	}
	mutex_unlock(&ref->mutex);
}

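/*
 * i915_request_add_barriers - splice the engine's pending barrier tasks
 * onto @rq's active list so they are retired (and their i915_active
 * references dropped) when this kernel-context request completes.
 */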
void i915_request_add_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks))
		list_add_tail((struct list_head *)node, &rq->active_list);
}

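/*
 * i915_active_request_set - update the tracked request, first ordering @rq
 * after whatever request was previously tracked in @active.
 */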
int i915_active_request_set(struct i915_active_request *active,
			    struct i915_request *rq)
{
	int err;

	/* Must maintain ordering wrt previous active requests */
	err = i915_request_await_active_request(rq, active);
	if (err)
		return err;

	__i915_active_request_set(active, rq);
	return 0;
}

void i915_active_retire_noop(struct i915_active_request *active,
			     struct i915_request *request)
{
	/* Space left intentionally blank */
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}