/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

#define BKL(ref) (&(ref)->i915->drm.struct_mutex)

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct i915_active_request base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

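/*
 * Return all cached tracking nodes to the slab cache. Only called once
 * every tracked request has been retired, so the rbtree is known to hold
 * only idle nodes.
 */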
static void
__active_park(struct i915_active *ref)
{
	struct active_node *it, *n;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		GEM_BUG_ON(i915_active_request_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
	ref->tree = RB_ROOT;
}

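/*
 * Drop one reference to the tracker. On the final reference, the cached
 * nodes are reaped and the owner's retire() callback is invoked.
 */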
static void
__active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!ref->count);
	if (--ref->count)
		return;

	/* return the unused nodes to our slabcache */
	__active_park(ref);

	ref->retire(ref);
}

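/* Retirement callback for a per-timeline node in the rbtree. */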
static void
node_retire(struct i915_active_request *base, struct i915_request *rq)
{
	__active_retire(container_of(base, struct active_node, base)->ref);
}

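/* Retirement callback for the last-used (cached) slot. */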
static void
last_retire(struct i915_active_request *base, struct i915_request *rq)
{
	__active_retire(container_of(base, struct i915_active, last));
}

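/*
 * Return the tracking slot to use for timeline @idx. This is always the
 * cached ref->last slot; any request it currently holds for a different
 * timeline is first migrated into the rbtree. May allocate, and so may
 * return an ERR_PTR on failure.
 */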
static struct i915_active_request *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;
	struct i915_request *old;

	/*
	 * We track the most recently used timeline to skip an rbtree search
	 * for the common case; under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is,
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 *
	 * Note that we allow the timeline to be active simultaneously in
	 * the rbtree and the last cache. We do this to avoid having
	 * to search and replace the rbtree element for a new timeline, with
	 * the cost being that we must be aware that the ref may be retired
	 * twice for the same timeline (as the older rbtree element will be
	 * retired before the new request is added to last).
	 */
	old = i915_active_request_raw(&ref->last, BKL(ref));
	if (!old || old->fence.context == idx)
		goto out;

	/* Move the currently active fence into the rbtree */
	idx = old->fence.context;

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto replace;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);

	/* kmalloc may retire the ref->last (thanks shrinker)! */
	if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) {
		kmem_cache_free(global.slab_cache, node);
		goto out;
	}

	if (unlikely(!node))
		return ERR_PTR(-ENOMEM);

	i915_active_request_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

replace:
	/*
	 * Overwrite the previous active slot in the rbtree with last,
	 * leaving last zeroed. If the previous slot is still active,
	 * we must be careful as we now only expect to receive one retire
	 * callback, not two, and so must undo the active counting for the
	 * overwritten slot.
	 */
	if (i915_active_request_isset(&node->base)) {
		/* Retire ourselves from the old rq->active_list */
		__list_del_entry(&node->base.link);
		ref->count--;
		GEM_BUG_ON(!ref->count);
	}
	GEM_BUG_ON(list_empty(&ref->last.link));
	list_replace_init(&ref->last.link, &node->base.link);
	node->base.request = fetch_and_zero(&ref->last.request);

out:
	return &ref->last;
}

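/*
 * Initialise @ref for tracking requests: an empty rbtree, an unset last
 * slot and a zero reference count. @retire is invoked once all tracked
 * requests have been retired.
 */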
void i915_active_init(struct drm_i915_private *i915,
		      struct i915_active *ref,
		      void (*retire)(struct i915_active *ref))
{
	ref->i915 = i915;
	ref->retire = retire;
	ref->tree = RB_ROOT;
	i915_active_request_init(&ref->last, NULL, last_retire);
	ref->count = 0;
}

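/*
 * Track @rq on @ref under the given timeline. Each slot contributes a
 * single reference, so the count is only raised if the slot was idle.
 */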
int i915_active_ref(struct i915_active *ref,
		    u64 timeline,
		    struct i915_request *rq)
{
	struct i915_active_request *active;
	int err = 0;

	/* Prevent reaping in case we malloc/wait while building the tree */
	i915_active_acquire(ref);

	active = active_instance(ref, timeline);
	if (IS_ERR(active)) {
		err = PTR_ERR(active);
		goto out;
	}

	if (!i915_active_request_isset(active))
		ref->count++;
	__i915_active_request_set(active, rq);

	GEM_BUG_ON(!ref->count);
out:
	i915_active_release(ref);
	return err;
}

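/*
 * Pin the tracker against retirement while we build up state.
 * Returns true if the tracker was previously idle.
 */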
bool i915_active_acquire(struct i915_active *ref)
{
	lockdep_assert_held(BKL(ref));
	return !ref->count++;
}

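/* Drop the pin taken by i915_active_acquire(), retiring if now idle. */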
void i915_active_release(struct i915_active *ref)
{
	lockdep_assert_held(BKL(ref));
	__active_retire(ref);
}

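/*
 * Wait upon and retire every request currently tracked by @ref.
 * Returns 0 on success, or the first error encountered.
 */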
int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int ret = 0;

	if (i915_active_acquire(ref))
		goto out_release;

	ret = i915_active_request_retire(&ref->last, BKL(ref));
	if (ret)
		goto out_release;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		ret = i915_active_request_retire(&it->base, BKL(ref));
		if (ret)
			break;
	}

out_release:
	i915_active_release(ref);
	return ret;
}

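/* Make @rq depend upon the request currently tracked by @active, if any. */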
int i915_request_await_active_request(struct i915_request *rq,
				      struct i915_active_request *active)
{
	struct i915_request *barrier =
		i915_active_request_raw(active, &rq->i915->drm.struct_mutex);

	return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
}

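/* Make @rq depend upon every request currently tracked by @ref. */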
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	/* await allocates and so we need to avoid hitting the shrinker */
	if (i915_active_acquire(ref))
		goto out; /* was idle */

	err = i915_request_await_active_request(rq, &ref->last);
	if (err)
		goto out;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = i915_request_await_active_request(rq, &it->base);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
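/* Debug-only teardown check: the tracker must be idle and empty. */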
void i915_active_fini(struct i915_active *ref)
{
	GEM_BUG_ON(i915_active_request_isset(&ref->last));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	GEM_BUG_ON(ref->count);
}
#endif

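/*
 * Update the tracked request, first ordering @rq after the request
 * previously tracked by @active.
 */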
int i915_active_request_set(struct i915_active_request *active,
			    struct i915_request *rq)
{
	int err;

	/* Must maintain ordering wrt previous active requests */
	err = i915_request_await_active_request(rq, active);
	if (err)
		return err;

	__i915_active_request_set(active, rq);
	return 0;
}

void i915_active_retire_noop(struct i915_active_request *active,
			     struct i915_request *request)
{
	/* Space left intentionally blank */
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

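/* Hooks for the i915_globals shrink/exit infrastructure. */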
static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

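/*
 * Complete the tentative definition of 'global' from the top of the file,
 * now that the shrink/exit hooks are defined.
 */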
static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

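/* Create the slab cache for active_node and register with i915_globals. */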
int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}