xref: /openbmc/linux/drivers/gpu/drm/i915/i915_scheduler.c (revision 023e41632e065d49bcbe31b3c4b336217f96a271)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_request.h"
#include "i915_scheduler.h"

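/*
 * schedule_lock serialises all updates to the dependency graph (the
 * signalers/waiters lists hanging off each i915_sched_node) and the
 * priority propagation performed by __i915_schedule().
 */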
static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
	return container_of(node, const struct i915_request, sched);
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}

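/*
 * Prepare a node for use by the scheduler: empty dependency lists and a
 * priority that has not yet been set (I915_PRIORITY_INVALID).
 */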
void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);
	node->attr.priority = I915_PRIORITY_INVALID;
}

static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
		     struct i915_dependency *dep)
{
	kmem_cache_free(i915->dependencies, dep);
}

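/*
 * Record that @node must wait upon @signal. The caller supplies the
 * i915_dependency used to link the two nodes; it is only consumed (and
 * true returned) if @signal has not already completed.
 */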
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
		list_add(&dep->wait_link, &signal->waiters_list);
		list_add(&dep->signal_link, &node->signalers_list);
		dep->signaler = signal;
		dep->flags = flags;

		ret = true;
	}

	spin_unlock(&schedule_lock);

	return ret;
}

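/*
 * As above, but allocate the i915_dependency from the slab cache and flag
 * it so that it is returned to the cache when the nodes are torn down, or
 * immediately if the signaler has already completed.
 */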
int i915_sched_node_add_dependency(struct drm_i915_private *i915,
				   struct i915_sched_node *node,
				   struct i915_sched_node *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc(i915);
	if (!dep)
		return -ENOMEM;

	if (!__i915_sched_node_add_dependency(node, signal, dep,
					      I915_DEPENDENCY_ALLOC))
		i915_dependency_free(i915, dep);

	return 0;
}

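/*
 * Tear down a node: detach it from everything it waits upon and from
 * everyone waiting upon it, returning any dependencies we allocated back
 * to the slab cache.
 */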
void i915_sched_node_fini(struct drm_i915_private *i915,
			  struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	GEM_BUG_ON(!list_empty(&node->link));

	spin_lock(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!node_signaled(dep->signaler));
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	spin_unlock(&schedule_lock);
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

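/*
 * Debug-only (CONFIG_DRM_I915_DEBUG_GEM) sanity check: the priolist rbtree
 * must be sorted from highest to lowest priority, and each bucket's used
 * bitmask must cover every non-empty request list within it.
 */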
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
	struct rb_node *rb;
	long last_prio, i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
		   rb_first(&execlists->queue.rb_root));

	last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		const struct i915_priolist *p = to_priolist(rb);

		GEM_BUG_ON(p->priority >= last_prio);
		last_prio = p->priority;

		GEM_BUG_ON(!p->used);
		for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
			if (list_empty(&p->requests[i]))
				continue;

			GEM_BUG_ON(!(p->used & BIT(i)));
		}
	}
}

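/*
 * Find (or create) the list of requests for the given priority on @engine.
 * The user priority selects the rbtree bucket, while the internal priority
 * bits select a list within that bucket. Must be called with the engine
 * timeline lock held.
 */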
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_priolist *p;
	struct rb_node **parent, *rb;
	bool first = true;
	int idx, i;

	lockdep_assert_held(&engine->timeline.lock);
	assert_priolists(execlists);

	/* buckets sorted from highest [in slot 0] to lowest priority */
	idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
	prio >>= I915_USER_PRIORITY_SHIFT;
	if (unlikely(execlists->no_priolist))
		prio = I915_PRIORITY_NORMAL;

find_priolist:
	/* most positive priority is scheduled first, equal priorities fifo */
	rb = NULL;
	parent = &execlists->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			goto out;
		}
	}

	if (prio == I915_PRIORITY_NORMAL) {
		p = &execlists->default_priolist;
	} else {
		p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
		/* Convert an allocation failure to a priority bump */
		if (unlikely(!p)) {
			prio = I915_PRIORITY_NORMAL; /* recurses just once */

			/* To maintain ordering with all rendering, after an
			 * allocation failure we have to disable all scheduling.
			 * Requests will then be executed in fifo, and schedule
			 * will ensure that dependencies are emitted in fifo.
			 * There will still be some reordering with existing
			 * requests, so if userspace lied about their
			 * dependencies that reordering may be visible.
			 */
			execlists->no_priolist = true;
			goto find_priolist;
		}
	}

	p->priority = prio;
	for (i = 0; i < ARRAY_SIZE(p->requests); i++)
		INIT_LIST_HEAD(&p->requests[i]);
	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &execlists->queue, first);
	p->used = 0;

out:
	p->used |= BIT(idx);
	return &p->requests[idx];
}

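/*
 * Cache the most recent priolist lookup so that consecutive nodes of the
 * same priority on the same engine can reuse it; the cache is reset
 * whenever sched_lock_engine() switches engine timeline locks.
 */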
struct sched_cache {
	struct list_head *priolist;
};

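/*
 * The dependency walk may cross engines: swap the engine timeline lock we
 * currently hold for the one belonging to @node's engine, invalidating the
 * priolist cache as we do so.
 */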
static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
		  struct intel_engine_cs *locked,
		  struct sched_cache *cache)
{
	struct intel_engine_cs *engine = node_to_request(node)->engine;

	GEM_BUG_ON(!locked);

	if (engine != locked) {
		spin_unlock(&locked->timeline.lock);
		memset(cache, 0, sizeof(*cache));
		spin_lock(&engine->timeline.lock);
	}

	return engine;
}

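/*
 * Is the request's context the one currently occupying the first execution
 * port of @engine, i.e. already running on the hardware?
 */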
static bool inflight(const struct i915_request *rq,
		     const struct intel_engine_cs *engine)
{
	const struct i915_request *active;

	if (!i915_request_is_active(rq))
		return false;

	active = port_request(engine->execlists.port);
	return active->hw_context == rq->hw_context;
}

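/*
 * Apply the new scheduling attributes to @rq and propagate any priority
 * increase to every request it depends upon (priority inheritance), kicking
 * the execlists tasklet where the bump may warrant preemption.
 */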
static void __i915_schedule(struct i915_request *rq,
			    const struct i915_sched_attr *attr)
{
	struct intel_engine_cs *engine;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	const int prio = attr->priority;
	struct sched_cache cache;
	LIST_HEAD(dfs);

	/* Needed in order to use the temporary link inside i915_dependency */
	lockdep_assert_held(&schedule_lock);
	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

	if (i915_request_completed(rq))
		return;

	if (prio <= READ_ONCE(rq->sched.attr.priority))
		return;

	stack.signaler = &rq->sched;
	list_add(&stack.dfs_link, &dfs);

	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signaler, prio)
	 *	queue_request(node);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add all of its dependencies
	 * to the end of the list (this may include an already visited
	 * request) and continue to walk onwards onto the new dependencies. The
	 * end result is a topological list of requests in reverse order; the
	 * last element in the list is the request we must execute first.
	 */
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		/*
		 * Within an engine, there can be no cycle, but we may
		 * refer to the same dependency chain multiple times
		 * (redundant dependencies are not eliminated) and across
		 * engines.
		 */
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			GEM_BUG_ON(p == dep); /* no cycles! */

			if (node_signaled(p->signaler))
				continue;

			GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}

	/*
	 * If we didn't need to bump any existing priorities, and we haven't
	 * yet submitted this request (i.e. there is no potential race with
	 * execlists_submit_request()), we can set our own priority and skip
	 * acquiring the engine locks.
	 */
	if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&rq->sched.link));
		rq->sched.attr = *attr;

		if (stack.dfs_link.next == stack.dfs_link.prev)
			return;

		__list_del_entry(&stack.dfs_link);
	}

	memset(&cache, 0, sizeof(cache));
	engine = rq->engine;
	spin_lock_irq(&engine->timeline.lock);

	/* Fifo and depth-first replacement ensure our deps execute before us */
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		INIT_LIST_HEAD(&dep->dfs_link);

		engine = sched_lock_engine(node, engine, &cache);
		lockdep_assert_held(&engine->timeline.lock);

		/* Recheck after acquiring the engine->timeline.lock */
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		node->attr.priority = prio;
		if (!list_empty(&node->link)) {
			if (!cache.priolist)
				cache.priolist =
					i915_sched_lookup_priolist(engine,
								   prio);
			list_move_tail(&node->link, cache.priolist);
		} else {
			/*
			 * If the request is not in the priolist queue because
			 * it is not yet runnable, then it doesn't contribute
			 * to our preemption decisions. On the other hand,
			 * if the request is on the HW, it too is not in the
			 * queue; but in that case we may still need to reorder
			 * the inflight requests.
			 */
			if (!i915_sw_fence_done(&node_to_request(node)->submit))
				continue;
		}

		if (prio <= engine->execlists.queue_priority_hint)
			continue;

		engine->execlists.queue_priority_hint = prio;

		/*
		 * If we are already the currently executing context, don't
		 * bother evaluating if we should preempt ourselves.
		 */
		if (inflight(node_to_request(node), engine))
			continue;

		/* Defer (tasklet) submission until after all of our updates. */
		tasklet_hi_schedule(&engine->execlists.tasklet);
	}

	spin_unlock_irq(&engine->timeline.lock);
}

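/* Public entry point: take the global schedule_lock around the update. */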
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock(&schedule_lock);
	__i915_schedule(rq, attr);
	spin_unlock(&schedule_lock);
}

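/*
 * OR additional low-level priority bits into an already prioritised
 * request; requests whose priority is still I915_PRIORITY_INVALID are
 * left untouched.
 */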
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
	struct i915_sched_attr attr;

	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);

	if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
		return;

	spin_lock_bh(&schedule_lock);

	attr = rq->sched.attr;
	attr.priority |= bump;
	__i915_schedule(rq, &attr);

	spin_unlock_bh(&schedule_lock);
}