/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"

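/*
 * Slab caches for the scheduler's dependency edges and priority lists,
 * registered with i915_globals so they can be shrunk under memory
 * pressure and destroyed on module exit.
 */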
static struct i915_global_scheduler {
	struct i915_global base;
	struct kmem_cache *slab_dependencies;
	struct kmem_cache *slab_priorities;
} global;

static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
	return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
	return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}

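/*
 * Initialise a node for use in the scheduler's dependency graph. The
 * priority starts out invalid; a real priority is not assigned until
 * the request is first scheduled.
 */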
void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);
	node->attr.priority = I915_PRIORITY_INVALID;
	node->flags = 0;
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
	kmem_cache_free(global.slab_dependencies, dep);
}

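/*
 * Link @node to @signal so that @node is not executed before @signal.
 * Returns true if the dependency was recorded; false if @signal has
 * already signaled, in which case @dep is left unused for the caller
 * to free.
 */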
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
		list_add(&dep->wait_link, &signal->waiters_list);
		list_add(&dep->signal_link, &node->signalers_list);
		dep->signaler = signal;
		dep->flags = flags;

		/* Keep track of whether anyone on this chain has a semaphore */
		if (signal->flags & I915_SCHED_HAS_SEMAPHORE &&
		    !node_started(signal))
			node->flags |= I915_SCHED_HAS_SEMAPHORE;

		ret = true;
	}

	spin_unlock(&schedule_lock);

	return ret;
}

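/*
 * Allocate and record a dependency between two nodes. Returns -ENOMEM
 * if the allocation fails, and 0 otherwise -- including when @signal
 * has already completed and no edge needed to be tracked.
 */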
int i915_sched_node_add_dependency(struct i915_sched_node *node,
				   struct i915_sched_node *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc();
	if (!dep)
		return -ENOMEM;

	if (!__i915_sched_node_add_dependency(node, signal, dep,
					      I915_DEPENDENCY_ALLOC))
		i915_dependency_free(dep);

	return 0;
}

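/*
 * Tear down a node: sever any remaining edges in both directions,
 * freeing the i915_dependency containers we allocated. The node must
 * already be off the execution lists.
 */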
void i915_sched_node_fini(struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	GEM_BUG_ON(!list_empty(&node->link));

	spin_lock(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait on to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!node_signaled(dep->signaler));
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}

	spin_unlock(&schedule_lock);
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

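/*
 * Debug-only (CONFIG_DRM_I915_DEBUG_GEM) sanity checks: the cached
 * leftmost rbtree node must match the real leftmost, priolists must be
 * in strictly descending priority order, and the used bitmask must
 * cover every non-empty request bucket.
 */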
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
	struct rb_node *rb;
	long last_prio, i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
		   rb_first(&execlists->queue.rb_root));

	last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		const struct i915_priolist *p = to_priolist(rb);

		GEM_BUG_ON(p->priority >= last_prio);
		last_prio = p->priority;

		GEM_BUG_ON(!p->used);
		for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
			if (list_empty(&p->requests[i]))
				continue;

			GEM_BUG_ON(!(p->used & BIT(i)));
		}
	}
}

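/*
 * Find, or if necessary allocate, the priolist bucket for the given
 * priority on @engine. The rbtree is keyed by the user priority (the
 * high bits of @prio); the low bits select the sub-list within the
 * priolist. Must be called with the engine's timeline lock held.
 */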
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_priolist *p;
	struct rb_node **parent, *rb;
	bool first = true;
	int idx, i;

	lockdep_assert_held(&engine->timeline.lock);
	assert_priolists(execlists);

	/* buckets sorted from highest [in slot 0] to lowest priority */
	idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
	prio >>= I915_USER_PRIORITY_SHIFT;
	if (unlikely(execlists->no_priolist))
		prio = I915_PRIORITY_NORMAL;

find_priolist:
	/* most positive priority is scheduled first, equal priorities FIFO */
	rb = NULL;
	parent = &execlists->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			goto out;
		}
	}

	if (prio == I915_PRIORITY_NORMAL) {
		p = &execlists->default_priolist;
	} else {
		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
		/* Convert an allocation failure to a priority bump */
		if (unlikely(!p)) {
			prio = I915_PRIORITY_NORMAL; /* recurses just once */

			/*
			 * To maintain ordering with all rendering, after an
			 * allocation failure we have to disable all scheduling.
			 * Requests will then be executed in FIFO order, and
			 * the scheduler will ensure that dependencies are
			 * emitted in FIFO order as well. There will still be
			 * some reordering with existing requests, so if
			 * userspace lied about their dependencies that
			 * reordering may be visible.
			 */
			execlists->no_priolist = true;
			goto find_priolist;
		}
	}

	p->priority = prio;
	for (i = 0; i < ARRAY_SIZE(p->requests); i++)
		INIT_LIST_HEAD(&p->requests[i]);
	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &execlists->queue, first);
	p->used = 0;

out:
	p->used |= BIT(idx);
	return &p->requests[idx];
}

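/*
 * Cache for the engine-lock walk below: remembers the priolist looked
 * up for the current priority so consecutive nodes on the same engine
 * avoid repeating the rbtree search. Reset on every engine change.
 */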
struct sched_cache {
	struct list_head *priolist;
};

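/*
 * Hand the timeline lock over from @locked to the engine on which
 * @node executes, invalidating the priolist cache whenever we hop
 * between engines.
 */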
static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
		  struct intel_engine_cs *locked,
		  struct sched_cache *cache)
{
	struct intel_engine_cs *engine = node_to_request(node)->engine;

	GEM_BUG_ON(!locked);

	if (engine != locked) {
		spin_unlock(&locked->timeline.lock);
		memset(cache, 0, sizeof(*cache));
		spin_lock(&engine->timeline.lock);
	}

	return engine;
}

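/*
 * Is @rq part of the context currently occupying the first execution
 * port of @engine? If so, there is no point in kicking the tasklet:
 * we would only be asking to preempt ourselves.
 */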
static bool inflight(const struct i915_request *rq,
		     const struct intel_engine_cs *engine)
{
	const struct i915_request *active;

	if (!i915_request_is_active(rq))
		return false;

	active = port_request(engine->execlists.port);
	return active->hw_context == rq->hw_context;
}

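/*
 * Core of the priority-inheritance walk, called with schedule_lock
 * held: flatten the dependency DAG of @rq into a list (see the big
 * comment below), raise every node found to at least @attr->priority,
 * move runnable requests to their new priolists and, where the bump
 * may warrant preemption, kick the submission tasklet.
 */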
static void __i915_schedule(struct i915_request *rq,
			    const struct i915_sched_attr *attr)
{
	struct intel_engine_cs *engine;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	const int prio = attr->priority;
	struct sched_cache cache;
	LIST_HEAD(dfs);

	/* Needed in order to use the temporary link inside i915_dependency */
	lockdep_assert_held(&schedule_lock);
	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

	if (i915_request_completed(rq))
		return;

	if (prio <= READ_ONCE(rq->sched.attr.priority))
		return;

	stack.signaler = &rq->sched;
	list_add(&stack.dfs_link, &dfs);

	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signaler, prio)
	 *	queue_request(node);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add all of its dependencies
	 * to the end of the list (this may include an already visited
	 * request) and continue walking onward along the new dependencies.
	 * The end result is a topological list of requests in reverse order,
	 * the last element in the list is the request we must execute first.
	 */
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		/* If we are already flying, we know we have no signalers */
		if (node_started(node))
			continue;

		/*
		 * Within an engine, there can be no cycle, but we may
		 * refer to the same dependency chain multiple times
		 * (redundant dependencies are not eliminated) and across
		 * engines.
		 */
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			GEM_BUG_ON(p == dep); /* no cycles! */

			if (node_signaled(p->signaler))
				continue;

			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}

	/*
	 * If we didn't need to bump any existing priorities, and we haven't
	 * yet submitted this request (i.e. there is no potential race with
	 * execlists_submit_request()), we can set our own priority and skip
	 * acquiring the engine locks.
	 */
	if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&rq->sched.link));
		rq->sched.attr = *attr;

		/* If our stack entry is alone on the list, nothing to bump */
		if (stack.dfs_link.next == stack.dfs_link.prev)
			return;

		__list_del_entry(&stack.dfs_link);
	}

	memset(&cache, 0, sizeof(cache));
	engine = rq->engine;
	spin_lock_irq(&engine->timeline.lock);

	/* FIFO and depth-first replacement ensure our deps execute before us */
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		INIT_LIST_HEAD(&dep->dfs_link);

		engine = sched_lock_engine(node, engine, &cache);
		lockdep_assert_held(&engine->timeline.lock);

		/* Recheck after acquiring the engine->timeline.lock */
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		node->attr.priority = prio;
		if (!list_empty(&node->link)) {
			if (!cache.priolist)
				cache.priolist =
					i915_sched_lookup_priolist(engine,
								   prio);
			list_move_tail(&node->link, cache.priolist);
		} else {
			/*
			 * If the request is not in the priolist queue because
			 * it is not yet runnable, then it doesn't contribute
			 * to our preemption decisions. On the other hand,
			 * if the request is on the HW, it too is not in the
			 * queue; but in that case we may still need to reorder
			 * the inflight requests.
			 */
			if (!i915_sw_fence_done(&node_to_request(node)->submit))
				continue;
		}

		if (prio <= engine->execlists.queue_priority_hint)
			continue;

		engine->execlists.queue_priority_hint = prio;

		/*
		 * If we are already the currently executing context, don't
		 * bother evaluating if we should preempt ourselves.
		 */
		if (inflight(node_to_request(node), engine))
			continue;

		/* Defer (tasklet) submission until after all of our updates. */
		tasklet_hi_schedule(&engine->execlists.tasklet);
	}

	spin_unlock_irq(&engine->timeline.lock);
}

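/*
 * Apply a new scheduling attribute (currently just the priority) to a
 * request and, transitively, to everything it depends upon.
 */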
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock(&schedule_lock);
	__i915_schedule(rq, attr);
	spin_unlock(&schedule_lock);
}

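/*
 * Raise the priority of @rq by OR-ing @bump into the low, internal
 * priority bits, leaving the user priority untouched. A no-op until
 * the request has been assigned its initial priority.
 */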
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
	struct i915_sched_attr attr;

	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);

	if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
		return;

	spin_lock_bh(&schedule_lock);

	attr = rq->sched.attr;
	attr.priority |= bump;
	__i915_schedule(rq, &attr);

	spin_unlock_bh(&schedule_lock);
}

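/*
 * Return a priolist to the slab. The default_priolist embedded in the
 * execlists state is not slab allocated, so callers are expected to
 * filter that one out before calling this (hence the __ prefix).
 */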
void __i915_priolist_free(struct i915_priolist *p)
{
	kmem_cache_free(global.slab_priorities, p);
}

static void i915_global_scheduler_shrink(void)
{
	kmem_cache_shrink(global.slab_dependencies);
	kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
	kmem_cache_destroy(global.slab_dependencies);
	kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
	.shrink = i915_global_scheduler_shrink,
	.exit = i915_global_scheduler_exit,
} };

int __init i915_global_scheduler_init(void)
{
	global.slab_dependencies = KMEM_CACHE(i915_dependency,
					      SLAB_HWCACHE_ALIGN);
	if (!global.slab_dependencies)
		return -ENOMEM;

	global.slab_priorities = KMEM_CACHE(i915_priolist,
					    SLAB_HWCACHE_ALIGN);
	if (!global.slab_priorities)
		goto err_priorities;

	i915_global_register(&global.base);
	return 0;

err_priorities:
	/* slab_priorities failed to allocate; free the cache that didn't */
	kmem_cache_destroy(global.slab_dependencies);
	return -ENOMEM;
}