xref: /openbmc/linux/kernel/sched/deadline.c (revision 93df8a1e)
1 /*
2  * Deadline Scheduling Class (SCHED_DEADLINE)
3  *
4  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
5  *
 6  * Tasks that periodically execute their instances for less than their
 7  * runtime won't miss any of their deadlines.
 8  * Tasks that are not periodic or sporadic, or that try to execute more
 9  * than their reserved bandwidth, will be slowed down (and may potentially
 10  * miss some of their deadlines) without affecting any other task.
11  *
12  * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
13  *                    Juri Lelli <juri.lelli@gmail.com>,
14  *                    Michael Trimarchi <michael@amarulasolutions.com>,
15  *                    Fabio Checconi <fchecconi@gmail.com>
16  */
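/*
 * Illustrative userspace sketch (not part of this file): a task typically
 * requests a SCHED_DEADLINE reservation via sched_setattr(2), e.g. 10ms of
 * runtime every 100ms. Values are in nanoseconds and field names follow the
 * sched_attr uAPI; error handling is omitted.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *	syscall(__NR_sched_setattr, 0, &attr, 0);
 */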
17 #include "sched.h"
18 
19 #include <linux/slab.h>
20 
21 struct dl_bandwidth def_dl_bandwidth;
22 
23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24 {
25 	return container_of(dl_se, struct task_struct, dl);
26 }
27 
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29 {
30 	return container_of(dl_rq, struct rq, dl);
31 }
32 
33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34 {
35 	struct task_struct *p = dl_task_of(dl_se);
36 	struct rq *rq = task_rq(p);
37 
38 	return &rq->dl;
39 }
40 
41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42 {
43 	return !RB_EMPTY_NODE(&dl_se->rb_node);
44 }
45 
46 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
47 {
48 	struct sched_dl_entity *dl_se = &p->dl;
49 
50 	return dl_rq->rb_leftmost == &dl_se->rb_node;
51 }
52 
53 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
54 {
55 	raw_spin_lock_init(&dl_b->dl_runtime_lock);
56 	dl_b->dl_period = period;
57 	dl_b->dl_runtime = runtime;
58 }
59 
60 void init_dl_bw(struct dl_bw *dl_b)
61 {
62 	raw_spin_lock_init(&dl_b->lock);
63 	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
64 	if (global_rt_runtime() == RUNTIME_INF)
65 		dl_b->bw = -1;
66 	else
67 		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
68 	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
69 	dl_b->total_bw = 0;
70 }
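/*
 * to_ratio() expresses runtime/period as a fixed-point fraction of a CPU.
 * Illustrative numbers, assuming the default rt limits of runtime = 950000us
 * over period = 1000000us: the resulting global bandwidth is roughly 0.95 of
 * each CPU, against which admission control sums dl_runtime/dl_period of
 * every accepted -deadline task.
 */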
71 
72 void init_dl_rq(struct dl_rq *dl_rq)
73 {
74 	dl_rq->rb_root = RB_ROOT;
75 
76 #ifdef CONFIG_SMP
77 	/* zero means no -deadline tasks */
78 	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
79 
80 	dl_rq->dl_nr_migratory = 0;
81 	dl_rq->overloaded = 0;
82 	dl_rq->pushable_dl_tasks_root = RB_ROOT;
83 #else
84 	init_dl_bw(&dl_rq->dl_bw);
85 #endif
86 }
87 
88 #ifdef CONFIG_SMP
89 
90 static inline int dl_overloaded(struct rq *rq)
91 {
92 	return atomic_read(&rq->rd->dlo_count);
93 }
94 
95 static inline void dl_set_overload(struct rq *rq)
96 {
97 	if (!rq->online)
98 		return;
99 
100 	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
101 	/*
102 	 * Must be visible before the overload count is
103 	 * set (as in sched_rt.c).
104 	 *
105 	 * Matched by the barrier in pull_dl_task().
106 	 */
107 	smp_wmb();
108 	atomic_inc(&rq->rd->dlo_count);
109 }
110 
111 static inline void dl_clear_overload(struct rq *rq)
112 {
113 	if (!rq->online)
114 		return;
115 
116 	atomic_dec(&rq->rd->dlo_count);
117 	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
118 }
119 
120 static void update_dl_migration(struct dl_rq *dl_rq)
121 {
122 	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
123 		if (!dl_rq->overloaded) {
124 			dl_set_overload(rq_of_dl_rq(dl_rq));
125 			dl_rq->overloaded = 1;
126 		}
127 	} else if (dl_rq->overloaded) {
128 		dl_clear_overload(rq_of_dl_rq(dl_rq));
129 		dl_rq->overloaded = 0;
130 	}
131 }
132 
133 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
134 {
135 	struct task_struct *p = dl_task_of(dl_se);
136 
137 	if (p->nr_cpus_allowed > 1)
138 		dl_rq->dl_nr_migratory++;
139 
140 	update_dl_migration(dl_rq);
141 }
142 
143 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
144 {
145 	struct task_struct *p = dl_task_of(dl_se);
146 
147 	if (p->nr_cpus_allowed > 1)
148 		dl_rq->dl_nr_migratory--;
149 
150 	update_dl_migration(dl_rq);
151 }
152 
153 /*
154  * The list of pushable -deadline tasks is not a plist, like in
155  * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
156  */
157 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
158 {
159 	struct dl_rq *dl_rq = &rq->dl;
160 	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
161 	struct rb_node *parent = NULL;
162 	struct task_struct *entry;
163 	int leftmost = 1;
164 
165 	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
166 
167 	while (*link) {
168 		parent = *link;
169 		entry = rb_entry(parent, struct task_struct,
170 				 pushable_dl_tasks);
171 		if (dl_entity_preempt(&p->dl, &entry->dl))
172 			link = &parent->rb_left;
173 		else {
174 			link = &parent->rb_right;
175 			leftmost = 0;
176 		}
177 	}
178 
179 	if (leftmost)
180 		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
181 
182 	rb_link_node(&p->pushable_dl_tasks, parent, link);
183 	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
184 }
185 
186 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
187 {
188 	struct dl_rq *dl_rq = &rq->dl;
189 
190 	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
191 		return;
192 
193 	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
194 		struct rb_node *next_node;
195 
196 		next_node = rb_next(&p->pushable_dl_tasks);
197 		dl_rq->pushable_dl_tasks_leftmost = next_node;
198 	}
199 
200 	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
201 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
202 }
203 
204 static inline int has_pushable_dl_tasks(struct rq *rq)
205 {
206 	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
207 }
208 
209 static int push_dl_task(struct rq *rq);
210 
211 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
212 {
213 	return dl_task(prev);
214 }
215 
216 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
217 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
218 
219 static void push_dl_tasks(struct rq *);
220 static void pull_dl_task(struct rq *);
221 
222 static inline void queue_push_tasks(struct rq *rq)
223 {
224 	if (!has_pushable_dl_tasks(rq))
225 		return;
226 
227 	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
228 }
229 
230 static inline void queue_pull_task(struct rq *rq)
231 {
232 	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
233 }
234 
235 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
236 
237 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
238 {
239 	struct rq *later_rq = NULL;
240 	bool fallback = false;
241 
242 	later_rq = find_lock_later_rq(p, rq);
243 
244 	if (!later_rq) {
245 		int cpu;
246 
247 		/*
248 		 * If we cannot preempt any rq, fall back to pick any
249 		 * online cpu.
250 		 */
251 		fallback = true;
252 		cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
253 		if (cpu >= nr_cpu_ids) {
254 			/*
255 			 * Failed to find any suitable cpu.
256 			 * The task will never come back!
257 			 */
258 			BUG_ON(dl_bandwidth_enabled());
259 
260 			/*
261 			 * If admission control is disabled we
262 			 * try a little harder to let the task
263 			 * run.
264 			 */
265 			cpu = cpumask_any(cpu_active_mask);
266 		}
267 		later_rq = cpu_rq(cpu);
268 		double_lock_balance(rq, later_rq);
269 	}
270 
271 	/*
272 	 * By now the task is replenished and enqueued; migrate it.
273 	 */
274 	deactivate_task(rq, p, 0);
275 	set_task_cpu(p, later_rq->cpu);
276 	activate_task(later_rq, p, 0);
277 
278 	if (!fallback)
279 		resched_curr(later_rq);
280 
281 	double_unlock_balance(later_rq, rq);
282 
283 	return later_rq;
284 }
285 
286 #else
287 
288 static inline
289 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
290 {
291 }
292 
293 static inline
294 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
295 {
296 }
297 
298 static inline
299 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
300 {
301 }
302 
303 static inline
304 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
305 {
306 }
307 
308 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
309 {
310 	return false;
311 }
312 
313 static inline void pull_dl_task(struct rq *rq)
314 {
315 }
316 
317 static inline void queue_push_tasks(struct rq *rq)
318 {
319 }
320 
321 static inline void queue_pull_task(struct rq *rq)
322 {
323 }
324 #endif /* CONFIG_SMP */
325 
326 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
327 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
328 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
329 				  int flags);
330 
331 /*
332  * We are being explicitly informed that a new instance is starting,
333  * and this means that:
334  *  - the absolute deadline of the entity has to be placed at
335  *    current time + relative deadline;
336  *  - the runtime of the entity has to be set to the maximum value.
337  *
338  * The capability of specifying such an event is useful whenever a -deadline
339  * entity wants to (try to!) synchronize its behaviour with the scheduler's,
340  * and to (try to!) reconcile itself with its own scheduling
341  * parameters.
342  */
343 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
344 				       struct sched_dl_entity *pi_se)
345 {
346 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
347 	struct rq *rq = rq_of_dl_rq(dl_rq);
348 
349 	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
350 
351 	/*
352 	 * We use the regular wall clock time to set deadlines in the
353 	 * future; in fact, we must consider execution overheads (time
354 	 * spent on hardirq context, etc.).
355 	 */
356 	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
357 	dl_se->runtime = pi_se->dl_runtime;
358 	dl_se->dl_new = 0;
359 }
360 
361 /*
362  * Pure Earliest Deadline First (EDF) scheduling does not deal with the
363  * possibility of an entity lasting more than what it declared, and thus
364  * exhausting its runtime.
365  *
366  * Here we are interested in making runtime overrun possible, but we do
367  * not want a misbehaving entity to affect the scheduling of all the
368  * other entities.
369  * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
370  * is used, in order to confine each entity within its own bandwidth.
371  *
372  * This function deals exactly with that, and ensures that when the runtime
373  * of an entity is replenished, its deadline is also postponed. That ensures
374  * the overrunning entity can't interfere with other entities in the system and
375  * can't make them miss their deadlines. Typical reasons why such overruns
376  * happen are an entity voluntarily trying to exceed its declared runtime,
377  * or simply underestimating it when calling sched_setattr().
378  */
379 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
380 				struct sched_dl_entity *pi_se)
381 {
382 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
383 	struct rq *rq = rq_of_dl_rq(dl_rq);
384 
385 	BUG_ON(pi_se->dl_runtime <= 0);
386 
387 	/*
388 	 * This could be the case for a !-dl task that is boosted.
389 	 * Just go with full inherited parameters.
390 	 */
391 	if (dl_se->dl_deadline == 0) {
392 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
393 		dl_se->runtime = pi_se->dl_runtime;
394 	}
395 
396 	/*
397 	 * We keep moving the deadline away until we get some
398 	 * available runtime for the entity. This ensures correct
399 	 * handling of situations where the runtime overrun is
400 	 * arbitrarily large.
401 	 */
402 	while (dl_se->runtime <= 0) {
403 		dl_se->deadline += pi_se->dl_period;
404 		dl_se->runtime += pi_se->dl_runtime;
405 	}
406 
407 	/*
408 	 * At this point, the deadline really should be "in
409 	 * the future" with respect to rq->clock. If it's
410 	 * not, we are, for some reason, lagging too much!
411 	 * Anyway, after having warned userspace about that,
412 	 * we still try to keep things running by
413 	 * resetting the deadline and the budget of the
414 	 * entity.
415 	 */
416 	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
417 		printk_deferred_once("sched: DL replenish lagged too much\n");
418 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
419 		dl_se->runtime = pi_se->dl_runtime;
420 	}
421 
422 	if (dl_se->dl_yielded)
423 		dl_se->dl_yielded = 0;
424 	if (dl_se->dl_throttled)
425 		dl_se->dl_throttled = 0;
426 }
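/*
 * Worked example with illustrative numbers: an entity with dl_runtime = 10ms
 * and dl_period = 100ms that ends up with runtime = -25ms after an overrun
 * gets three replenishments from the loop above; its deadline is pushed
 * 3 * 100ms further into the future and its runtime ends at -25 + 3 * 10 =
 * +5ms, so the 10ms/100ms bandwidth is preserved despite the overrun.
 */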
427 
428 /*
429  * Here we check if --at time t-- an entity (which is probably being
430  * [re]activated or, in general, enqueued) can use its remaining runtime
431  * and its current deadline _without_ exceeding the bandwidth it is
432  * assigned (function returns true if it can't). We are in fact applying
433  * one of the CBS rules: when a task wakes up, if the residual runtime
434  * over residual deadline fits within the allocated bandwidth, then we
435  * can keep the current (absolute) deadline and residual budget without
436  * disrupting the schedulability of the system. Otherwise, we should
437  * refill the runtime and set the deadline a period in the future,
438  * because keeping the current (absolute) deadline of the task would
439  * result in breaking guarantees promised to other tasks (refer to
440  * Documentation/scheduler/sched-deadline.txt for more information).
441  *
442  * This function returns true if:
443  *
444  *   runtime / (deadline - t) > dl_runtime / dl_period ,
445  *
446  * IOW we can't recycle current parameters.
447  *
448  * Notice that the bandwidth check is done against the period. For
449  * tasks with deadline equal to period this is the same as using
450  * dl_deadline instead of dl_period in the equation above.
451  */
452 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
453 			       struct sched_dl_entity *pi_se, u64 t)
454 {
455 	u64 left, right;
456 
457 	/*
458 	 * left and right are the two sides of the equation above,
459 	 * after a bit of shuffling to use multiplications instead
460 	 * of divisions.
461 	 *
462 	 * Note that none of the time values involved in the two
463 	 * multiplications are absolute: dl_deadline and dl_runtime
464 	 * are the relative deadline and the maximum runtime of each
465 	 * instance, runtime is the runtime left for the last instance
466 	 * and (deadline - t), since t is rq->clock, is the time left
467 	 * to the (absolute) deadline. Even if overflowing the u64 type
468 	 * is very unlikely to occur in either case, here we scale down
469 	 * as we want to avoid that risk altogether. Scaling down by 10
470 	 * means that we reduce granularity to 1us. We are fine with it,
471 	 * since this is only a true/false check and, anyway, thinking
472 	 * of anything below microsecond resolution is actually fiction
473 	 * (but still we want to give the user that illusion >;).
474 	 */
475 	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
476 	right = ((dl_se->deadline - t) >> DL_SCALE) *
477 		(pi_se->dl_runtime >> DL_SCALE);
478 
479 	return dl_time_before(right, left);
480 }
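/*
 * Worked example with illustrative numbers: dl_runtime = 10ms and
 * dl_period = 100ms, and at wakeup the entity has runtime = 4ms left with
 * 30ms to its current deadline. Since 4/30 > 10/100, reusing the residual
 * parameters would exceed the reserved bandwidth: dl_entity_overflow()
 * returns true and update_dl_entity() refills the runtime and moves the
 * deadline a full relative deadline into the future.
 */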
481 
482 /*
483  * When a -deadline entity is queued back on the runqueue, its runtime and
484  * deadline might need updating.
485  *
486  * The policy here is that we update the deadline of the entity only if:
487  *  - the current deadline is in the past,
488  *  - using the remaining runtime with the current deadline would make
489  *    the entity exceed its bandwidth.
490  */
491 static void update_dl_entity(struct sched_dl_entity *dl_se,
492 			     struct sched_dl_entity *pi_se)
493 {
494 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
495 	struct rq *rq = rq_of_dl_rq(dl_rq);
496 
497 	/*
498 	 * The arrival of a new instance needs special treatment, i.e.,
499 	 * the actual scheduling parameters have to be "renewed".
500 	 */
501 	if (dl_se->dl_new) {
502 		setup_new_dl_entity(dl_se, pi_se);
503 		return;
504 	}
505 
506 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
507 	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
508 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
509 		dl_se->runtime = pi_se->dl_runtime;
510 	}
511 }
512 
513 /*
514  * If the entity depleted all its runtime, and if we want it to sleep
515  * while waiting for some new execution time to become available, we
516  * set the bandwidth enforcement timer to the replenishment instant
517  * and try to activate it.
518  *
519  * Notice that it is important for the caller to know if the timer
520  * actually started or not (i.e., the replenishment instant is in
521  * the future or in the past).
522  */
523 static int start_dl_timer(struct task_struct *p)
524 {
525 	struct sched_dl_entity *dl_se = &p->dl;
526 	struct hrtimer *timer = &dl_se->dl_timer;
527 	struct rq *rq = task_rq(p);
528 	ktime_t now, act;
529 	s64 delta;
530 
531 	lockdep_assert_held(&rq->lock);
532 
533 	/*
534 	 * We want the timer to fire at the deadline, but the deadline is
535 	 * expressed in rq->clock time, not in the hrtimer's time base, so
536 	 * we shift it by the current offset between the two clocks.
537 	 */
538 	act = ns_to_ktime(dl_se->deadline);
539 	now = hrtimer_cb_get_time(timer);
540 	delta = ktime_to_ns(now) - rq_clock(rq);
541 	act = ktime_add_ns(act, delta);
542 
543 	/*
544 	 * If the expiry time already passed, e.g., because the value
545 	 * chosen as the deadline is too small, don't even try to
546 	 * start the timer in the past!
547 	 */
548 	if (ktime_us_delta(act, now) < 0)
549 		return 0;
550 
551 	/*
552 	 * !enqueued will guarantee another callback; even if one is already in
553 	 * progress. This ensures a balanced {get,put}_task_struct().
554 	 *
555 	 * The race against __run_timer() clearing the enqueued state is
556 	 * harmless because we're holding task_rq()->lock, therefore the timer
557 	 * expiring after we've done the check will wait on its task_rq_lock()
558 	 * and observe our state.
559 	 */
560 	if (!hrtimer_is_queued(timer)) {
561 		get_task_struct(p);
562 		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
563 	}
564 
565 	return 1;
566 }
567 
568 /*
569  * This is the bandwidth enforcement timer callback. If here, we know
570  * a task is not on its dl_rq, since the fact that the timer was running
571  * means the task is throttled and needs a runtime replenishment.
572  *
573  * However, what we actually do depends on whether the task is active
574  * (it is on its rq) or has been removed from there by a call to
575  * dequeue_task_dl(). In the former case we must issue the runtime
576  * replenishment and add the task back to the dl_rq; in the latter, we just
577  * do nothing but clearing dl_throttled, so that runtime and deadline
578  * updating (and the queueing back to dl_rq) will be done by the
579  * next call to enqueue_task_dl().
580  */
581 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
582 {
583 	struct sched_dl_entity *dl_se = container_of(timer,
584 						     struct sched_dl_entity,
585 						     dl_timer);
586 	struct task_struct *p = dl_task_of(dl_se);
587 	unsigned long flags;
588 	struct rq *rq;
589 
590 	rq = task_rq_lock(p, &flags);
591 
592 	/*
593 	 * The task might have changed its scheduling policy to something
594 	 * different than SCHED_DEADLINE (through switched_from_dl()).
595 	 */
596 	if (!dl_task(p)) {
597 		__dl_clear_params(p);
598 		goto unlock;
599 	}
600 
601 	/*
602 	 * This is possible if switched_from_dl() raced against a running
603 	 * callback that took the above !dl_task() path and we've since then
604 	 * switched back into SCHED_DEADLINE.
605 	 *
606 	 * There's nothing to do except drop our task reference.
607 	 */
608 	if (dl_se->dl_new)
609 		goto unlock;
610 
611 	/*
612 	 * The task might have been boosted by someone else and might be in the
613 	 * boosting/deboosting path, in which case it's not throttled.
614 	 */
615 	if (dl_se->dl_boosted)
616 		goto unlock;
617 
618 	/*
619 	 * Spurious timer due to start_dl_timer() race; or we already received
620 	 * a replenishment from rt_mutex_setprio().
621 	 */
622 	if (!dl_se->dl_throttled)
623 		goto unlock;
624 
625 	sched_clock_tick();
626 	update_rq_clock(rq);
627 
628 	/*
629 	 * If the throttle happened during sched-out; like:
630 	 *
631 	 *   schedule()
632 	 *     deactivate_task()
633 	 *       dequeue_task_dl()
634 	 *         update_curr_dl()
635 	 *           start_dl_timer()
636 	 *         __dequeue_task_dl()
637 	 *     prev->on_rq = 0;
638 	 *
639 	 * We can be both throttled and !queued. Replenish the counter
640 	 * but do not enqueue -- wait for our wakeup to do that.
641 	 */
642 	if (!task_on_rq_queued(p)) {
643 		replenish_dl_entity(dl_se, dl_se);
644 		goto unlock;
645 	}
646 
647 	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
648 	if (dl_task(rq->curr))
649 		check_preempt_curr_dl(rq, p, 0);
650 	else
651 		resched_curr(rq);
652 
653 #ifdef CONFIG_SMP
654 	/*
655 	 * Perform balancing operations here; after the replenishments.  We
656 	 * cannot drop rq->lock before this, otherwise the assertion in
657 	 * start_dl_timer() about not missing updates is not true.
658 	 *
659 	 * If we find that the rq the task was on is no longer available, we
660 	 * need to select a new rq.
661 	 *
662 	 * XXX figure out if select_task_rq_dl() deals with offline cpus.
663 	 */
664 	if (unlikely(!rq->online))
665 		rq = dl_task_offline_migration(rq, p);
666 
667 	/*
668 	 * Queueing this task back might have overloaded rq, check if we need
669 	 * to kick someone away.
670 	 */
671 	if (has_pushable_dl_tasks(rq))
672 		push_dl_task(rq);
673 #endif
674 
675 unlock:
676 	task_rq_unlock(rq, p, &flags);
677 
678 	/*
679 	 * This can free the task_struct, including this hrtimer, do not touch
680 	 * anything related to that after this.
681 	 */
682 	put_task_struct(p);
683 
684 	return HRTIMER_NORESTART;
685 }
686 
687 void init_dl_task_timer(struct sched_dl_entity *dl_se)
688 {
689 	struct hrtimer *timer = &dl_se->dl_timer;
690 
691 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
692 	timer->function = dl_task_timer;
693 }
694 
695 static
696 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
697 {
698 	return (dl_se->runtime <= 0);
699 }
700 
701 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
702 
703 /*
704  * Update the current task's runtime statistics (provided it is still
705  * a -deadline task and has not been removed from the dl_rq).
706  */
707 static void update_curr_dl(struct rq *rq)
708 {
709 	struct task_struct *curr = rq->curr;
710 	struct sched_dl_entity *dl_se = &curr->dl;
711 	u64 delta_exec;
712 
713 	if (!dl_task(curr) || !on_dl_rq(dl_se))
714 		return;
715 
716 	/*
717 	 * Consumed budget is computed considering the time as
718 	 * observed by schedulable tasks (excluding time spent
719 	 * in hardirq context, etc.). Deadlines are instead
720 	 * computed using hard walltime. This seems to be the more
721 	 * natural solution, but the full ramifications of this
722 	 * approach need further study.
723 	 */
724 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
725 	if (unlikely((s64)delta_exec <= 0))
726 		return;
727 
728 	schedstat_set(curr->se.statistics.exec_max,
729 		      max(curr->se.statistics.exec_max, delta_exec));
730 
731 	curr->se.sum_exec_runtime += delta_exec;
732 	account_group_exec_runtime(curr, delta_exec);
733 
734 	curr->se.exec_start = rq_clock_task(rq);
735 	cpuacct_charge(curr, delta_exec);
736 
737 	sched_rt_avg_update(rq, delta_exec);
738 
739 	dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
740 	if (dl_runtime_exceeded(dl_se)) {
741 		dl_se->dl_throttled = 1;
742 		__dequeue_task_dl(rq, curr, 0);
743 		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
744 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
745 
746 		if (!is_leftmost(curr, &rq->dl))
747 			resched_curr(rq);
748 	}
749 
750 	/*
751 	 * Because -- for now -- we share the rt bandwidth, we need to
752 	 * account our runtime there too, otherwise actual rt tasks
753 	 * would be able to exceed the shared quota.
754 	 *
755 	 * Account to the root rt group for now.
756 	 *
757 	 * The solution we're working towards is having the RT groups scheduled
758 	 * using deadline servers -- however there's a few nasties to figure
759 	 * out before that can happen.
760 	 */
761 	if (rt_bandwidth_enabled()) {
762 		struct rt_rq *rt_rq = &rq->rt;
763 
764 		raw_spin_lock(&rt_rq->rt_runtime_lock);
765 		/*
766 		 * We'll let actual RT tasks worry about the overflow here, we
767 		 * have our own CBS to keep us in line; only account when RT
768 		 * bandwidth is relevant.
769 		 */
770 		if (sched_rt_bandwidth_account(rt_rq))
771 			rt_rq->rt_time += delta_exec;
772 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
773 	}
774 }
775 
776 #ifdef CONFIG_SMP
777 
778 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
779 
780 static inline u64 next_deadline(struct rq *rq)
781 {
782 	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
783 
784 	if (next && dl_prio(next->prio))
785 		return next->dl.deadline;
786 	else
787 		return 0;
788 }
789 
790 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
791 {
792 	struct rq *rq = rq_of_dl_rq(dl_rq);
793 
794 	if (dl_rq->earliest_dl.curr == 0 ||
795 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
796 		/*
797 		 * If the dl_rq had no -deadline tasks, or if the new task
798 		 * has a shorter deadline than the current one on dl_rq, we
799 		 * know that the previous earliest becomes our next earliest,
800 		 * as the new task becomes the earliest itself.
801 		 */
802 		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
803 		dl_rq->earliest_dl.curr = deadline;
804 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
805 	} else if (dl_rq->earliest_dl.next == 0 ||
806 		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
807 		/*
808 		 * On the other hand, if the new -deadline task has a
809 		 * later deadline than the earliest one on dl_rq, but
810 		 * it is earlier than the next (if any), we must
811 		 * recompute the next-earliest.
812 		 */
813 		dl_rq->earliest_dl.next = next_deadline(rq);
814 	}
815 }
816 
817 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
818 {
819 	struct rq *rq = rq_of_dl_rq(dl_rq);
820 
821 	/*
822 	 * Since we may have removed our earliest (and/or next earliest)
823 	 * task we must recompute them.
824 	 */
825 	if (!dl_rq->dl_nr_running) {
826 		dl_rq->earliest_dl.curr = 0;
827 		dl_rq->earliest_dl.next = 0;
828 		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
829 	} else {
830 		struct rb_node *leftmost = dl_rq->rb_leftmost;
831 		struct sched_dl_entity *entry;
832 
833 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
834 		dl_rq->earliest_dl.curr = entry->deadline;
835 		dl_rq->earliest_dl.next = next_deadline(rq);
836 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
837 	}
838 }
839 
840 #else
841 
842 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
843 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
844 
845 #endif /* CONFIG_SMP */
846 
847 static inline
848 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
849 {
850 	int prio = dl_task_of(dl_se)->prio;
851 	u64 deadline = dl_se->deadline;
852 
853 	WARN_ON(!dl_prio(prio));
854 	dl_rq->dl_nr_running++;
855 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
856 
857 	inc_dl_deadline(dl_rq, deadline);
858 	inc_dl_migration(dl_se, dl_rq);
859 }
860 
861 static inline
862 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
863 {
864 	int prio = dl_task_of(dl_se)->prio;
865 
866 	WARN_ON(!dl_prio(prio));
867 	WARN_ON(!dl_rq->dl_nr_running);
868 	dl_rq->dl_nr_running--;
869 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
870 
871 	dec_dl_deadline(dl_rq, dl_se->deadline);
872 	dec_dl_migration(dl_se, dl_rq);
873 }
874 
875 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
876 {
877 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
878 	struct rb_node **link = &dl_rq->rb_root.rb_node;
879 	struct rb_node *parent = NULL;
880 	struct sched_dl_entity *entry;
881 	int leftmost = 1;
882 
883 	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
884 
885 	while (*link) {
886 		parent = *link;
887 		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
888 		if (dl_time_before(dl_se->deadline, entry->deadline))
889 			link = &parent->rb_left;
890 		else {
891 			link = &parent->rb_right;
892 			leftmost = 0;
893 		}
894 	}
895 
896 	if (leftmost)
897 		dl_rq->rb_leftmost = &dl_se->rb_node;
898 
899 	rb_link_node(&dl_se->rb_node, parent, link);
900 	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
901 
902 	inc_dl_tasks(dl_se, dl_rq);
903 }
904 
905 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
906 {
907 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
908 
909 	if (RB_EMPTY_NODE(&dl_se->rb_node))
910 		return;
911 
912 	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
913 		struct rb_node *next_node;
914 
915 		next_node = rb_next(&dl_se->rb_node);
916 		dl_rq->rb_leftmost = next_node;
917 	}
918 
919 	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
920 	RB_CLEAR_NODE(&dl_se->rb_node);
921 
922 	dec_dl_tasks(dl_se, dl_rq);
923 }
924 
925 static void
926 enqueue_dl_entity(struct sched_dl_entity *dl_se,
927 		  struct sched_dl_entity *pi_se, int flags)
928 {
929 	BUG_ON(on_dl_rq(dl_se));
930 
931 	/*
932 	 * If this is a wakeup or a new instance, the scheduling
933 	 * parameters of the task might need updating. Otherwise,
934 	 * we want a replenishment of its runtime.
935 	 */
936 	if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
937 		update_dl_entity(dl_se, pi_se);
938 	else if (flags & ENQUEUE_REPLENISH)
939 		replenish_dl_entity(dl_se, pi_se);
940 
941 	__enqueue_dl_entity(dl_se);
942 }
943 
944 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
945 {
946 	__dequeue_dl_entity(dl_se);
947 }
948 
949 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
950 {
951 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
952 	struct sched_dl_entity *pi_se = &p->dl;
953 
954 	/*
955 	 * Use the scheduling parameters of the top pi-waiter
956 	 * task if we have one and its (relative) deadline is
957 	 * smaller than ours... otherwise we keep our runtime and
958 	 * deadline.
959 	 */
960 	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
961 		pi_se = &pi_task->dl;
962 	} else if (!dl_prio(p->normal_prio)) {
963 		/*
964 		 * Special case in which we have a !SCHED_DEADLINE task
965 		 * that is going to be deboosted, but exceeds its
966 		 * runtime while doing so. No point in replenishing
967 		 * it, as it's going to return to its original
968 		 * scheduling class after this.
969 		 */
970 		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
971 		return;
972 	}
973 
974 	/*
975 	 * If p is throttled, we do nothing. In fact, if it exhausted
976 	 * its budget it needs a replenishment and, since it now is on
977 	 * its rq, the bandwidth timer callback (which clearly has not
978 	 * run yet) will take care of this.
979 	 */
980 	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
981 		return;
982 
983 	enqueue_dl_entity(&p->dl, pi_se, flags);
984 
985 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
986 		enqueue_pushable_dl_task(rq, p);
987 }
988 
989 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
990 {
991 	dequeue_dl_entity(&p->dl);
992 	dequeue_pushable_dl_task(rq, p);
993 }
994 
995 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
996 {
997 	update_curr_dl(rq);
998 	__dequeue_task_dl(rq, p, flags);
999 }
1000 
1001 /*
1002  * Yield task semantic for -deadline tasks is:
1003  *
1004  *   get off the CPU until our next instance, with
1005  *   a new runtime. This is of little use now, since we
1006  *   don't have a bandwidth reclaiming mechanism. Anyway,
1007  *   bandwidth reclaiming is planned for the future, and
1008  *   yield_task_dl will indicate that some spare budget
1009  *   is available for other task instances to use.
1010  */
1011 static void yield_task_dl(struct rq *rq)
1012 {
1013 	struct task_struct *p = rq->curr;
1014 
1015 	/*
1016 	 * We make the task go to sleep until its current deadline by
1017 	 * forcing its runtime to zero. This way, update_curr_dl() stops
1018 	 * it and the bandwidth timer will wake it up and will give it
1019 	 * new scheduling parameters (thanks to dl_yielded=1).
1020 	 */
1021 	if (p->dl.runtime > 0) {
1022 		rq->curr->dl.dl_yielded = 1;
1023 		p->dl.runtime = 0;
1024 	}
1025 	update_rq_clock(rq);
1026 	update_curr_dl(rq);
1027 	/*
1028 	 * Tell update_rq_clock() that we've just updated,
1029 	 * so we don't do microscopic update in schedule()
1030 	 * and double the fastpath cost.
1031 	 */
1032 	rq_clock_skip_update(rq, true);
1033 }
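/*
 * Illustrative userspace usage (not part of this file): a periodic -deadline
 * job that finishes its work early can call sched_yield() to give up the
 * rest of its budget until the next instance, e.g.:
 *
 *	for (;;) {
 *		do_periodic_work();	(hypothetical per-instance work)
 *		sched_yield();
 *	}
 */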
1034 
1035 #ifdef CONFIG_SMP
1036 
1037 static int find_later_rq(struct task_struct *task);
1038 
1039 static int
1040 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1041 {
1042 	struct task_struct *curr;
1043 	struct rq *rq;
1044 
1045 	if (sd_flag != SD_BALANCE_WAKE)
1046 		goto out;
1047 
1048 	rq = cpu_rq(cpu);
1049 
1050 	rcu_read_lock();
1051 	curr = READ_ONCE(rq->curr); /* unlocked access */
1052 
1053 	/*
1054 	 * If we are dealing with a -deadline task, we must
1055 	 * decide where to wake it up.
1056 	 * If it has a later deadline and the current task
1057 	 * on this rq can't move (provided the waking task
1058 	 * can!) we prefer to send it somewhere else. On the
1059 	 * other hand, if it has a shorter deadline, we
1060 	 * try to make it stay here, it might be important.
1061 	 */
1062 	if (unlikely(dl_task(curr)) &&
1063 	    (curr->nr_cpus_allowed < 2 ||
1064 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1065 	    (p->nr_cpus_allowed > 1)) {
1066 		int target = find_later_rq(p);
1067 
1068 		if (target != -1 &&
1069 				dl_time_before(p->dl.deadline,
1070 					cpu_rq(target)->dl.earliest_dl.curr))
1071 			cpu = target;
1072 	}
1073 	rcu_read_unlock();
1074 
1075 out:
1076 	return cpu;
1077 }
1078 
1079 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1080 {
1081 	/*
1082 	 * Current can't be migrated, useless to reschedule,
1083 	 * let's hope p can move out.
1084 	 */
1085 	if (rq->curr->nr_cpus_allowed == 1 ||
1086 	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1087 		return;
1088 
1089 	/*
1090 	 * p is migratable, so let's not schedule it and
1091 	 * see if it is pushed or pulled somewhere else.
1092 	 */
1093 	if (p->nr_cpus_allowed != 1 &&
1094 	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1095 		return;
1096 
1097 	resched_curr(rq);
1098 }
1099 
1100 #endif /* CONFIG_SMP */
1101 
1102 /*
1103  * Only called when both the current and waking task are -deadline
1104  * tasks.
1105  */
1106 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1107 				  int flags)
1108 {
1109 	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1110 		resched_curr(rq);
1111 		return;
1112 	}
1113 
1114 #ifdef CONFIG_SMP
1115 	/*
1116 	 * In the unlikely case current and p have the same deadline
1117 	 * let us try to decide what's the best thing to do...
1118 	 */
1119 	if ((p->dl.deadline == rq->curr->dl.deadline) &&
1120 	    !test_tsk_need_resched(rq->curr))
1121 		check_preempt_equal_dl(rq, p);
1122 #endif /* CONFIG_SMP */
1123 }
1124 
1125 #ifdef CONFIG_SCHED_HRTICK
1126 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1127 {
1128 	hrtick_start(rq, p->dl.runtime);
1129 }
1130 #else /* !CONFIG_SCHED_HRTICK */
1131 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1132 {
1133 }
1134 #endif
1135 
1136 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1137 						   struct dl_rq *dl_rq)
1138 {
1139 	struct rb_node *left = dl_rq->rb_leftmost;
1140 
1141 	if (!left)
1142 		return NULL;
1143 
1144 	return rb_entry(left, struct sched_dl_entity, rb_node);
1145 }
1146 
1147 struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1148 {
1149 	struct sched_dl_entity *dl_se;
1150 	struct task_struct *p;
1151 	struct dl_rq *dl_rq;
1152 
1153 	dl_rq = &rq->dl;
1154 
1155 	if (need_pull_dl_task(rq, prev)) {
1156 		/*
1157 		 * This is OK, because current is on_cpu, which avoids it being
1158 		 * picked for load-balance and preemption/IRQs are still
1159 		 * disabled avoiding further scheduler activity on it and we're
1160 		 * being very careful to re-start the picking loop.
1161 		 */
1162 		lockdep_unpin_lock(&rq->lock);
1163 		pull_dl_task(rq);
1164 		lockdep_pin_lock(&rq->lock);
1165 		/*
1166 		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1167 		 * means a stop task can slip in, in which case we need to
1168 		 * re-start task selection.
1169 		 */
1170 		if (rq->stop && task_on_rq_queued(rq->stop))
1171 			return RETRY_TASK;
1172 	}
1173 
1174 	/*
1175 	 * When prev is DL, we may throttle it in put_prev_task().
1176 	 * So, we update time before we check for dl_nr_running.
1177 	 */
1178 	if (prev->sched_class == &dl_sched_class)
1179 		update_curr_dl(rq);
1180 
1181 	if (unlikely(!dl_rq->dl_nr_running))
1182 		return NULL;
1183 
1184 	put_prev_task(rq, prev);
1185 
1186 	dl_se = pick_next_dl_entity(rq, dl_rq);
1187 	BUG_ON(!dl_se);
1188 
1189 	p = dl_task_of(dl_se);
1190 	p->se.exec_start = rq_clock_task(rq);
1191 
1192 	/* Running task will never be pushed. */
1193 	dequeue_pushable_dl_task(rq, p);
1194 
1195 	if (hrtick_enabled(rq))
1196 		start_hrtick_dl(rq, p);
1197 
1198 	queue_push_tasks(rq);
1199 
1200 	return p;
1201 }
1202 
1203 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1204 {
1205 	update_curr_dl(rq);
1206 
1207 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1208 		enqueue_pushable_dl_task(rq, p);
1209 }
1210 
1211 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1212 {
1213 	update_curr_dl(rq);
1214 
1215 	/*
1216 	 * Even when we have runtime, update_curr_dl() might have resulted in us
1217 	 * not being the leftmost task anymore. In that case NEED_RESCHED will
1218 	 * be set and schedule() will start a new hrtick for the next task.
1219 	 */
1220 	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1221 	    is_leftmost(p, &rq->dl))
1222 		start_hrtick_dl(rq, p);
1223 }
1224 
1225 static void task_fork_dl(struct task_struct *p)
1226 {
1227 	/*
1228 	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1229 	 * sched_fork()
1230 	 */
1231 }
1232 
1233 static void task_dead_dl(struct task_struct *p)
1234 {
1235 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1236 
1237 	/*
1238 	 * Since we are TASK_DEAD we won't slip out of the domain!
1239 	 */
1240 	raw_spin_lock_irq(&dl_b->lock);
1241 	/* XXX we should retain the bw until 0-lag */
1242 	dl_b->total_bw -= p->dl.dl_bw;
1243 	raw_spin_unlock_irq(&dl_b->lock);
1244 }
1245 
1246 static void set_curr_task_dl(struct rq *rq)
1247 {
1248 	struct task_struct *p = rq->curr;
1249 
1250 	p->se.exec_start = rq_clock_task(rq);
1251 
1252 	/* You can't push away the running task */
1253 	dequeue_pushable_dl_task(rq, p);
1254 }
1255 
1256 #ifdef CONFIG_SMP
1257 
1258 /* Only try algorithms three times */
1259 #define DL_MAX_TRIES 3
1260 
1261 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1262 {
1263 	if (!task_running(rq, p) &&
1264 	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1265 		return 1;
1266 	return 0;
1267 }
1268 
1269 /* Returns the second earliest -deadline task, NULL otherwise */
1270 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
1271 {
1272 	struct rb_node *next_node = rq->dl.rb_leftmost;
1273 	struct sched_dl_entity *dl_se;
1274 	struct task_struct *p = NULL;
1275 
1276 next_node:
1277 	next_node = rb_next(next_node);
1278 	if (next_node) {
1279 		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
1280 		p = dl_task_of(dl_se);
1281 
1282 		if (pick_dl_task(rq, p, cpu))
1283 			return p;
1284 
1285 		goto next_node;
1286 	}
1287 
1288 	return NULL;
1289 }
1290 
1291 /*
1292  * Return the earliest pushable task on this rq which is suitable to be
1293  * executed on the given CPU, or NULL if there is none:
1294  */
1295 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1296 {
1297 	struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1298 	struct task_struct *p = NULL;
1299 
1300 	if (!has_pushable_dl_tasks(rq))
1301 		return NULL;
1302 
1303 next_node:
1304 	if (next_node) {
1305 		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1306 
1307 		if (pick_dl_task(rq, p, cpu))
1308 			return p;
1309 
1310 		next_node = rb_next(next_node);
1311 		goto next_node;
1312 	}
1313 
1314 	return NULL;
1315 }
1316 
1317 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1318 
1319 static int find_later_rq(struct task_struct *task)
1320 {
1321 	struct sched_domain *sd;
1322 	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1323 	int this_cpu = smp_processor_id();
1324 	int best_cpu, cpu = task_cpu(task);
1325 
1326 	/* Make sure the mask is initialized first */
1327 	if (unlikely(!later_mask))
1328 		return -1;
1329 
1330 	if (task->nr_cpus_allowed == 1)
1331 		return -1;
1332 
1333 	/*
1334 	 * We have to consider system topology and task affinity
1335 	 * first, then we can look for a suitable cpu.
1336 	 */
1337 	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1338 			task, later_mask);
1339 	if (best_cpu == -1)
1340 		return -1;
1341 
1342 	/*
1343 	 * If we are here, some target has been found,
1344 	 * the most suitable of which is cached in best_cpu.
1345 	 * That is, among the runqueues whose current tasks
1346 	 * have later deadlines than this task's, it is the rq
1347 	 * with the latest possible one.
1348 	 *
1349 	 * Now we check how well this matches with task's
1350 	 * affinity and system topology.
1351 	 *
1352 	 * The last cpu where the task ran is our first
1353 	 * guess, since it is most likely cache-hot there.
1354 	 */
1355 	if (cpumask_test_cpu(cpu, later_mask))
1356 		return cpu;
1357 	/*
1358 	 * Check if this_cpu is to be skipped (i.e., it is
1359 	 * not in the mask) or not.
1360 	 */
1361 	if (!cpumask_test_cpu(this_cpu, later_mask))
1362 		this_cpu = -1;
1363 
1364 	rcu_read_lock();
1365 	for_each_domain(cpu, sd) {
1366 		if (sd->flags & SD_WAKE_AFFINE) {
1367 
1368 			/*
1369 			 * If possible, preempting this_cpu is
1370 			 * cheaper than migrating.
1371 			 */
1372 			if (this_cpu != -1 &&
1373 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1374 				rcu_read_unlock();
1375 				return this_cpu;
1376 			}
1377 
1378 			/*
1379 			 * Last chance: if best_cpu is valid and is
1380 			 * in the mask, that becomes our choice.
1381 			 */
1382 			if (best_cpu < nr_cpu_ids &&
1383 			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1384 				rcu_read_unlock();
1385 				return best_cpu;
1386 			}
1387 		}
1388 	}
1389 	rcu_read_unlock();
1390 
1391 	/*
1392 	 * At this point, all our guesses failed, we just return
1393 	 * 'something', and let the caller sort the things out.
1394 	 */
1395 	if (this_cpu != -1)
1396 		return this_cpu;
1397 
1398 	cpu = cpumask_any(later_mask);
1399 	if (cpu < nr_cpu_ids)
1400 		return cpu;
1401 
1402 	return -1;
1403 }
1404 
1405 /* Locks the rq it finds */
1406 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1407 {
1408 	struct rq *later_rq = NULL;
1409 	int tries;
1410 	int cpu;
1411 
1412 	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1413 		cpu = find_later_rq(task);
1414 
1415 		if ((cpu == -1) || (cpu == rq->cpu))
1416 			break;
1417 
1418 		later_rq = cpu_rq(cpu);
1419 
1420 		if (!dl_time_before(task->dl.deadline,
1421 					later_rq->dl.earliest_dl.curr)) {
1422 			/*
1423 			 * Target rq has tasks of equal or earlier deadline,
1424 			 * retrying does not release any lock and is unlikely
1425 			 * to yield a different result.
1426 			 */
1427 			later_rq = NULL;
1428 			break;
1429 		}
1430 
1431 		/* Retry if something changed. */
1432 		if (double_lock_balance(rq, later_rq)) {
1433 			if (unlikely(task_rq(task) != rq ||
1434 				     !cpumask_test_cpu(later_rq->cpu,
1435 				                       &task->cpus_allowed) ||
1436 				     task_running(rq, task) ||
1437 				     !task_on_rq_queued(task))) {
1438 				double_unlock_balance(rq, later_rq);
1439 				later_rq = NULL;
1440 				break;
1441 			}
1442 		}
1443 
1444 		/*
1445 		 * If the rq we found has no -deadline task, or
1446 		 * its earliest one has a later deadline than our
1447 		 * task, the rq is a good one.
1448 		 */
1449 		if (!later_rq->dl.dl_nr_running ||
1450 		    dl_time_before(task->dl.deadline,
1451 				   later_rq->dl.earliest_dl.curr))
1452 			break;
1453 
1454 		/* Otherwise we try again. */
1455 		double_unlock_balance(rq, later_rq);
1456 		later_rq = NULL;
1457 	}
1458 
1459 	return later_rq;
1460 }
1461 
1462 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1463 {
1464 	struct task_struct *p;
1465 
1466 	if (!has_pushable_dl_tasks(rq))
1467 		return NULL;
1468 
1469 	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1470 		     struct task_struct, pushable_dl_tasks);
1471 
1472 	BUG_ON(rq->cpu != task_cpu(p));
1473 	BUG_ON(task_current(rq, p));
1474 	BUG_ON(p->nr_cpus_allowed <= 1);
1475 
1476 	BUG_ON(!task_on_rq_queued(p));
1477 	BUG_ON(!dl_task(p));
1478 
1479 	return p;
1480 }
1481 
1482 /*
1483  * See if the non running -deadline tasks on this rq
1484  * can be sent to some other CPU where they can preempt
1485  * and start executing.
1486  */
1487 static int push_dl_task(struct rq *rq)
1488 {
1489 	struct task_struct *next_task;
1490 	struct rq *later_rq;
1491 	int ret = 0;
1492 
1493 	if (!rq->dl.overloaded)
1494 		return 0;
1495 
1496 	next_task = pick_next_pushable_dl_task(rq);
1497 	if (!next_task)
1498 		return 0;
1499 
1500 retry:
1501 	if (unlikely(next_task == rq->curr)) {
1502 		WARN_ON(1);
1503 		return 0;
1504 	}
1505 
1506 	/*
1507 	 * If next_task preempts rq->curr, and rq->curr
1508 	 * can move away, it makes sense to just reschedule
1509 	 * without going further in pushing next_task.
1510 	 */
1511 	if (dl_task(rq->curr) &&
1512 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1513 	    rq->curr->nr_cpus_allowed > 1) {
1514 		resched_curr(rq);
1515 		return 0;
1516 	}
1517 
1518 	/* We might release rq lock */
1519 	get_task_struct(next_task);
1520 
1521 	/* Will lock the rq it'll find */
1522 	later_rq = find_lock_later_rq(next_task, rq);
1523 	if (!later_rq) {
1524 		struct task_struct *task;
1525 
1526 		/*
1527 		 * We must check all this again, since
1528 		 * find_lock_later_rq releases rq->lock and it is
1529 		 * then possible that next_task has migrated.
1530 		 */
1531 		task = pick_next_pushable_dl_task(rq);
1532 		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1533 			/*
1534 			 * The task is still there. We don't try
1535 			 * again, some other cpu will pull it when ready.
1536 			 */
1537 			goto out;
1538 		}
1539 
1540 		if (!task)
1541 			/* No more tasks */
1542 			goto out;
1543 
1544 		put_task_struct(next_task);
1545 		next_task = task;
1546 		goto retry;
1547 	}
1548 
1549 	deactivate_task(rq, next_task, 0);
1550 	set_task_cpu(next_task, later_rq->cpu);
1551 	activate_task(later_rq, next_task, 0);
1552 	ret = 1;
1553 
1554 	resched_curr(later_rq);
1555 
1556 	double_unlock_balance(rq, later_rq);
1557 
1558 out:
1559 	put_task_struct(next_task);
1560 
1561 	return ret;
1562 }
1563 
1564 static void push_dl_tasks(struct rq *rq)
1565 {
1566 	/* Terminates as it moves a -deadline task */
1567 	while (push_dl_task(rq))
1568 		;
1569 }
1570 
1571 static void pull_dl_task(struct rq *this_rq)
1572 {
1573 	int this_cpu = this_rq->cpu, cpu;
1574 	struct task_struct *p;
1575 	bool resched = false;
1576 	struct rq *src_rq;
1577 	u64 dmin = LONG_MAX;
1578 
1579 	if (likely(!dl_overloaded(this_rq)))
1580 		return;
1581 
1582 	/*
1583 	 * Match the barrier from dl_set_overload(); this guarantees that if we
1584 	 * see overloaded we must also see the dlo_mask bit.
1585 	 */
1586 	smp_rmb();
1587 
1588 	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1589 		if (this_cpu == cpu)
1590 			continue;
1591 
1592 		src_rq = cpu_rq(cpu);
1593 
1594 		/*
1595 		 * It looks racy, and it is! However, as in sched_rt.c,
1596 		 * we are fine with this.
1597 		 */
1598 		if (this_rq->dl.dl_nr_running &&
1599 		    dl_time_before(this_rq->dl.earliest_dl.curr,
1600 				   src_rq->dl.earliest_dl.next))
1601 			continue;
1602 
1603 		/* Might drop this_rq->lock */
1604 		double_lock_balance(this_rq, src_rq);
1605 
1606 		/*
1607 		 * If there are no more pullable tasks on the
1608 		 * rq, we're done with it.
1609 		 */
1610 		if (src_rq->dl.dl_nr_running <= 1)
1611 			goto skip;
1612 
1613 		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1614 
1615 		/*
1616 		 * We found a task to be pulled if:
1617 		 *  - it preempts our current (if there's one),
1618 		 *  - it will preempt the last one we pulled (if any).
1619 		 */
1620 		if (p && dl_time_before(p->dl.deadline, dmin) &&
1621 		    (!this_rq->dl.dl_nr_running ||
1622 		     dl_time_before(p->dl.deadline,
1623 				    this_rq->dl.earliest_dl.curr))) {
1624 			WARN_ON(p == src_rq->curr);
1625 			WARN_ON(!task_on_rq_queued(p));
1626 
1627 			/*
1628 			 * Then we pull iff p has actually an earlier
1629 			 * deadline than the current task of its runqueue.
1630 			 */
1631 			if (dl_time_before(p->dl.deadline,
1632 					   src_rq->curr->dl.deadline))
1633 				goto skip;
1634 
1635 			resched = true;
1636 
1637 			deactivate_task(src_rq, p, 0);
1638 			set_task_cpu(p, this_cpu);
1639 			activate_task(this_rq, p, 0);
1640 			dmin = p->dl.deadline;
1641 
1642 			/* Is there any other task even earlier? */
1643 		}
1644 skip:
1645 		double_unlock_balance(this_rq, src_rq);
1646 	}
1647 
1648 	if (resched)
1649 		resched_curr(this_rq);
1650 }
1651 
1652 /*
1653  * Since the task is not running and a reschedule is not going to happen
1654  * anytime soon on its runqueue, we try pushing it away now.
1655  */
1656 static void task_woken_dl(struct rq *rq, struct task_struct *p)
1657 {
1658 	if (!task_running(rq, p) &&
1659 	    !test_tsk_need_resched(rq->curr) &&
1660 	    has_pushable_dl_tasks(rq) &&
1661 	    p->nr_cpus_allowed > 1 &&
1662 	    dl_task(rq->curr) &&
1663 	    (rq->curr->nr_cpus_allowed < 2 ||
1664 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1665 		push_dl_tasks(rq);
1666 	}
1667 }
1668 
1669 static void set_cpus_allowed_dl(struct task_struct *p,
1670 				const struct cpumask *new_mask)
1671 {
1672 	struct rq *rq;
1673 	struct root_domain *src_rd;
1674 	int weight;
1675 
1676 	BUG_ON(!dl_task(p));
1677 
1678 	rq = task_rq(p);
1679 	src_rd = rq->rd;
1680 	/*
1681 	 * Migrating a SCHED_DEADLINE task between exclusive
1682 	 * cpusets (different root_domains) entails a bandwidth
1683 	 * update. We already made space for us in the destination
1684 	 * domain (see cpuset_can_attach()).
1685 	 */
1686 	if (!cpumask_intersects(src_rd->span, new_mask)) {
1687 		struct dl_bw *src_dl_b;
1688 
1689 		src_dl_b = dl_bw_of(cpu_of(rq));
1690 		/*
1691 		 * We now free resources of the root_domain we are migrating
1692 		 * off. In the worst case, sched_setattr() may temporarily fail
1693 		 * until we complete the update.
1694 		 */
1695 		raw_spin_lock(&src_dl_b->lock);
1696 		__dl_clear(src_dl_b, p->dl.dl_bw);
1697 		raw_spin_unlock(&src_dl_b->lock);
1698 	}
1699 
1700 	/*
1701 	 * Update only if the task is actually running (i.e.,
1702 	 * it is on the rq AND it is not throttled).
1703 	 */
1704 	if (!on_dl_rq(&p->dl))
1705 		return;
1706 
1707 	weight = cpumask_weight(new_mask);
1708 
1709 	/*
1710 	 * Only update if the process changes whether it
1711 	 * can migrate or not.
1712 	 */
1713 	if ((p->nr_cpus_allowed > 1) == (weight > 1))
1714 		return;
1715 
1716 	/*
1717 	 * The process used to be able to migrate OR it can now migrate
1718 	 */
1719 	if (weight <= 1) {
1720 		if (!task_current(rq, p))
1721 			dequeue_pushable_dl_task(rq, p);
1722 		BUG_ON(!rq->dl.dl_nr_migratory);
1723 		rq->dl.dl_nr_migratory--;
1724 	} else {
1725 		if (!task_current(rq, p))
1726 			enqueue_pushable_dl_task(rq, p);
1727 		rq->dl.dl_nr_migratory++;
1728 	}
1729 
1730 	update_dl_migration(&rq->dl);
1731 }
1732 
1733 /* Assumes rq->lock is held */
1734 static void rq_online_dl(struct rq *rq)
1735 {
1736 	if (rq->dl.overloaded)
1737 		dl_set_overload(rq);
1738 
1739 	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1740 	if (rq->dl.dl_nr_running > 0)
1741 		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1742 }
1743 
1744 /* Assumes rq->lock is held */
1745 static void rq_offline_dl(struct rq *rq)
1746 {
1747 	if (rq->dl.overloaded)
1748 		dl_clear_overload(rq);
1749 
1750 	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1751 	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1752 }
1753 
1754 void __init init_sched_dl_class(void)
1755 {
1756 	unsigned int i;
1757 
1758 	for_each_possible_cpu(i)
1759 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1760 					GFP_KERNEL, cpu_to_node(i));
1761 }
1762 
1763 #endif /* CONFIG_SMP */
1764 
1765 static void switched_from_dl(struct rq *rq, struct task_struct *p)
1766 {
1767 	/*
1768 	 * Start the deadline timer; if we switch back to dl before this we'll
1769 	 * continue consuming our current CBS slice. If we stay outside of
1770 	 * SCHED_DEADLINE until the deadline passes, the timer will reset the
1771 	 * task.
1772 	 */
1773 	if (!start_dl_timer(p))
1774 		__dl_clear_params(p);
1775 
1776 	/*
1777 	 * Since this might be the only -deadline task on the rq,
1778 	 * this is the right place to try to pull some other one
1779 	 * from an overloaded cpu, if any.
1780 	 */
1781 	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1782 		return;
1783 
1784 	queue_pull_task(rq);
1785 }
1786 
1787 /*
1788  * When switching to -deadline, we may overload the rq, then
1789  * we try to push someone off, if possible.
1790  */
1791 static void switched_to_dl(struct rq *rq, struct task_struct *p)
1792 {
1793 	if (task_on_rq_queued(p) && rq->curr != p) {
1794 #ifdef CONFIG_SMP
1795 		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
1796 			queue_push_tasks(rq);
1797 #else
1798 		if (dl_task(rq->curr))
1799 			check_preempt_curr_dl(rq, p, 0);
1800 		else
1801 			resched_curr(rq);
1802 #endif
1803 	}
1804 }
1805 
1806 /*
1807  * If the scheduling parameters of a -deadline task changed,
1808  * a push or pull operation might be needed.
1809  */
1810 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1811 			    int oldprio)
1812 {
1813 	if (task_on_rq_queued(p) || rq->curr == p) {
1814 #ifdef CONFIG_SMP
1815 		/*
1816 		 * This might be too much, but unfortunately
1817 		 * we don't have the old deadline value, and
1818 		 * we can't tell whether the task is raising
1819 		 * or lowering its prio, so...
1820 		 */
1821 		if (!rq->dl.overloaded)
1822 			queue_pull_task(rq);
1823 
1824 		/*
1825 		 * If we now have an earlier deadline task than p,
1826 		 * then reschedule, provided p is still on this
1827 		 * runqueue.
1828 		 */
1829 		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1830 			resched_curr(rq);
1831 #else
1832 		/*
1833 		 * Again, we don't know if p has an earlier
1834 		 * or later deadline, so let's blindly set a
1835 		 * (maybe not needed) rescheduling point.
1836 		 */
1837 		resched_curr(rq);
1838 #endif /* CONFIG_SMP */
1839 	} else
1840 		switched_to_dl(rq, p);
1841 }
1842 
1843 const struct sched_class dl_sched_class = {
1844 	.next			= &rt_sched_class,
1845 	.enqueue_task		= enqueue_task_dl,
1846 	.dequeue_task		= dequeue_task_dl,
1847 	.yield_task		= yield_task_dl,
1848 
1849 	.check_preempt_curr	= check_preempt_curr_dl,
1850 
1851 	.pick_next_task		= pick_next_task_dl,
1852 	.put_prev_task		= put_prev_task_dl,
1853 
1854 #ifdef CONFIG_SMP
1855 	.select_task_rq		= select_task_rq_dl,
1856 	.set_cpus_allowed       = set_cpus_allowed_dl,
1857 	.rq_online              = rq_online_dl,
1858 	.rq_offline             = rq_offline_dl,
1859 	.task_woken		= task_woken_dl,
1860 #endif
1861 
1862 	.set_curr_task		= set_curr_task_dl,
1863 	.task_tick		= task_tick_dl,
1864 	.task_fork              = task_fork_dl,
1865 	.task_dead		= task_dead_dl,
1866 
1867 	.prio_changed           = prio_changed_dl,
1868 	.switched_from		= switched_from_dl,
1869 	.switched_to		= switched_to_dl,
1870 
1871 	.update_curr		= update_curr_dl,
1872 };
1873 
1874 #ifdef CONFIG_SCHED_DEBUG
1875 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1876 
1877 void print_dl_stats(struct seq_file *m, int cpu)
1878 {
1879 	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1880 }
1881 #endif /* CONFIG_SCHED_DEBUG */
1882