xref: /openbmc/linux/kernel/sched/deadline.c (revision 3c6a73cc)
1 /*
2  * Deadline Scheduling Class (SCHED_DEADLINE)
3  *
4  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
5  *
6  * Tasks that periodically execute their instances for less than their
7  * runtime won't miss any of their deadlines.
8  * Tasks that are not periodic or sporadic or that try to execute more
9  * than their reserved bandwidth will be slowed down (and may potentially
10  * miss some of their deadlines), and won't affect any other task.
11  *
12  * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
13  *                    Juri Lelli <juri.lelli@gmail.com>,
14  *                    Michael Trimarchi <michael@amarulasolutions.com>,
15  *                    Fabio Checconi <fchecconi@gmail.com>
16  */
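
/*
 * An illustrative sketch (example values only) of how a task would ask for
 * this scheduling class from userspace: sched_setattr() takes runtime,
 * deadline and period in nanoseconds, here 10ms of runtime and a 30ms
 * relative deadline out of every 100ms period. There is no glibc wrapper,
 * so the raw syscall is used; see Documentation/scheduler/sched-deadline.txt.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	=  30 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */
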
17 #include "sched.h"
18 
19 #include <linux/slab.h>
20 
21 struct dl_bandwidth def_dl_bandwidth;
22 
23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24 {
25 	return container_of(dl_se, struct task_struct, dl);
26 }
27 
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29 {
30 	return container_of(dl_rq, struct rq, dl);
31 }
32 
33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34 {
35 	struct task_struct *p = dl_task_of(dl_se);
36 	struct rq *rq = task_rq(p);
37 
38 	return &rq->dl;
39 }
40 
41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42 {
43 	return !RB_EMPTY_NODE(&dl_se->rb_node);
44 }
45 
46 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
47 {
48 	struct sched_dl_entity *dl_se = &p->dl;
49 
50 	return dl_rq->rb_leftmost == &dl_se->rb_node;
51 }
52 
53 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
54 {
55 	raw_spin_lock_init(&dl_b->dl_runtime_lock);
56 	dl_b->dl_period = period;
57 	dl_b->dl_runtime = runtime;
58 }
59 
60 void init_dl_bw(struct dl_bw *dl_b)
61 {
62 	raw_spin_lock_init(&dl_b->lock);
63 	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
64 	if (global_rt_runtime() == RUNTIME_INF)
65 		dl_b->bw = -1;
66 	else
67 		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
68 	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
69 	dl_b->total_bw = 0;
70 }
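
/*
 * A worked example of the ratio above (illustrative, using the usual
 * defaults of sched_rt_runtime_us = 950000 and sched_rt_period_us =
 * 1000000): dl_b->bw then encodes "95% of a CPU", and total_bw
 * accumulates the dl_bw of each admitted -deadline task, so admission
 * control can reject a task whose bandwidth would push the sum past
 * that cap.
 */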
71 
72 void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
73 {
74 	dl_rq->rb_root = RB_ROOT;
75 
76 #ifdef CONFIG_SMP
77 	/* zero means no -deadline tasks */
78 	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
79 
80 	dl_rq->dl_nr_migratory = 0;
81 	dl_rq->overloaded = 0;
82 	dl_rq->pushable_dl_tasks_root = RB_ROOT;
83 #else
84 	init_dl_bw(&dl_rq->dl_bw);
85 #endif
86 }
87 
88 #ifdef CONFIG_SMP
89 
90 static inline int dl_overloaded(struct rq *rq)
91 {
92 	return atomic_read(&rq->rd->dlo_count);
93 }
94 
95 static inline void dl_set_overload(struct rq *rq)
96 {
97 	if (!rq->online)
98 		return;
99 
100 	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
101 	/*
102 	 * Must be visible before the overload count is
103 	 * set (as in sched_rt.c).
104 	 *
105 	 * Matched by the barrier in pull_dl_task().
106 	 */
107 	smp_wmb();
108 	atomic_inc(&rq->rd->dlo_count);
109 }
110 
111 static inline void dl_clear_overload(struct rq *rq)
112 {
113 	if (!rq->online)
114 		return;
115 
116 	atomic_dec(&rq->rd->dlo_count);
117 	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
118 }
119 
120 static void update_dl_migration(struct dl_rq *dl_rq)
121 {
122 	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
123 		if (!dl_rq->overloaded) {
124 			dl_set_overload(rq_of_dl_rq(dl_rq));
125 			dl_rq->overloaded = 1;
126 		}
127 	} else if (dl_rq->overloaded) {
128 		dl_clear_overload(rq_of_dl_rq(dl_rq));
129 		dl_rq->overloaded = 0;
130 	}
131 }
132 
133 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
134 {
135 	struct task_struct *p = dl_task_of(dl_se);
136 
137 	if (p->nr_cpus_allowed > 1)
138 		dl_rq->dl_nr_migratory++;
139 
140 	update_dl_migration(dl_rq);
141 }
142 
143 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
144 {
145 	struct task_struct *p = dl_task_of(dl_se);
146 
147 	if (p->nr_cpus_allowed > 1)
148 		dl_rq->dl_nr_migratory--;
149 
150 	update_dl_migration(dl_rq);
151 }
152 
153 /*
154  * The list of pushable -deadline tasks is not a plist, like in
155  * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
156  */
157 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
158 {
159 	struct dl_rq *dl_rq = &rq->dl;
160 	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
161 	struct rb_node *parent = NULL;
162 	struct task_struct *entry;
163 	int leftmost = 1;
164 
165 	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
166 
167 	while (*link) {
168 		parent = *link;
169 		entry = rb_entry(parent, struct task_struct,
170 				 pushable_dl_tasks);
171 		if (dl_entity_preempt(&p->dl, &entry->dl))
172 			link = &parent->rb_left;
173 		else {
174 			link = &parent->rb_right;
175 			leftmost = 0;
176 		}
177 	}
178 
179 	if (leftmost)
180 		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
181 
182 	rb_link_node(&p->pushable_dl_tasks, parent, link);
183 	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
184 }
185 
186 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
187 {
188 	struct dl_rq *dl_rq = &rq->dl;
189 
190 	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
191 		return;
192 
193 	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
194 		struct rb_node *next_node;
195 
196 		next_node = rb_next(&p->pushable_dl_tasks);
197 		dl_rq->pushable_dl_tasks_leftmost = next_node;
198 	}
199 
200 	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
201 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
202 }
203 
204 static inline int has_pushable_dl_tasks(struct rq *rq)
205 {
206 	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
207 }
208 
209 static int push_dl_task(struct rq *rq);
210 
211 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
212 {
213 	return dl_task(prev);
214 }
215 
216 static inline void set_post_schedule(struct rq *rq)
217 {
218 	rq->post_schedule = has_pushable_dl_tasks(rq);
219 }
220 
221 #else
222 
223 static inline
224 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
225 {
226 }
227 
228 static inline
229 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
230 {
231 }
232 
233 static inline
234 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
235 {
236 }
237 
238 static inline
239 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
240 {
241 }
242 
243 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
244 {
245 	return false;
246 }
247 
248 static inline int pull_dl_task(struct rq *rq)
249 {
250 	return 0;
251 }
252 
253 static inline void set_post_schedule(struct rq *rq)
254 {
255 }
256 #endif /* CONFIG_SMP */
257 
258 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
259 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
260 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
261 				  int flags);
262 
263 /*
264  * We are being explicitly informed that a new instance is starting,
265  * and this means that:
266  *  - the absolute deadline of the entity has to be placed at
267  *    current time + relative deadline;
268  *  - the runtime of the entity has to be set to the maximum value.
269  *
270  * The capability of specifying such an event is useful whenever a -deadline
271  * entity wants to (try to!) synchronize its behaviour with the scheduler's,
272  * and to (try to!) reconcile itself with its own scheduling
273  * parameters.
274  */
275 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
276 				       struct sched_dl_entity *pi_se)
277 {
278 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
279 	struct rq *rq = rq_of_dl_rq(dl_rq);
280 
281 	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
282 
283 	/*
284 	 * We use the regular wall clock time to set deadlines in the
285 	 * future; in fact, we must consider execution overheads (time
286 	 * spent on hardirq context, etc.).
287 	 */
288 	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
289 	dl_se->runtime = pi_se->dl_runtime;
290 	dl_se->dl_new = 0;
291 }
292 
293 /*
294  * Pure Earliest Deadline First (EDF) scheduling does not deal with the
295  * possibility of an entity lasting more than what it declared, and thus
296  * exhausting its runtime.
297  *
298  * Here we are interested in making runtime overrun possible, but we do
299  * not want an entity which is misbehaving to affect the scheduling of all
300  * other entities.
301  * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
302  * is used, in order to confine each entity within its own bandwidth.
303  *
304  * This function deals exactly with that, and ensures that when the runtime
305  * of an entity is replenished, its deadline is also postponed. That ensures
306  * the overrunning entity can't interfere with other entities in the system and
307  * can't make them miss their deadlines. Reasons why this kind of overrun
308  * could happen are, typically, an entity voluntarily trying to exceed its
309  * runtime, or having underestimated it during sched_setattr().
310  */
311 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
312 				struct sched_dl_entity *pi_se)
313 {
314 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
315 	struct rq *rq = rq_of_dl_rq(dl_rq);
316 
317 	BUG_ON(pi_se->dl_runtime <= 0);
318 
319 	/*
320 	 * This could be the case for a !-dl task that is boosted.
321 	 * Just go with full inherited parameters.
322 	 */
323 	if (dl_se->dl_deadline == 0) {
324 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
325 		dl_se->runtime = pi_se->dl_runtime;
326 	}
327 
328 	/*
329 	 * We keep moving the deadline away until we get some
330 	 * available runtime for the entity. This ensures correct
331 	 * handling of situations where the runtime overrun is
332 	 * arbitrarily large.
333 	 */
334 	while (dl_se->runtime <= 0) {
335 		dl_se->deadline += pi_se->dl_period;
336 		dl_se->runtime += pi_se->dl_runtime;
337 	}
338 
339 	/*
340 	 * At this point, the deadline really should be "in
341 	 * the future" with respect to rq->clock. If it's
342 	 * not, we are, for some reason, lagging too much!
343 	 * Anyway, after having warned userspace about that,
344 	 * we still try to keep things running by
345 	 * resetting the deadline and the budget of the
346 	 * entity.
347 	 */
348 	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
349 		printk_deferred_once("sched: DL replenish lagged too much\n");
350 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
351 		dl_se->runtime = pi_se->dl_runtime;
352 	}
353 }
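
/*
 * A worked example of the replenishment loop above (illustrative numbers):
 * with dl_runtime = 5ms and dl_period = 20ms, an entity that overran down
 * to runtime = -7ms gets two replenishments, -7ms -> -2ms -> +3ms, while
 * its deadline is pushed 2 * 20ms = 40ms into the future. The bigger the
 * overrun, the further the deadline moves, so the long-term bandwidth the
 * entity receives still converges to dl_runtime / dl_period.
 */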
354 
355 /*
356  * Here we check if --at time t-- an entity (which is probably being
357  * [re]activated or, in general, enqueued) can use its remaining runtime
358  * and its current deadline _without_ exceeding the bandwidth it is
359  * assigned (function returns true if it can't). We are in fact applying
360  * one of the CBS rules: when a task wakes up, if the residual runtime
361  * over residual deadline fits within the allocated bandwidth, then we
362  * can keep the current (absolute) deadline and residual budget without
363  * disrupting the schedulability of the system. Otherwise, we should
364  * refill the runtime and set the deadline a period in the future,
365  * because keeping the current (absolute) deadline of the task would
366  * result in breaking guarantees promised to other tasks (refer to
367  * Documentation/scheduler/sched-deadline.txt for more information).
368  *
369  * This function returns true if:
370  *
371  *   runtime / (deadline - t) > dl_runtime / dl_period ,
372  *
373  * IOW we can't recycle current parameters.
374  *
375  * Notice that the bandwidth check is done against the period. For
376  * a task with deadline equal to period this is the same as using
377  * dl_deadline instead of dl_period in the equation above.
378  */
379 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
380 			       struct sched_dl_entity *pi_se, u64 t)
381 {
382 	u64 left, right;
383 
384 	/*
385 	 * left and right are the two sides of the equation above,
386 	 * after a bit of shuffling to use multiplications instead
387 	 * of divisions.
388 	 *
389 	 * Note that none of the time values involved in the two
390 	 * multiplications are absolute: dl_deadline and dl_runtime
391 	 * are the relative deadline and the maximum runtime of each
392 	 * instance, runtime is the runtime left for the last instance
393 	 * and (deadline - t), since t is rq->clock, is the time left
394 	 * to the (absolute) deadline. Even if overflowing the u64 type
395 	 * is very unlikely to occur in both cases, we scale down here
396 	 * because we want to avoid that risk entirely. Scaling down by
397 	 * 10 bits means that we reduce granularity to about 1us. We are
398 	 * fine with it, since this is only a true/false check and, anyway,
399 	 * thinking of anything below microsecond resolution is actually fiction
400 	 * (but still we want to give the user that illusion >;).
401 	 */
402 	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
403 	right = ((dl_se->deadline - t) >> DL_SCALE) *
404 		(pi_se->dl_runtime >> DL_SCALE);
405 
406 	return dl_time_before(right, left);
407 }
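
/*
 * A worked example of the check above (illustrative numbers): a task with
 * dl_runtime = 10ms and dl_period = 100ms (10% bandwidth) wakes up with
 * 2ms of runtime left and only 10ms to its current deadline. Its residual
 * density is 2ms / 10ms = 20%, more than the 10% it was granted, so this
 * function returns true and the caller refills the runtime and moves the
 * deadline one relative deadline away from now.
 */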
408 
409 /*
410  * When a -deadline entity is queued back on the runqueue, its runtime and
411  * deadline might need updating.
412  *
413  * The policy here is that we update the deadline of the entity only if:
414  *  - the current deadline is in the past,
415  *  - using the remaining runtime with the current deadline would make
416  *    the entity exceed its bandwidth.
417  */
418 static void update_dl_entity(struct sched_dl_entity *dl_se,
419 			     struct sched_dl_entity *pi_se)
420 {
421 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
422 	struct rq *rq = rq_of_dl_rq(dl_rq);
423 
424 	/*
425 	 * The arrival of a new instance needs special treatment, i.e.,
426 	 * the actual scheduling parameters have to be "renewed".
427 	 */
428 	if (dl_se->dl_new) {
429 		setup_new_dl_entity(dl_se, pi_se);
430 		return;
431 	}
432 
433 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
434 	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
435 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
436 		dl_se->runtime = pi_se->dl_runtime;
437 	}
438 }
439 
440 /*
441  * If the entity depleted all its runtime, and if we want it to sleep
442  * while waiting for some new execution time to become available, we
443  * set the bandwidth enforcement timer to the replenishment instant
444  * and try to activate it.
445  *
446  * Notice that it is important for the caller to know if the timer
447  * actually started or not (i.e., the replenishment instant is in
448  * the future or in the past).
449  */
450 static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
451 {
452 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
453 	struct rq *rq = rq_of_dl_rq(dl_rq);
454 	ktime_t now, act;
455 	ktime_t soft, hard;
456 	unsigned long range;
457 	s64 delta;
458 
459 	if (boosted)
460 		return 0;
461 	/*
462 	 * We want the timer to fire at the deadline, but we must account
463 	 * for the fact that the deadline is based on rq->clock and not on
464 	 * the hrtimer's time base reading.
465 	 */
466 	act = ns_to_ktime(dl_se->deadline);
467 	now = hrtimer_cb_get_time(&dl_se->dl_timer);
468 	delta = ktime_to_ns(now) - rq_clock(rq);
469 	act = ktime_add_ns(act, delta);
470 
471 	/*
472 	 * If the expiry time already passed, e.g., because the value
473 	 * chosen as the deadline is too small, don't even try to
474 	 * start the timer in the past!
475 	 */
476 	if (ktime_us_delta(act, now) < 0)
477 		return 0;
478 
479 	hrtimer_set_expires(&dl_se->dl_timer, act);
480 
481 	soft = hrtimer_get_softexpires(&dl_se->dl_timer);
482 	hard = hrtimer_get_expires(&dl_se->dl_timer);
483 	range = ktime_to_ns(ktime_sub(hard, soft));
484 	__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
485 				 range, HRTIMER_MODE_ABS, 0);
486 
487 	return hrtimer_active(&dl_se->dl_timer);
488 }
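
/*
 * A small numeric sketch of the time base conversion above (illustrative
 * values): if the deadline is 150ms in rq_clock terms while rq_clock reads
 * 100ms and the hrtimer base reads 103ms, then delta = 3ms and the timer
 * is armed at 153ms of hrtimer time, i.e. 50ms from now in both bases,
 * which is exactly when the throttled entity is due for replenishment.
 */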
489 
490 /*
491  * This is the bandwidth enforcement timer callback. If here, we know
492  * a task is not on its dl_rq, since the fact that the timer was running
493  * means the task is throttled and needs a runtime replenishment.
494  *
495  * However, what we actually do depends on whether the task is still active
496  * (it is on its rq) or has been removed from there by a call to
497  * dequeue_task_dl(). In the former case we must issue the runtime
498  * replenishment and add the task back to the dl_rq; in the latter, we just
499  * do nothing but clear dl_throttled, so that runtime and deadline
500  * updating (and the queueing back to dl_rq) will be done by the
501  * next call to enqueue_task_dl().
502  */
503 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
504 {
505 	struct sched_dl_entity *dl_se = container_of(timer,
506 						     struct sched_dl_entity,
507 						     dl_timer);
508 	struct task_struct *p = dl_task_of(dl_se);
509 	struct rq *rq;
510 again:
511 	rq = task_rq(p);
512 	raw_spin_lock(&rq->lock);
513 
514 	if (rq != task_rq(p)) {
515 		/* Task was moved, retrying. */
516 		raw_spin_unlock(&rq->lock);
517 		goto again;
518 	}
519 
520 	/*
521 	 * We need to take care of possible races here. In fact, the
522 	 * task might have changed its scheduling policy to something
523 	 * different from SCHED_DEADLINE or changed its reservation
524 	 * parameters (through sched_setattr()).
525 	 */
526 	if (!dl_task(p) || dl_se->dl_new)
527 		goto unlock;
528 
529 	sched_clock_tick();
530 	update_rq_clock(rq);
531 	dl_se->dl_throttled = 0;
532 	dl_se->dl_yielded = 0;
533 	if (task_on_rq_queued(p)) {
534 		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
535 		if (task_has_dl_policy(rq->curr))
536 			check_preempt_curr_dl(rq, p, 0);
537 		else
538 			resched_curr(rq);
539 #ifdef CONFIG_SMP
540 		/*
541 		 * Queueing this task back might have overloaded rq,
542 		 * Queueing this task back might have overloaded the rq,
543 		 */
544 		if (has_pushable_dl_tasks(rq))
545 			push_dl_task(rq);
546 #endif
547 	}
548 unlock:
549 	raw_spin_unlock(&rq->lock);
550 
551 	return HRTIMER_NORESTART;
552 }
553 
554 void init_dl_task_timer(struct sched_dl_entity *dl_se)
555 {
556 	struct hrtimer *timer = &dl_se->dl_timer;
557 
558 	if (hrtimer_active(timer)) {
559 		hrtimer_try_to_cancel(timer);
560 		return;
561 	}
562 
563 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
564 	timer->function = dl_task_timer;
565 }
566 
567 static
568 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
569 {
570 	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
571 	int rorun = dl_se->runtime <= 0;
572 
573 	if (!rorun && !dmiss)
574 		return 0;
575 
576 	/*
577 	 * If we are beyond our current deadline and we are still
578 	 * executing, then we have already used some of the runtime of
579 	 * the next instance. Thus, if we do not account for that, we are
580 	 * stealing bandwidth from the system at each deadline miss!
581 	 */
582 	if (dmiss) {
583 		dl_se->runtime = rorun ? dl_se->runtime : 0;
584 		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
585 	}
586 
587 	return 1;
588 }
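
/*
 * A worked example of the accounting above (illustrative numbers): if the
 * entity is found 3ms past its deadline while its runtime is still
 * positive (dmiss && !rorun), the runtime is first clamped to 0 and then
 * charged the 3ms of "stolen" time, ending up at -3ms. The following
 * replenishment in replenish_dl_entity() has to pay that debt back before
 * the entity may run again.
 */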
589 
590 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
591 
592 /*
593  * Update the current task's runtime statistics (provided it is still
594  * a -deadline task and has not been removed from the dl_rq).
595  */
596 static void update_curr_dl(struct rq *rq)
597 {
598 	struct task_struct *curr = rq->curr;
599 	struct sched_dl_entity *dl_se = &curr->dl;
600 	u64 delta_exec;
601 
602 	if (!dl_task(curr) || !on_dl_rq(dl_se))
603 		return;
604 
605 	/*
606 	 * Consumed budget is computed considering the time as
607 	 * observed by schedulable tasks (excluding time spent
608 	 * in hardirq context, etc.). Deadlines are instead
609 	 * computed using hard walltime. This seems to be the more
610 	 * natural solution, but the full ramifications of this
611 	 * approach need further study.
612 	 */
613 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
614 	if (unlikely((s64)delta_exec <= 0))
615 		return;
616 
617 	schedstat_set(curr->se.statistics.exec_max,
618 		      max(curr->se.statistics.exec_max, delta_exec));
619 
620 	curr->se.sum_exec_runtime += delta_exec;
621 	account_group_exec_runtime(curr, delta_exec);
622 
623 	curr->se.exec_start = rq_clock_task(rq);
624 	cpuacct_charge(curr, delta_exec);
625 
626 	sched_rt_avg_update(rq, delta_exec);
627 
628 	dl_se->runtime -= delta_exec;
629 	if (dl_runtime_exceeded(rq, dl_se)) {
630 		__dequeue_task_dl(rq, curr, 0);
631 		if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
632 			dl_se->dl_throttled = 1;
633 		else
634 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
635 
636 		if (!is_leftmost(curr, &rq->dl))
637 			resched_curr(rq);
638 	}
639 
640 	/*
641 	 * Because -- for now -- we share the rt bandwidth, we need to
642 	 * account our runtime there too, otherwise actual rt tasks
643 	 * would be able to exceed the shared quota.
644 	 *
645 	 * Account to the root rt group for now.
646 	 *
647 	 * The solution we're working towards is having the RT groups scheduled
648 	 * using deadline servers -- however there's a few nasties to figure
649 	 * out before that can happen.
650 	 */
651 	if (rt_bandwidth_enabled()) {
652 		struct rt_rq *rt_rq = &rq->rt;
653 
654 		raw_spin_lock(&rt_rq->rt_runtime_lock);
655 		/*
656 		 * We'll let actual RT tasks worry about the overflow here, we
657 		 * have our own CBS to keep us in line; only account when RT
658 		 * bandwidth is relevant.
659 		 */
660 		if (sched_rt_bandwidth_account(rt_rq))
661 			rt_rq->rt_time += delta_exec;
662 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
663 	}
664 }
665 
666 #ifdef CONFIG_SMP
667 
668 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
669 
670 static inline u64 next_deadline(struct rq *rq)
671 {
672 	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
673 
674 	if (next && dl_prio(next->prio))
675 		return next->dl.deadline;
676 	else
677 		return 0;
678 }
679 
680 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
681 {
682 	struct rq *rq = rq_of_dl_rq(dl_rq);
683 
684 	if (dl_rq->earliest_dl.curr == 0 ||
685 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
686 		/*
687 		 * If the dl_rq had no -deadline tasks, or if the new task
688 		 * has a shorter deadline than the current one on dl_rq, we
689 		 * know that the previous earliest becomes our next earliest,
690 		 * as the new task becomes the earliest itself.
691 		 */
692 		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
693 		dl_rq->earliest_dl.curr = deadline;
694 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
695 	} else if (dl_rq->earliest_dl.next == 0 ||
696 		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
697 		/*
698 		 * On the other hand, if the new -deadline task has a
699 		 * On the other hand, if the new -deadline task has
700 		 * a later deadline than the earliest one on dl_rq, but
701 		 * recompute the next-earliest.
702 		 */
703 		dl_rq->earliest_dl.next = next_deadline(rq);
704 	}
705 }
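
/*
 * An illustrative run of the bookkeeping above: with earliest_dl.curr at
 * 10ms and earliest_dl.next at 15ms, enqueueing a task whose deadline is
 * 8ms makes it the new earliest (curr = 8ms, next = 10ms) and refreshes
 * the cpudl bookkeeping for this CPU, while enqueueing one with a 12ms
 * deadline only triggers a recomputation of the next-earliest value via
 * next_deadline().
 */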
706 
707 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
708 {
709 	struct rq *rq = rq_of_dl_rq(dl_rq);
710 
711 	/*
712 	 * Since we may have removed our earliest (and/or next earliest)
713 	 * task we must recompute them.
714 	 */
715 	if (!dl_rq->dl_nr_running) {
716 		dl_rq->earliest_dl.curr = 0;
717 		dl_rq->earliest_dl.next = 0;
718 		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
719 	} else {
720 		struct rb_node *leftmost = dl_rq->rb_leftmost;
721 		struct sched_dl_entity *entry;
722 
723 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
724 		dl_rq->earliest_dl.curr = entry->deadline;
725 		dl_rq->earliest_dl.next = next_deadline(rq);
726 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
727 	}
728 }
729 
730 #else
731 
732 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
733 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
734 
735 #endif /* CONFIG_SMP */
736 
737 static inline
738 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
739 {
740 	int prio = dl_task_of(dl_se)->prio;
741 	u64 deadline = dl_se->deadline;
742 
743 	WARN_ON(!dl_prio(prio));
744 	dl_rq->dl_nr_running++;
745 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
746 
747 	inc_dl_deadline(dl_rq, deadline);
748 	inc_dl_migration(dl_se, dl_rq);
749 }
750 
751 static inline
752 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
753 {
754 	int prio = dl_task_of(dl_se)->prio;
755 
756 	WARN_ON(!dl_prio(prio));
757 	WARN_ON(!dl_rq->dl_nr_running);
758 	dl_rq->dl_nr_running--;
759 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
760 
761 	dec_dl_deadline(dl_rq, dl_se->deadline);
762 	dec_dl_migration(dl_se, dl_rq);
763 }
764 
765 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
766 {
767 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
768 	struct rb_node **link = &dl_rq->rb_root.rb_node;
769 	struct rb_node *parent = NULL;
770 	struct sched_dl_entity *entry;
771 	int leftmost = 1;
772 
773 	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
774 
775 	while (*link) {
776 		parent = *link;
777 		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
778 		if (dl_time_before(dl_se->deadline, entry->deadline))
779 			link = &parent->rb_left;
780 		else {
781 			link = &parent->rb_right;
782 			leftmost = 0;
783 		}
784 	}
785 
786 	if (leftmost)
787 		dl_rq->rb_leftmost = &dl_se->rb_node;
788 
789 	rb_link_node(&dl_se->rb_node, parent, link);
790 	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
791 
792 	inc_dl_tasks(dl_se, dl_rq);
793 }
794 
795 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
796 {
797 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
798 
799 	if (RB_EMPTY_NODE(&dl_se->rb_node))
800 		return;
801 
802 	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
803 		struct rb_node *next_node;
804 
805 		next_node = rb_next(&dl_se->rb_node);
806 		dl_rq->rb_leftmost = next_node;
807 	}
808 
809 	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
810 	RB_CLEAR_NODE(&dl_se->rb_node);
811 
812 	dec_dl_tasks(dl_se, dl_rq);
813 }
814 
815 static void
816 enqueue_dl_entity(struct sched_dl_entity *dl_se,
817 		  struct sched_dl_entity *pi_se, int flags)
818 {
819 	BUG_ON(on_dl_rq(dl_se));
820 
821 	/*
822 	 * If this is a wakeup or a new instance, the scheduling
823 	 * parameters of the task might need updating. Otherwise,
824 	 * we want a replenishment of its runtime.
825 	 */
826 	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
827 		replenish_dl_entity(dl_se, pi_se);
828 	else
829 		update_dl_entity(dl_se, pi_se);
830 
831 	__enqueue_dl_entity(dl_se);
832 }
833 
834 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
835 {
836 	__dequeue_dl_entity(dl_se);
837 }
838 
839 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
840 {
841 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
842 	struct sched_dl_entity *pi_se = &p->dl;
843 
844 	/*
845 	 * Use the scheduling parameters of the top pi-waiter
846 	 * task if we have one and its (relative) deadline is
847 	 * smaller than ours... Otherwise we keep our runtime and
848 	 * deadline.
849 	 */
850 	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
851 		pi_se = &pi_task->dl;
852 
853 	/*
854 	 * If p is throttled, we do nothing. In fact, if it exhausted
855 	 * its budget it needs a replenishment and, since it now is on
856 	 * its rq, the bandwidth timer callback (which clearly has not
857 	 * run yet) will take care of this.
858 	 */
859 	if (p->dl.dl_throttled)
860 		return;
861 
862 	enqueue_dl_entity(&p->dl, pi_se, flags);
863 
864 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
865 		enqueue_pushable_dl_task(rq, p);
866 }
867 
868 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
869 {
870 	dequeue_dl_entity(&p->dl);
871 	dequeue_pushable_dl_task(rq, p);
872 }
873 
874 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
875 {
876 	update_curr_dl(rq);
877 	__dequeue_task_dl(rq, p, flags);
878 }
879 
880 /*
881  * Yield task semantic for -deadline tasks is:
882  *
883  *   get off from the CPU until our next instance, with
884  *   a new runtime. This is of little use now, since we
885  *   don't have a bandwidth reclaiming mechanism. Anyway,
886  *   bandwidth reclaiming is planned for the future, and
887  *   yield_task_dl will indicate that some spare budget
888  *   is available for other task instances to use.
889  */
890 static void yield_task_dl(struct rq *rq)
891 {
892 	struct task_struct *p = rq->curr;
893 
894 	/*
895 	 * We make the task go to sleep until its current deadline by
896 	 * forcing its runtime to zero. This way, update_curr_dl() stops
897 	 * it and the bandwidth timer will wake it up and will give it
898 	 * new scheduling parameters (thanks to dl_yielded=1).
899 	 */
900 	if (p->dl.runtime > 0) {
901 		rq->curr->dl.dl_yielded = 1;
902 		p->dl.runtime = 0;
903 	}
904 	update_curr_dl(rq);
905 }
906 
907 #ifdef CONFIG_SMP
908 
909 static int find_later_rq(struct task_struct *task);
910 
911 static int
912 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
913 {
914 	struct task_struct *curr;
915 	struct rq *rq;
916 
917 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
918 		goto out;
919 
920 	rq = cpu_rq(cpu);
921 
922 	rcu_read_lock();
923 	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
924 
925 	/*
926 	 * If we are dealing with a -deadline task, we must
927 	 * decide where to wake it up.
928 	 * If it has a later deadline and the current task
929 	 * on this rq can't move (provided the waking task
930 	 * can!) we prefer to send it somewhere else. On the
931 	 * other hand, if it has a shorter deadline, we
932 	 * try to make it stay here, it might be important.
933 	 */
934 	if (unlikely(dl_task(curr)) &&
935 	    (curr->nr_cpus_allowed < 2 ||
936 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
937 	    (p->nr_cpus_allowed > 1)) {
938 		int target = find_later_rq(p);
939 
940 		if (target != -1)
941 			cpu = target;
942 	}
943 	rcu_read_unlock();
944 
945 out:
946 	return cpu;
947 }
948 
949 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
950 {
951 	/*
952 	 * Current can't be migrated, useless to reschedule,
953 	 * let's hope p can move out.
954 	 */
955 	if (rq->curr->nr_cpus_allowed == 1 ||
956 	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
957 		return;
958 
959 	/*
960 	 * p is migratable, so let's not schedule it and
961 	 * see if it is pushed or pulled somewhere else.
962 	 */
963 	if (p->nr_cpus_allowed != 1 &&
964 	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
965 		return;
966 
967 	resched_curr(rq);
968 }
969 
970 static int pull_dl_task(struct rq *this_rq);
971 
972 #endif /* CONFIG_SMP */
973 
974 /*
975  * Only called when both the current and waking task are -deadline
976  * tasks.
977  */
978 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
979 				  int flags)
980 {
981 	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
982 		resched_curr(rq);
983 		return;
984 	}
985 
986 #ifdef CONFIG_SMP
987 	/*
988 	 * In the unlikely case current and p have the same deadline
989 	 * let us try to decide what's the best thing to do...
990 	 */
991 	if ((p->dl.deadline == rq->curr->dl.deadline) &&
992 	    !test_tsk_need_resched(rq->curr))
993 		check_preempt_equal_dl(rq, p);
994 #endif /* CONFIG_SMP */
995 }
996 
997 #ifdef CONFIG_SCHED_HRTICK
998 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
999 {
1000 	hrtick_start(rq, p->dl.runtime);
1001 }
1002 #endif
1003 
1004 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1005 						   struct dl_rq *dl_rq)
1006 {
1007 	struct rb_node *left = dl_rq->rb_leftmost;
1008 
1009 	if (!left)
1010 		return NULL;
1011 
1012 	return rb_entry(left, struct sched_dl_entity, rb_node);
1013 }
1014 
1015 struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1016 {
1017 	struct sched_dl_entity *dl_se;
1018 	struct task_struct *p;
1019 	struct dl_rq *dl_rq;
1020 
1021 	dl_rq = &rq->dl;
1022 
1023 	if (need_pull_dl_task(rq, prev)) {
1024 		pull_dl_task(rq);
1025 		/*
1026 		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1027 		 * means a stop task can slip in, in which case we need to
1028 		 * re-start task selection.
1029 		 */
1030 		if (rq->stop && task_on_rq_queued(rq->stop))
1031 			return RETRY_TASK;
1032 	}
1033 
1034 	/*
1035 	 * When prev is DL, we may throttle it in put_prev_task().
1036 	 * So, we update time before we check for dl_nr_running.
1037 	 */
1038 	if (prev->sched_class == &dl_sched_class)
1039 		update_curr_dl(rq);
1040 
1041 	if (unlikely(!dl_rq->dl_nr_running))
1042 		return NULL;
1043 
1044 	put_prev_task(rq, prev);
1045 
1046 	dl_se = pick_next_dl_entity(rq, dl_rq);
1047 	BUG_ON(!dl_se);
1048 
1049 	p = dl_task_of(dl_se);
1050 	p->se.exec_start = rq_clock_task(rq);
1051 
1052 	/* Running task will never be pushed. */
1053 	dequeue_pushable_dl_task(rq, p);
1054 
1055 #ifdef CONFIG_SCHED_HRTICK
1056 	if (hrtick_enabled(rq))
1057 		start_hrtick_dl(rq, p);
1058 #endif
1059 
1060 	set_post_schedule(rq);
1061 
1062 	return p;
1063 }
1064 
1065 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1066 {
1067 	update_curr_dl(rq);
1068 
1069 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1070 		enqueue_pushable_dl_task(rq, p);
1071 }
1072 
1073 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1074 {
1075 	update_curr_dl(rq);
1076 
1077 #ifdef CONFIG_SCHED_HRTICK
1078 	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
1079 		start_hrtick_dl(rq, p);
1080 #endif
1081 }
1082 
1083 static void task_fork_dl(struct task_struct *p)
1084 {
1085 	/*
1086 	 * SCHED_DEADLINE tasks cannot fork and this is enforced in
1087 	 * sched_fork().
1088 	 */
1089 }
1090 
1091 static void task_dead_dl(struct task_struct *p)
1092 {
1093 	struct hrtimer *timer = &p->dl.dl_timer;
1094 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1095 
1096 	/*
1097 	 * Since we are TASK_DEAD we won't slip out of the domain!
1098 	 */
1099 	raw_spin_lock_irq(&dl_b->lock);
1100 	dl_b->total_bw -= p->dl.dl_bw;
1101 	raw_spin_unlock_irq(&dl_b->lock);
1102 
1103 	hrtimer_cancel(timer);
1104 }
1105 
1106 static void set_curr_task_dl(struct rq *rq)
1107 {
1108 	struct task_struct *p = rq->curr;
1109 
1110 	p->se.exec_start = rq_clock_task(rq);
1111 
1112 	/* You can't push away the running task */
1113 	dequeue_pushable_dl_task(rq, p);
1114 }
1115 
1116 #ifdef CONFIG_SMP
1117 
1118 /* Only try algorithms three times */
1119 #define DL_MAX_TRIES 3
1120 
1121 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1122 {
1123 	if (!task_running(rq, p) &&
1124 	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1125 		return 1;
1126 	return 0;
1127 }
1128 
1129 /* Returns the second earliest -deadline task, NULL otherwise */
1130 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
1131 {
1132 	struct rb_node *next_node = rq->dl.rb_leftmost;
1133 	struct sched_dl_entity *dl_se;
1134 	struct task_struct *p = NULL;
1135 
1136 next_node:
1137 	next_node = rb_next(next_node);
1138 	if (next_node) {
1139 		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
1140 		p = dl_task_of(dl_se);
1141 
1142 		if (pick_dl_task(rq, p, cpu))
1143 			return p;
1144 
1145 		goto next_node;
1146 	}
1147 
1148 	return NULL;
1149 }
1150 
1151 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1152 
1153 static int find_later_rq(struct task_struct *task)
1154 {
1155 	struct sched_domain *sd;
1156 	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1157 	int this_cpu = smp_processor_id();
1158 	int best_cpu, cpu = task_cpu(task);
1159 
1160 	/* Make sure the mask is initialized first */
1161 	if (unlikely(!later_mask))
1162 		return -1;
1163 
1164 	if (task->nr_cpus_allowed == 1)
1165 		return -1;
1166 
1167 	/*
1168 	 * We have to consider system topology and task affinity
1169 	 * first, then we can look for a suitable cpu.
1170 	 */
1171 	cpumask_copy(later_mask, task_rq(task)->rd->span);
1172 	cpumask_and(later_mask, later_mask, cpu_active_mask);
1173 	cpumask_and(later_mask, later_mask, &task->cpus_allowed);
1174 	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1175 			task, later_mask);
1176 	if (best_cpu == -1)
1177 		return -1;
1178 
1179 	/*
1180 	 * If we are here, some target has been found,
1181 	 * the most suitable of which is cached in best_cpu.
1182 	 * This is, among the runqueues where the current tasks
1183 	 * have later deadlines than the task's one, the rq
1184 	 * with the latest possible one.
1185 	 *
1186 	 * Now we check how well this matches with task's
1187 	 * affinity and system topology.
1188 	 *
1189 	 * The last cpu where the task ran is our first
1190 	 * guess, since it is most likely cache-hot there.
1191 	 */
1192 	if (cpumask_test_cpu(cpu, later_mask))
1193 		return cpu;
1194 	/*
1195 	 * Check if this_cpu is to be skipped (i.e., it is
1196 	 * not in the mask) or not.
1197 	 */
1198 	if (!cpumask_test_cpu(this_cpu, later_mask))
1199 		this_cpu = -1;
1200 
1201 	rcu_read_lock();
1202 	for_each_domain(cpu, sd) {
1203 		if (sd->flags & SD_WAKE_AFFINE) {
1204 
1205 			/*
1206 			 * If possible, preempting this_cpu is
1207 			 * cheaper than migrating.
1208 			 */
1209 			if (this_cpu != -1 &&
1210 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1211 				rcu_read_unlock();
1212 				return this_cpu;
1213 			}
1214 
1215 			/*
1216 			 * Last chance: if best_cpu is valid and is
1217 			 * in the mask, that becomes our choice.
1218 			 */
1219 			if (best_cpu < nr_cpu_ids &&
1220 			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1221 				rcu_read_unlock();
1222 				return best_cpu;
1223 			}
1224 		}
1225 	}
1226 	rcu_read_unlock();
1227 
1228 	/*
1229 	 * At this point, all our guesses failed, we just return
1230 	 * 'something', and let the caller sort things out.
1231 	 */
1232 	if (this_cpu != -1)
1233 		return this_cpu;
1234 
1235 	cpu = cpumask_any(later_mask);
1236 	if (cpu < nr_cpu_ids)
1237 		return cpu;
1238 
1239 	return -1;
1240 }
1241 
1242 /* Locks the rq it finds */
1243 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1244 {
1245 	struct rq *later_rq = NULL;
1246 	int tries;
1247 	int cpu;
1248 
1249 	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1250 		cpu = find_later_rq(task);
1251 
1252 		if ((cpu == -1) || (cpu == rq->cpu))
1253 			break;
1254 
1255 		later_rq = cpu_rq(cpu);
1256 
1257 		/* Retry if something changed. */
1258 		if (double_lock_balance(rq, later_rq)) {
1259 			if (unlikely(task_rq(task) != rq ||
1260 				     !cpumask_test_cpu(later_rq->cpu,
1261 				                       &task->cpus_allowed) ||
1262 				     task_running(rq, task) ||
1263 				     !task_on_rq_queued(task))) {
1264 				double_unlock_balance(rq, later_rq);
1265 				later_rq = NULL;
1266 				break;
1267 			}
1268 		}
1269 
1270 		/*
1271 		 * If the rq we found has no -deadline task, or
1272 		 * its earliest one has a later deadline than our
1273 		 * task, the rq is a good one.
1274 		 */
1275 		if (!later_rq->dl.dl_nr_running ||
1276 		    dl_time_before(task->dl.deadline,
1277 				   later_rq->dl.earliest_dl.curr))
1278 			break;
1279 
1280 		/* Otherwise we try again. */
1281 		double_unlock_balance(rq, later_rq);
1282 		later_rq = NULL;
1283 	}
1284 
1285 	return later_rq;
1286 }
1287 
1288 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1289 {
1290 	struct task_struct *p;
1291 
1292 	if (!has_pushable_dl_tasks(rq))
1293 		return NULL;
1294 
1295 	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1296 		     struct task_struct, pushable_dl_tasks);
1297 
1298 	BUG_ON(rq->cpu != task_cpu(p));
1299 	BUG_ON(task_current(rq, p));
1300 	BUG_ON(p->nr_cpus_allowed <= 1);
1301 
1302 	BUG_ON(!task_on_rq_queued(p));
1303 	BUG_ON(!dl_task(p));
1304 
1305 	return p;
1306 }
1307 
1308 /*
1309  * See if the non-running -deadline tasks on this rq
1310  * can be sent to some other CPU where they can preempt
1311  * and start executing.
1312  */
1313 static int push_dl_task(struct rq *rq)
1314 {
1315 	struct task_struct *next_task;
1316 	struct rq *later_rq;
1317 
1318 	if (!rq->dl.overloaded)
1319 		return 0;
1320 
1321 	next_task = pick_next_pushable_dl_task(rq);
1322 	if (!next_task)
1323 		return 0;
1324 
1325 retry:
1326 	if (unlikely(next_task == rq->curr)) {
1327 		WARN_ON(1);
1328 		return 0;
1329 	}
1330 
1331 	/*
1332 	 * If next_task preempts rq->curr, and rq->curr
1333 	 * can move away, it makes sense to just reschedule
1334 	 * without going further in pushing next_task.
1335 	 */
1336 	if (dl_task(rq->curr) &&
1337 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1338 	    rq->curr->nr_cpus_allowed > 1) {
1339 		resched_curr(rq);
1340 		return 0;
1341 	}
1342 
1343 	/* We might release rq lock */
1344 	get_task_struct(next_task);
1345 
1346 	/* Will lock the rq it'll find */
1347 	later_rq = find_lock_later_rq(next_task, rq);
1348 	if (!later_rq) {
1349 		struct task_struct *task;
1350 
1351 		/*
1352 		 * We must check all this again, since
1353 		 * find_lock_later_rq releases rq->lock and it is
1354 		 * then possible that next_task has migrated.
1355 		 */
1356 		task = pick_next_pushable_dl_task(rq);
1357 		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1358 			/*
1359 			 * The task is still there. We don't try
1360 			 * again, some other cpu will pull it when ready.
1361 			 */
1362 			dequeue_pushable_dl_task(rq, next_task);
1363 			goto out;
1364 		}
1365 
1366 		if (!task)
1367 			/* No more tasks */
1368 			goto out;
1369 
1370 		put_task_struct(next_task);
1371 		next_task = task;
1372 		goto retry;
1373 	}
1374 
1375 	deactivate_task(rq, next_task, 0);
1376 	set_task_cpu(next_task, later_rq->cpu);
1377 	activate_task(later_rq, next_task, 0);
1378 
1379 	resched_curr(later_rq);
1380 
1381 	double_unlock_balance(rq, later_rq);
1382 
1383 out:
1384 	put_task_struct(next_task);
1385 
1386 	return 1;
1387 }
1388 
1389 static void push_dl_tasks(struct rq *rq)
1390 {
1391 	/* Terminates as it moves a -deadline task */
1392 	while (push_dl_task(rq))
1393 		;
1394 }
1395 
1396 static int pull_dl_task(struct rq *this_rq)
1397 {
1398 	int this_cpu = this_rq->cpu, ret = 0, cpu;
1399 	struct task_struct *p;
1400 	struct rq *src_rq;
1401 	u64 dmin = LONG_MAX;
1402 
1403 	if (likely(!dl_overloaded(this_rq)))
1404 		return 0;
1405 
1406 	/*
1407 	 * Match the barrier from dl_set_overload(); this guarantees that if we
1408 	 * see overloaded we must also see the dlo_mask bit.
1409 	 */
1410 	smp_rmb();
1411 
1412 	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1413 		if (this_cpu == cpu)
1414 			continue;
1415 
1416 		src_rq = cpu_rq(cpu);
1417 
1418 		/*
1419 		 * It looks racy, and it is! However, as in sched_rt.c,
1420 		 * we are fine with this.
1421 		 */
1422 		if (this_rq->dl.dl_nr_running &&
1423 		    dl_time_before(this_rq->dl.earliest_dl.curr,
1424 				   src_rq->dl.earliest_dl.next))
1425 			continue;
1426 
1427 		/* Might drop this_rq->lock */
1428 		double_lock_balance(this_rq, src_rq);
1429 
1430 		/*
1431 		 * If there are no more pullable tasks on the
1432 		 * rq, we're done with it.
1433 		 */
1434 		if (src_rq->dl.dl_nr_running <= 1)
1435 			goto skip;
1436 
1437 		p = pick_next_earliest_dl_task(src_rq, this_cpu);
1438 
1439 		/*
1440 		 * We found a task to be pulled if:
1441 		 *  - it preempts our current (if there's one),
1442 		 *  - it will preempt the last one we pulled (if any).
1443 		 */
1444 		if (p && dl_time_before(p->dl.deadline, dmin) &&
1445 		    (!this_rq->dl.dl_nr_running ||
1446 		     dl_time_before(p->dl.deadline,
1447 				    this_rq->dl.earliest_dl.curr))) {
1448 			WARN_ON(p == src_rq->curr);
1449 			WARN_ON(!task_on_rq_queued(p));
1450 
1451 			/*
1452 			 * Then we pull iff p has actually an earlier
1453 			 * deadline than the current task of its runqueue.
1454 			 */
1455 			if (dl_time_before(p->dl.deadline,
1456 					   src_rq->curr->dl.deadline))
1457 				goto skip;
1458 
1459 			ret = 1;
1460 
1461 			deactivate_task(src_rq, p, 0);
1462 			set_task_cpu(p, this_cpu);
1463 			activate_task(this_rq, p, 0);
1464 			dmin = p->dl.deadline;
1465 
1466 			/* Is there any other task even earlier? */
1467 		}
1468 skip:
1469 		double_unlock_balance(this_rq, src_rq);
1470 	}
1471 
1472 	return ret;
1473 }
1474 
1475 static void post_schedule_dl(struct rq *rq)
1476 {
1477 	push_dl_tasks(rq);
1478 }
1479 
1480 /*
1481  * Since the task is not running and a reschedule is not going to happen
1482  * anytime soon on its runqueue, we try pushing it away now.
1483  */
1484 static void task_woken_dl(struct rq *rq, struct task_struct *p)
1485 {
1486 	if (!task_running(rq, p) &&
1487 	    !test_tsk_need_resched(rq->curr) &&
1488 	    has_pushable_dl_tasks(rq) &&
1489 	    p->nr_cpus_allowed > 1 &&
1490 	    dl_task(rq->curr) &&
1491 	    (rq->curr->nr_cpus_allowed < 2 ||
1492 	     dl_entity_preempt(&rq->curr->dl, &p->dl))) {
1493 		push_dl_tasks(rq);
1494 	}
1495 }
1496 
1497 static void set_cpus_allowed_dl(struct task_struct *p,
1498 				const struct cpumask *new_mask)
1499 {
1500 	struct rq *rq;
1501 	int weight;
1502 
1503 	BUG_ON(!dl_task(p));
1504 
1505 	/*
1506 	 * Update only if the task is actually running (i.e.,
1507 	 * it is on the rq AND it is not throttled).
1508 	 */
1509 	if (!on_dl_rq(&p->dl))
1510 		return;
1511 
1512 	weight = cpumask_weight(new_mask);
1513 
1514 	/*
1515 	 * Only update if the process changes whether or not it
1516 	 * can migrate.
1517 	 */
1518 	if ((p->nr_cpus_allowed > 1) == (weight > 1))
1519 		return;
1520 
1521 	rq = task_rq(p);
1522 
1523 	/*
1524 	 * The process used to be able to migrate OR it can now migrate
1525 	 */
1526 	if (weight <= 1) {
1527 		if (!task_current(rq, p))
1528 			dequeue_pushable_dl_task(rq, p);
1529 		BUG_ON(!rq->dl.dl_nr_migratory);
1530 		rq->dl.dl_nr_migratory--;
1531 	} else {
1532 		if (!task_current(rq, p))
1533 			enqueue_pushable_dl_task(rq, p);
1534 		rq->dl.dl_nr_migratory++;
1535 	}
1536 
1537 	update_dl_migration(&rq->dl);
1538 }
1539 
1540 /* Assumes rq->lock is held */
1541 static void rq_online_dl(struct rq *rq)
1542 {
1543 	if (rq->dl.overloaded)
1544 		dl_set_overload(rq);
1545 
1546 	if (rq->dl.dl_nr_running > 0)
1547 		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1548 }
1549 
1550 /* Assumes rq->lock is held */
1551 static void rq_offline_dl(struct rq *rq)
1552 {
1553 	if (rq->dl.overloaded)
1554 		dl_clear_overload(rq);
1555 
1556 	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1557 }
1558 
1559 void init_sched_dl_class(void)
1560 {
1561 	unsigned int i;
1562 
1563 	for_each_possible_cpu(i)
1564 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1565 					GFP_KERNEL, cpu_to_node(i));
1566 }
1567 
1568 #endif /* CONFIG_SMP */
1569 
1570 static void switched_from_dl(struct rq *rq, struct task_struct *p)
1571 {
1572 	if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
1573 		hrtimer_try_to_cancel(&p->dl.dl_timer);
1574 
1575 	__dl_clear_params(p);
1576 
1577 #ifdef CONFIG_SMP
1578 	/*
1579 	 * Since this might be the only -deadline task on the rq,
1580 	 * this is the right place to try to pull some other one
1581 	 * from an overloaded cpu, if any.
1582 	 */
1583 	if (!rq->dl.dl_nr_running)
1584 		pull_dl_task(rq);
1585 #endif
1586 }
1587 
1588 /*
1589  * When switching to -deadline, we may overload the rq, then
1590  * we try to push someone off, if possible.
1591  */
1592 static void switched_to_dl(struct rq *rq, struct task_struct *p)
1593 {
1594 	int check_resched = 1;
1595 
1596 	/*
1597 	 * If p is throttled, don't consider the possibility
1598 	 * of preempting rq->curr, the check will be done right
1599 	 * after its runtime gets replenished.
1600 	 */
1601 	if (unlikely(p->dl.dl_throttled))
1602 		return;
1603 
1604 	if (task_on_rq_queued(p) && rq->curr != p) {
1605 #ifdef CONFIG_SMP
1606 		if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
1607 			/* Only reschedule if pushing failed */
1608 			check_resched = 0;
1609 #endif /* CONFIG_SMP */
1610 		if (check_resched && task_has_dl_policy(rq->curr))
1611 			check_preempt_curr_dl(rq, p, 0);
1612 	}
1613 }
1614 
1615 /*
1616  * If the scheduling parameters of a -deadline task changed,
1617  * a push or pull operation might be needed.
1618  */
1619 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1620 			    int oldprio)
1621 {
1622 	if (task_on_rq_queued(p) || rq->curr == p) {
1623 #ifdef CONFIG_SMP
1624 		/*
1625 		 * This might be too much, but unfortunately
1626 		 * we don't have the old deadline value, and
1627 		 * we can't tell whether the task is increasing
1628 		 * or lowering its prio, so...
1629 		 */
1630 		if (!rq->dl.overloaded)
1631 			pull_dl_task(rq);
1632 
1633 		/*
1634 		 * If we now have an earlier deadline task than p,
1635 		 * then reschedule, provided p is still on this
1636 		 * runqueue.
1637 		 */
1638 		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
1639 		    rq->curr == p)
1640 			resched_curr(rq);
1641 #else
1642 		/*
1643 		 * Again, we don't know if p has an earlier
1644 		 * or later deadline, so let's blindly set a
1645 		 * (maybe not needed) rescheduling point.
1646 		 */
1647 		resched_curr(rq);
1648 #endif /* CONFIG_SMP */
1649 	} else
1650 		switched_to_dl(rq, p);
1651 }
1652 
1653 const struct sched_class dl_sched_class = {
1654 	.next			= &rt_sched_class,
1655 	.enqueue_task		= enqueue_task_dl,
1656 	.dequeue_task		= dequeue_task_dl,
1657 	.yield_task		= yield_task_dl,
1658 
1659 	.check_preempt_curr	= check_preempt_curr_dl,
1660 
1661 	.pick_next_task		= pick_next_task_dl,
1662 	.put_prev_task		= put_prev_task_dl,
1663 
1664 #ifdef CONFIG_SMP
1665 	.select_task_rq		= select_task_rq_dl,
1666 	.set_cpus_allowed       = set_cpus_allowed_dl,
1667 	.rq_online              = rq_online_dl,
1668 	.rq_offline             = rq_offline_dl,
1669 	.post_schedule		= post_schedule_dl,
1670 	.task_woken		= task_woken_dl,
1671 #endif
1672 
1673 	.set_curr_task		= set_curr_task_dl,
1674 	.task_tick		= task_tick_dl,
1675 	.task_fork              = task_fork_dl,
1676 	.task_dead		= task_dead_dl,
1677 
1678 	.prio_changed           = prio_changed_dl,
1679 	.switched_from		= switched_from_dl,
1680 	.switched_to		= switched_to_dl,
1681 };
1682