xref: /openbmc/linux/kernel/sched/deadline.c (revision 821aecd09e5ad2f8d4c3d8195333d272b392f7d3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Deadline Scheduling Class (SCHED_DEADLINE)
4  *
5  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6  *
7  * Tasks that periodically execute their instances for less than their
8  * runtime won't miss any of their deadlines.
9  * Tasks that are not periodic or sporadic, or that try to execute more
10  * than their reserved bandwidth, will be slowed down (and may potentially
11  * miss some of their deadlines), and won't affect any other task.
12  *
13  * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
14  *                    Juri Lelli <juri.lelli@gmail.com>,
15  *                    Michael Trimarchi <michael@amarulasolutions.com>,
16  *                    Fabio Checconi <fchecconi@gmail.com>
17  */
18 #include "sched.h"
19 #include "pelt.h"
20 
21 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
22 {
23 	return container_of(dl_se, struct task_struct, dl);
24 }
25 
26 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
27 {
28 	return container_of(dl_rq, struct rq, dl);
29 }
30 
31 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
32 {
33 	struct task_struct *p = dl_task_of(dl_se);
34 	struct rq *rq = task_rq(p);
35 
36 	return &rq->dl;
37 }
38 
39 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
40 {
41 	return !RB_EMPTY_NODE(&dl_se->rb_node);
42 }
43 
44 #ifdef CONFIG_RT_MUTEXES
45 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
46 {
47 	return dl_se->pi_se;
48 }
49 
50 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
51 {
52 	return pi_of(dl_se) != dl_se;
53 }
54 #else
55 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
56 {
57 	return dl_se;
58 }
59 
60 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
61 {
62 	return false;
63 }
64 #endif
65 
66 #ifdef CONFIG_SMP
67 static inline struct dl_bw *dl_bw_of(int i)
68 {
69 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
70 			 "sched RCU must be held");
71 	return &cpu_rq(i)->rd->dl_bw;
72 }
73 
74 static inline int dl_bw_cpus(int i)
75 {
76 	struct root_domain *rd = cpu_rq(i)->rd;
77 	int cpus;
78 
79 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
80 			 "sched RCU must be held");
81 
82 	if (cpumask_subset(rd->span, cpu_active_mask))
83 		return cpumask_weight(rd->span);
84 
85 	cpus = 0;
86 
87 	for_each_cpu_and(i, rd->span, cpu_active_mask)
88 		cpus++;
89 
90 	return cpus;
91 }
92 
93 static inline unsigned long __dl_bw_capacity(int i)
94 {
95 	struct root_domain *rd = cpu_rq(i)->rd;
96 	unsigned long cap = 0;
97 
98 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
99 			 "sched RCU must be held");
100 
101 	for_each_cpu_and(i, rd->span, cpu_active_mask)
102 		cap += capacity_orig_of(i);
103 
104 	return cap;
105 }
106 
107 /*
108  * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
109  * of the CPU the task is running on rather than rd's \Sum CPU capacity.
110  */
111 static inline unsigned long dl_bw_capacity(int i)
112 {
113 	if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
114 	    capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
115 		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
116 	} else {
117 		return __dl_bw_capacity(i);
118 	}
119 }
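/*
 * Rough illustration of the two paths above (numbers are only examples):
 * on a symmetric system where every CPU has capacity_orig_of() ==
 * SCHED_CAPACITY_SCALE (1024), a root domain with 4 active CPUs yields
 * 4 << SCHED_CAPACITY_SHIFT == 4096. On an asymmetric system with 4 big
 * CPUs of capacity 1024 and 4 little CPUs of capacity 512,
 * __dl_bw_capacity() sums the per-CPU capacities and returns 6144.
 */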
120 
121 static inline bool dl_bw_visited(int cpu, u64 gen)
122 {
123 	struct root_domain *rd = cpu_rq(cpu)->rd;
124 
125 	if (rd->visit_gen == gen)
126 		return true;
127 
128 	rd->visit_gen = gen;
129 	return false;
130 }
131 
132 static inline
133 void __dl_update(struct dl_bw *dl_b, s64 bw)
134 {
135 	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
136 	int i;
137 
138 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
139 			 "sched RCU must be held");
140 	for_each_cpu_and(i, rd->span, cpu_active_mask) {
141 		struct rq *rq = cpu_rq(i);
142 
143 		rq->dl.extra_bw += bw;
144 	}
145 }
146 #else
147 static inline struct dl_bw *dl_bw_of(int i)
148 {
149 	return &cpu_rq(i)->dl.dl_bw;
150 }
151 
152 static inline int dl_bw_cpus(int i)
153 {
154 	return 1;
155 }
156 
157 static inline unsigned long dl_bw_capacity(int i)
158 {
159 	return SCHED_CAPACITY_SCALE;
160 }
161 
162 static inline bool dl_bw_visited(int cpu, u64 gen)
163 {
164 	return false;
165 }
166 
167 static inline
168 void __dl_update(struct dl_bw *dl_b, s64 bw)
169 {
170 	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
171 
172 	dl->extra_bw += bw;
173 }
174 #endif
175 
176 static inline
177 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
178 {
179 	dl_b->total_bw -= tsk_bw;
180 	__dl_update(dl_b, (s32)tsk_bw / cpus);
181 }
182 
183 static inline
184 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
185 {
186 	dl_b->total_bw += tsk_bw;
187 	__dl_update(dl_b, -((s32)tsk_bw / cpus));
188 }
189 
190 static inline bool
191 __dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
192 {
193 	return dl_b->bw != -1 &&
194 	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
195 }
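/*
 * Worked example of the admission test above (illustrative values only):
 * dl_b->bw holds the per-CPU bandwidth limit scaled by 2^BW_SHIFT, so a
 * 95% limit is roughly 996147. With cap == 2048 (two full-capacity CPUs),
 * cap_scale(dl_b->bw, cap) == 996147 * 2048 >> SCHED_CAPACITY_SHIFT ==
 * 1992294, i.e. ~190% of one CPU. A request is rejected only when
 * total_bw - old_bw + new_bw would exceed that value.
 */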
196 
197 static inline
198 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
199 {
200 	u64 old = dl_rq->running_bw;
201 
202 	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
203 	dl_rq->running_bw += dl_bw;
204 	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
205 	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
206 	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
207 	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
208 }
209 
210 static inline
211 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
212 {
213 	u64 old = dl_rq->running_bw;
214 
215 	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
216 	dl_rq->running_bw -= dl_bw;
217 	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
218 	if (dl_rq->running_bw > old)
219 		dl_rq->running_bw = 0;
220 	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
221 	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
222 }
223 
224 static inline
225 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
226 {
227 	u64 old = dl_rq->this_bw;
228 
229 	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
230 	dl_rq->this_bw += dl_bw;
231 	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
232 }
233 
234 static inline
235 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
236 {
237 	u64 old = dl_rq->this_bw;
238 
239 	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
240 	dl_rq->this_bw -= dl_bw;
241 	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
242 	if (dl_rq->this_bw > old)
243 		dl_rq->this_bw = 0;
244 	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
245 }
246 
247 static inline
248 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
249 {
250 	if (!dl_entity_is_special(dl_se))
251 		__add_rq_bw(dl_se->dl_bw, dl_rq);
252 }
253 
254 static inline
255 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
256 {
257 	if (!dl_entity_is_special(dl_se))
258 		__sub_rq_bw(dl_se->dl_bw, dl_rq);
259 }
260 
261 static inline
262 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
263 {
264 	if (!dl_entity_is_special(dl_se))
265 		__add_running_bw(dl_se->dl_bw, dl_rq);
266 }
267 
268 static inline
269 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
270 {
271 	if (!dl_entity_is_special(dl_se))
272 		__sub_running_bw(dl_se->dl_bw, dl_rq);
273 }
274 
275 static void dl_change_utilization(struct task_struct *p, u64 new_bw)
276 {
277 	struct rq *rq;
278 
279 	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
280 
281 	if (task_on_rq_queued(p))
282 		return;
283 
284 	rq = task_rq(p);
285 	if (p->dl.dl_non_contending) {
286 		sub_running_bw(&p->dl, &rq->dl);
287 		p->dl.dl_non_contending = 0;
288 		/*
289 		 * If the timer handler is currently running and the
290 		 * timer cannot be canceled, inactive_task_timer()
291 		 * will see that dl_non_contending is not set, and
292 		 * will not touch the rq's active utilization,
293 		 * so we are still safe.
294 		 */
295 		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
296 			put_task_struct(p);
297 	}
298 	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
299 	__add_rq_bw(new_bw, &rq->dl);
300 }
301 
302 /*
303  * The utilization of a task cannot be immediately removed from
304  * the rq active utilization (running_bw) when the task blocks.
305  * Instead, we have to wait for the so called "0-lag time".
306  *
307  * If a task blocks before the "0-lag time", a timer (the inactive
308  * timer) is armed, and running_bw is decreased when the timer
309  * fires.
310  *
311  * If the task wakes up again before the inactive timer fires,
312  * the timer is canceled, whereas if the task wakes up after the
313  * inactive timer fired (and running_bw has been decreased) the
314  * task's utilization has to be added to running_bw again.
315  * A flag in the deadline scheduling entity (dl_non_contending)
316  * is used to avoid race conditions between the inactive timer handler
317  * and task wakeups.
318  *
319  * The following diagram shows how running_bw is updated. A task is
320  * "ACTIVE" when its utilization contributes to running_bw; an
321  * "ACTIVE contending" task is in the TASK_RUNNING state, while an
322  * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
323  * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
324  * time already passed, which does not contribute to running_bw anymore.
325  *                              +------------------+
326  *             wakeup           |    ACTIVE        |
327  *          +------------------>+   contending     |
328  *          | add_running_bw    |                  |
329  *          |                   +----+------+------+
330  *          |                        |      ^
331  *          |                dequeue |      |
332  * +--------+-------+                |      |
333  * |                |   t >= 0-lag   |      | wakeup
334  * |    INACTIVE    |<---------------+      |
335  * |                | sub_running_bw |      |
336  * +--------+-------+                |      |
337  *          ^                        |      |
338  *          |              t < 0-lag |      |
339  *          |                        |      |
340  *          |                        V      |
341  *          |                   +----+------+------+
342  *          | sub_running_bw    |    ACTIVE        |
343  *          +-------------------+                  |
344  *            inactive timer    |  non contending  |
345  *            fired             +------------------+
346  *
347  * The task_non_contending() function is invoked when a task
348  * blocks, and checks if the 0-lag time already passed or
349  * not (in the first case, it directly updates running_bw;
350  * in the second case, it arms the inactive timer).
351  *
352  * The task_contending() function is invoked when a task wakes
353  * up, and checks if the task is still in the "ACTIVE non contending"
354  * state or not (in the second case, it updates running_bw).
355  */
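/*
 * The "0-lag time" used below is the instant from which running at the
 * task's full bandwidth would consume exactly the remaining runtime by
 * the deadline: t0 = deadline - runtime * dl_period / dl_runtime.
 * As a purely illustrative example: with dl_runtime = 10ms and
 * dl_period = 100ms (10% bandwidth), 5ms of runtime left and the
 * absolute deadline at t = 80ms, t0 = 80ms - 5ms * 100 / 10 = 30ms; a
 * task blocking at t = 25ms keeps contributing to running_bw for
 * another 5ms.
 */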
356 static void task_non_contending(struct task_struct *p)
357 {
358 	struct sched_dl_entity *dl_se = &p->dl;
359 	struct hrtimer *timer = &dl_se->inactive_timer;
360 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
361 	struct rq *rq = rq_of_dl_rq(dl_rq);
362 	s64 zerolag_time;
363 
364 	/*
365 	 * If this is a non-deadline task that has been boosted,
366 	 * do nothing
367 	 */
368 	if (dl_se->dl_runtime == 0)
369 		return;
370 
371 	if (dl_entity_is_special(dl_se))
372 		return;
373 
374 	WARN_ON(dl_se->dl_non_contending);
375 
376 	zerolag_time = dl_se->deadline -
377 		 div64_long((dl_se->runtime * dl_se->dl_period),
378 			dl_se->dl_runtime);
379 
380 	/*
381 	 * Using relative times instead of the absolute "0-lag time"
382 	 * allows to simplify the code
383 	 */
384 	zerolag_time -= rq_clock(rq);
385 
386 	/*
387 	 * If the "0-lag time" already passed, decrease the active
388 	 * utilization now, instead of starting a timer
389 	 */
390 	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
391 		if (dl_task(p))
392 			sub_running_bw(dl_se, dl_rq);
393 		if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
394 			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
395 
396 			if (READ_ONCE(p->__state) == TASK_DEAD)
397 				sub_rq_bw(&p->dl, &rq->dl);
398 			raw_spin_lock(&dl_b->lock);
399 			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
400 			__dl_clear_params(p);
401 			raw_spin_unlock(&dl_b->lock);
402 		}
403 
404 		return;
405 	}
406 
407 	dl_se->dl_non_contending = 1;
408 	get_task_struct(p);
409 	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
410 }
411 
412 static void task_contending(struct sched_dl_entity *dl_se, int flags)
413 {
414 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
415 
416 	/*
417 	 * If this is a non-deadline task that has been boosted,
418 	 * do nothing
419 	 */
420 	if (dl_se->dl_runtime == 0)
421 		return;
422 
423 	if (flags & ENQUEUE_MIGRATED)
424 		add_rq_bw(dl_se, dl_rq);
425 
426 	if (dl_se->dl_non_contending) {
427 		dl_se->dl_non_contending = 0;
428 		/*
429 		 * If the timer handler is currently running and the
430 		 * timer cannot be canceled, inactive_task_timer()
431 		 * will see that dl_non_contending is not set, and
432 		 * will not touch the rq's active utilization,
433 		 * so we are still safe.
434 		 */
435 		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
436 			put_task_struct(dl_task_of(dl_se));
437 	} else {
438 		/*
439 		 * Since "dl_non_contending" is not set, the
440 		 * task's utilization has already been removed from
441 		 * active utilization (either when the task blocked,
442 		 * or when the "inactive timer" fired).
443 		 * So, add it back.
444 		 */
445 		add_running_bw(dl_se, dl_rq);
446 	}
447 }
448 
449 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
450 {
451 	struct sched_dl_entity *dl_se = &p->dl;
452 
453 	return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
454 }
455 
456 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
457 
458 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
459 {
460 	raw_spin_lock_init(&dl_b->dl_runtime_lock);
461 	dl_b->dl_period = period;
462 	dl_b->dl_runtime = runtime;
463 }
464 
465 void init_dl_bw(struct dl_bw *dl_b)
466 {
467 	raw_spin_lock_init(&dl_b->lock);
468 	if (global_rt_runtime() == RUNTIME_INF)
469 		dl_b->bw = -1;
470 	else
471 		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
472 	dl_b->total_bw = 0;
473 }
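/*
 * For reference, to_ratio() scales runtime/period by 2^BW_SHIFT. With
 * the usual rt defaults (sched_rt_runtime_us = 950000,
 * sched_rt_period_us = 1000000), dl_b->bw ends up at roughly
 * 0.95 << BW_SHIFT, i.e. ~996147: -deadline tasks may use at most 95%
 * of each CPU. These values are only the common defaults and are not
 * guaranteed for every configuration.
 */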
474 
475 void init_dl_rq(struct dl_rq *dl_rq)
476 {
477 	dl_rq->root = RB_ROOT_CACHED;
478 
479 #ifdef CONFIG_SMP
480 	/* zero means no -deadline tasks */
481 	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
482 
483 	dl_rq->dl_nr_migratory = 0;
484 	dl_rq->overloaded = 0;
485 	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
486 #else
487 	init_dl_bw(&dl_rq->dl_bw);
488 #endif
489 
490 	dl_rq->running_bw = 0;
491 	dl_rq->this_bw = 0;
492 	init_dl_rq_bw_ratio(dl_rq);
493 }
494 
495 #ifdef CONFIG_SMP
496 
497 static inline int dl_overloaded(struct rq *rq)
498 {
499 	return atomic_read(&rq->rd->dlo_count);
500 }
501 
502 static inline void dl_set_overload(struct rq *rq)
503 {
504 	if (!rq->online)
505 		return;
506 
507 	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
508 	/*
509 	 * Must be visible before the overload count is
510 	 * set (as in sched_rt.c).
511 	 *
512 	 * Matched by the barrier in pull_dl_task().
513 	 */
514 	smp_wmb();
515 	atomic_inc(&rq->rd->dlo_count);
516 }
517 
518 static inline void dl_clear_overload(struct rq *rq)
519 {
520 	if (!rq->online)
521 		return;
522 
523 	atomic_dec(&rq->rd->dlo_count);
524 	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
525 }
526 
527 static void update_dl_migration(struct dl_rq *dl_rq)
528 {
529 	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
530 		if (!dl_rq->overloaded) {
531 			dl_set_overload(rq_of_dl_rq(dl_rq));
532 			dl_rq->overloaded = 1;
533 		}
534 	} else if (dl_rq->overloaded) {
535 		dl_clear_overload(rq_of_dl_rq(dl_rq));
536 		dl_rq->overloaded = 0;
537 	}
538 }
539 
540 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
541 {
542 	struct task_struct *p = dl_task_of(dl_se);
543 
544 	if (p->nr_cpus_allowed > 1)
545 		dl_rq->dl_nr_migratory++;
546 
547 	update_dl_migration(dl_rq);
548 }
549 
550 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
551 {
552 	struct task_struct *p = dl_task_of(dl_se);
553 
554 	if (p->nr_cpus_allowed > 1)
555 		dl_rq->dl_nr_migratory--;
556 
557 	update_dl_migration(dl_rq);
558 }
559 
560 #define __node_2_pdl(node) \
561 	rb_entry((node), struct task_struct, pushable_dl_tasks)
562 
563 static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
564 {
565 	return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
566 }
567 
568 /*
569  * The list of pushable -deadline tasks is not a plist, like in
570  * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
571  */
572 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
573 {
574 	struct rb_node *leftmost;
575 
576 	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
577 
578 	leftmost = rb_add_cached(&p->pushable_dl_tasks,
579 				 &rq->dl.pushable_dl_tasks_root,
580 				 __pushable_less);
581 	if (leftmost)
582 		rq->dl.earliest_dl.next = p->dl.deadline;
583 }
584 
585 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
586 {
587 	struct dl_rq *dl_rq = &rq->dl;
588 	struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
589 	struct rb_node *leftmost;
590 
591 	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
592 		return;
593 
594 	leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
595 	if (leftmost)
596 		dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
597 
598 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
599 }
600 
601 static inline int has_pushable_dl_tasks(struct rq *rq)
602 {
603 	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
604 }
605 
606 static int push_dl_task(struct rq *rq);
607 
608 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
609 {
610 	return rq->online && dl_task(prev);
611 }
612 
613 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
614 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
615 
616 static void push_dl_tasks(struct rq *);
617 static void pull_dl_task(struct rq *);
618 
619 static inline void deadline_queue_push_tasks(struct rq *rq)
620 {
621 	if (!has_pushable_dl_tasks(rq))
622 		return;
623 
624 	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
625 }
626 
627 static inline void deadline_queue_pull_task(struct rq *rq)
628 {
629 	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
630 }
631 
632 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
633 
634 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
635 {
636 	struct rq *later_rq = NULL;
637 	struct dl_bw *dl_b;
638 
639 	later_rq = find_lock_later_rq(p, rq);
640 	if (!later_rq) {
641 		int cpu;
642 
643 		/*
644 		 * If we cannot preempt any rq, fall back to pick any
645 		 * online CPU:
646 		 */
647 		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
648 		if (cpu >= nr_cpu_ids) {
649 			/*
650 			 * Failed to find any suitable CPU.
651 			 * The task will never come back!
652 			 */
653 			BUG_ON(dl_bandwidth_enabled());
654 
655 			/*
656 			 * If admission control is disabled we
657 			 * try a little harder to let the task
658 			 * run.
659 			 */
660 			cpu = cpumask_any(cpu_active_mask);
661 		}
662 		later_rq = cpu_rq(cpu);
663 		double_lock_balance(rq, later_rq);
664 	}
665 
666 	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
667 		/*
668 		 * Inactive timer is armed (or callback is running, but
669 		 * waiting for us to release rq locks). In any case, when it
670 		 * will fire (or continue), it will see running_bw of this
671 		 * task migrated to later_rq (and correctly handle it).
672 		 */
673 		sub_running_bw(&p->dl, &rq->dl);
674 		sub_rq_bw(&p->dl, &rq->dl);
675 
676 		add_rq_bw(&p->dl, &later_rq->dl);
677 		add_running_bw(&p->dl, &later_rq->dl);
678 	} else {
679 		sub_rq_bw(&p->dl, &rq->dl);
680 		add_rq_bw(&p->dl, &later_rq->dl);
681 	}
682 
683 	/*
684 	 * And we finally need to fixup root_domain(s) bandwidth accounting,
685 	 * since p is still hanging out in the old (now moved to default) root
686 	 * domain.
687 	 */
688 	dl_b = &rq->rd->dl_bw;
689 	raw_spin_lock(&dl_b->lock);
690 	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
691 	raw_spin_unlock(&dl_b->lock);
692 
693 	dl_b = &later_rq->rd->dl_bw;
694 	raw_spin_lock(&dl_b->lock);
695 	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
696 	raw_spin_unlock(&dl_b->lock);
697 
698 	set_task_cpu(p, later_rq->cpu);
699 	double_unlock_balance(later_rq, rq);
700 
701 	return later_rq;
702 }
703 
704 #else
705 
706 static inline
707 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
708 {
709 }
710 
711 static inline
712 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
713 {
714 }
715 
716 static inline
717 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
718 {
719 }
720 
721 static inline
722 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
723 {
724 }
725 
726 static inline void deadline_queue_push_tasks(struct rq *rq)
727 {
728 }
729 
730 static inline void deadline_queue_pull_task(struct rq *rq)
731 {
732 }
733 #endif /* CONFIG_SMP */
734 
735 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
736 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
737 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
738 
739 /*
740  * We are being explicitly informed that a new instance is starting,
741  * and this means that:
742  *  - the absolute deadline of the entity has to be placed at
743  *    current time + relative deadline;
744  *  - the runtime of the entity has to be set to the maximum value.
745  *
746  * The capability of specifying such an event is useful whenever a -deadline
747  * entity wants to (try to!) synchronize its behaviour with the scheduler's
748  * one, and to (try to!) reconcile itself with its own scheduling
749  * parameters.
750  */
751 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
752 {
753 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
754 	struct rq *rq = rq_of_dl_rq(dl_rq);
755 
756 	WARN_ON(is_dl_boosted(dl_se));
757 	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
758 
759 	/*
760 	 * We are racing with the deadline timer. So, do nothing because
761 	 * the deadline timer handler will take care of properly recharging
762 	 * the runtime and postponing the deadline
763 	 */
764 	if (dl_se->dl_throttled)
765 		return;
766 
767 	/*
768 	 * We use the regular wall clock time to set deadlines in the
769 	 * future; in fact, we must consider execution overheads (time
770 	 * spent on hardirq context, etc.).
771 	 */
772 	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
773 	dl_se->runtime = dl_se->dl_runtime;
774 }
775 
776 /*
777  * Pure Earliest Deadline First (EDF) scheduling does not deal with the
778  * possibility of an entity lasting more than what it declared, and thus
779  * exhausting its runtime.
780  *
781  * Here we are interested in making runtime overrun possible, but we do
782  * not want an entity that is misbehaving to affect the scheduling of all
783  * other entities.
784  * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
785  * is used, in order to confine each entity within its own bandwidth.
786  *
787  * This function deals exactly with that, and ensures that when the runtime
788  * of an entity is replenished, its deadline is also postponed. That ensures
789  * the overrunning entity can't interfere with other entities in the system and
790  * can't make them miss their deadlines. Reasons why this kind of overrun
791  * could happen are, typically, an entity voluntarily trying to exceed its
792  * runtime, or simply having underestimated it during sched_setattr().
793  */
794 static void replenish_dl_entity(struct sched_dl_entity *dl_se)
795 {
796 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
797 	struct rq *rq = rq_of_dl_rq(dl_rq);
798 
799 	BUG_ON(pi_of(dl_se)->dl_runtime <= 0);
800 
801 	/*
802 	 * This could be the case for a !-dl task that is boosted.
803 	 * Just go with full inherited parameters.
804 	 */
805 	if (dl_se->dl_deadline == 0) {
806 		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
807 		dl_se->runtime = pi_of(dl_se)->dl_runtime;
808 	}
809 
810 	if (dl_se->dl_yielded && dl_se->runtime > 0)
811 		dl_se->runtime = 0;
812 
813 	/*
814 	 * We keep moving the deadline away until we get some
815 	 * available runtime for the entity. This ensures correct
816 	 * handling of situations where the runtime overrun is
817 	 * arbitrary large.
818 	 */
819 	while (dl_se->runtime <= 0) {
820 		dl_se->deadline += pi_of(dl_se)->dl_period;
821 		dl_se->runtime += pi_of(dl_se)->dl_runtime;
822 	}
823 
824 	/*
825 	 * At this point, the deadline really should be "in
826 	 * the future" with respect to rq->clock. If it's
827 	 * not, we are, for some reason, lagging too much!
828 	 * Anyway, after having warned userspace about that,
829 	 * we still try to keep the things running by
830 	 * resetting the deadline and the budget of the
831 	 * entity.
832 	 */
833 	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
834 		printk_deferred_once("sched: DL replenish lagged too much\n");
835 		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
836 		dl_se->runtime = pi_of(dl_se)->dl_runtime;
837 	}
838 
839 	if (dl_se->dl_yielded)
840 		dl_se->dl_yielded = 0;
841 	if (dl_se->dl_throttled)
842 		dl_se->dl_throttled = 0;
843 }
844 
845 /*
846  * Here we check if --at time t-- an entity (which is probably being
847  * [re]activated or, in general, enqueued) can use its remaining runtime
848  * and its current deadline _without_ exceeding the bandwidth it is
849  * assigned (function returns true if it can't). We are in fact applying
850  * one of the CBS rules: when a task wakes up, if the residual runtime
851  * over residual deadline fits within the allocated bandwidth, then we
852  * can keep the current (absolute) deadline and residual budget without
853  * disrupting the schedulability of the system. Otherwise, we should
854  * refill the runtime and set the deadline a period in the future,
855  * because keeping the current (absolute) deadline of the task would
856  * result in breaking guarantees promised to other tasks (refer to
857  * Documentation/scheduler/sched-deadline.rst for more information).
858  *
859  * This function returns true if:
860  *
861  *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
862  *
863  * IOW we can't recycle current parameters.
864  *
865  * Notice that the bandwidth check is done against the deadline. For
866  * tasks with deadline equal to period this is the same as using
867  * dl_period instead of dl_deadline in the equation above.
868  */
869 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
870 {
871 	u64 left, right;
872 
873 	/*
874 	 * left and right are the two sides of the equation above,
875 	 * after a bit of shuffling to use multiplications instead
876 	 * of divisions.
877 	 *
878 	 * Note that none of the time values involved in the two
879 	 * multiplications are absolute: dl_deadline and dl_runtime
880 	 * are the relative deadline and the maximum runtime of each
881 	 * instance, runtime is the runtime left for the last instance
882 	 * and (deadline - t), since t is rq->clock, is the time left
883 	 * to the (absolute) deadline. Even if overflowing the u64 type
884 	 * is very unlikely to occur in both cases, here we scale down
885 	 * as we want to avoid that risk at all. Scaling down by 10
886 	 * means that we reduce granularity to 1us. We are fine with it,
887 	 * since this is only a true/false check and, anyway, thinking
888 	 * of anything below microseconds resolution is actually fiction
889 	 * (but still we want to give the user that illusion >;).
890 	 */
891 	left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
892 	right = ((dl_se->deadline - t) >> DL_SCALE) *
893 		(pi_of(dl_se)->dl_runtime >> DL_SCALE);
894 
895 	return dl_time_before(right, left);
896 }
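/*
 * Small worked example of the check above (illustrative values): a task
 * with dl_runtime = 10ms and dl_deadline = 100ms wakes with 8ms of
 * runtime left and 30ms to its old deadline. After the >> DL_SCALE
 * scaling, left ~ 100ms * 8ms and right ~ 30ms * 10ms; since
 * 8/30 > 10/100 the old parameters would exceed the reserved bandwidth,
 * so the entity gets a fresh deadline and runtime.
 */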
897 
898 /*
899  * Revised wakeup rule [1]: For self-suspending tasks, rather than
900  * re-initializing the task's runtime and deadline, the revised wakeup
901  * rule adjusts the task's runtime to avoid the task overrunning its
902  * density.
903  *
904  * Reasoning: a task may overrun the density if:
905  *    runtime / (deadline - t) > dl_runtime / dl_deadline
906  *
907  * Therefore, runtime can be adjusted to:
908  *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
909  *
910  * In this way, the runtime will be equal to the maximum density
911  * the task can use without breaking any rule.
912  *
913  * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
914  * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
915  */
916 static void
917 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
918 {
919 	u64 laxity = dl_se->deadline - rq_clock(rq);
920 
921 	/*
922 	 * If the task has deadline < period, and the deadline is in the past,
923 	 * it should already be throttled before this check.
924 	 *
925 	 * See update_dl_entity() comments for further details.
926 	 */
927 	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
928 
929 	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
930 }
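/*
 * Illustration of the revised rule (example values): dl_density is
 * dl_runtime / dl_deadline scaled by 2^BW_SHIFT, so a task with
 * dl_runtime = 10ms and dl_deadline = 50ms has density 0.2. If it wakes
 * with 20ms of laxity left, its runtime is trimmed to 0.2 * 20ms = 4ms,
 * keeping the residual density at or below the declared one.
 */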
931 
932 /*
933  * Regarding the deadline, a task with implicit deadline has a relative
934  * deadline == relative period. A task with constrained deadline has a
935  * relative deadline <= relative period.
936  *
937  * We support constrained deadline tasks. However, there are some restrictions
938  * applied only for tasks which do not have an implicit deadline. See
939  * update_dl_entity() to know more about such restrictions.
940  *
941  * dl_is_implicit() returns true if the task has an implicit deadline.
942  */
943 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
944 {
945 	return dl_se->dl_deadline == dl_se->dl_period;
946 }
947 
948 /*
949  * When a deadline entity is placed in the runqueue, its runtime and deadline
950  * might need to be updated. This is done by a CBS wake up rule. There are two
951  * different rules: 1) the original CBS; and 2) the Revisited CBS.
952  *
953  * When the task is starting a new period, the Original CBS is used. In this
954  * case, the runtime is replenished and a new absolute deadline is set.
955  *
956  * When a task is queued before the beginning of the next period, using the
957  * remaining runtime and deadline could make the entity overflow, see
958  * dl_entity_overflow() to find out more about runtime overflow. When such a case
959  * is detected, the runtime and deadline need to be updated.
960  *
961  * If the task has an implicit deadline, i.e., deadline == period, the Original
962  * CBS is applied: the runtime is replenished and a new absolute deadline is
963  * set, as in the previous cases.
964  *
965  * However, the Original CBS does not work properly for tasks with
966  * deadline < period, which are said to have a constrained deadline. By
967  * applying the Original CBS, a constrained deadline task would be able to run
968  * runtime/deadline in a period. With deadline < period, the task would
969  * overrun the runtime/period allowed bandwidth, breaking the admission test.
970  *
971  * In order to prevent this misbehavior, the Revisited CBS is used for
972  * constrained deadline tasks when a runtime overflow is detected. In the
973  * Revisited CBS, rather than replenishing & setting a new absolute deadline,
974  * the remaining runtime of the task is reduced to avoid runtime overflow.
975  * Please refer to the comments in the update_dl_revised_wakeup() function to find
976  * more about the Revised CBS rule.
977  */
978 static void update_dl_entity(struct sched_dl_entity *dl_se)
979 {
980 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
981 	struct rq *rq = rq_of_dl_rq(dl_rq);
982 
983 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
984 	    dl_entity_overflow(dl_se, rq_clock(rq))) {
985 
986 		if (unlikely(!dl_is_implicit(dl_se) &&
987 			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
988 			     !is_dl_boosted(dl_se))) {
989 			update_dl_revised_wakeup(dl_se, rq);
990 			return;
991 		}
992 
993 		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
994 		dl_se->runtime = pi_of(dl_se)->dl_runtime;
995 	}
996 }
997 
998 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
999 {
1000 	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
1001 }
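/*
 * dl_next_period() works because deadline - dl_deadline is the start of
 * the current period. Example (illustrative numbers): with
 * dl_deadline = 10ms, dl_period = 30ms and the absolute deadline at
 * t = 40ms, the current period started at t = 30ms and the next one
 * (the replenishment instant) begins at t = 60ms.
 */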
1002 
1003 /*
1004  * If the entity depleted all its runtime, and if we want it to sleep
1005  * while waiting for some new execution time to become available, we
1006  * set the bandwidth replenishment timer to the replenishment instant
1007  * and try to activate it.
1008  *
1009  * Notice that it is important for the caller to know if the timer
1010  * actually started or not (i.e., the replenishment instant is in
1011  * the future or in the past).
1012  */
1013 static int start_dl_timer(struct task_struct *p)
1014 {
1015 	struct sched_dl_entity *dl_se = &p->dl;
1016 	struct hrtimer *timer = &dl_se->dl_timer;
1017 	struct rq *rq = task_rq(p);
1018 	ktime_t now, act;
1019 	s64 delta;
1020 
1021 	lockdep_assert_rq_held(rq);
1022 
1023 	/*
1024 	 * We want the timer to fire at the deadline, but considering
1025 	 * that it is actually coming from rq->clock and not from
1026 	 * hrtimer's time base reading.
1027 	 */
1028 	act = ns_to_ktime(dl_next_period(dl_se));
1029 	now = hrtimer_cb_get_time(timer);
1030 	delta = ktime_to_ns(now) - rq_clock(rq);
1031 	act = ktime_add_ns(act, delta);
1032 
1033 	/*
1034 	 * If the expiry time already passed, e.g., because the value
1035 	 * chosen as the deadline is too small, don't even try to
1036 	 * start the timer in the past!
1037 	 */
1038 	if (ktime_us_delta(act, now) < 0)
1039 		return 0;
1040 
1041 	/*
1042 	 * !enqueued will guarantee another callback; even if one is already in
1043 	 * progress. This ensures a balanced {get,put}_task_struct().
1044 	 *
1045 	 * The race against __run_timer() clearing the enqueued state is
1046 	 * harmless because we're holding task_rq()->lock, therefore the timer
1047 	 * expiring after we've done the check will wait on its task_rq_lock()
1048 	 * and observe our state.
1049 	 */
1050 	if (!hrtimer_is_queued(timer)) {
1051 		get_task_struct(p);
1052 		hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
1053 	}
1054 
1055 	return 1;
1056 }
1057 
1058 /*
1059  * This is the bandwidth enforcement timer callback. If here, we know
1060  * a task is not on its dl_rq, since the fact that the timer was running
1061  * means the task is throttled and needs a runtime replenishment.
1062  *
1063  * However, what we actually do depends on whether the task is active
1064  * (it is on its rq) or has been removed from there by a call to
1065  * dequeue_task_dl(). In the former case we must issue the runtime
1066  * replenishment and add the task back to the dl_rq; in the latter, we just
1067  * do nothing but clearing dl_throttled, so that runtime and deadline
1068  * updating (and the queueing back to dl_rq) will be done by the
1069  * next call to enqueue_task_dl().
1070  */
1071 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1072 {
1073 	struct sched_dl_entity *dl_se = container_of(timer,
1074 						     struct sched_dl_entity,
1075 						     dl_timer);
1076 	struct task_struct *p = dl_task_of(dl_se);
1077 	struct rq_flags rf;
1078 	struct rq *rq;
1079 
1080 	rq = task_rq_lock(p, &rf);
1081 
1082 	/*
1083 	 * The task might have changed its scheduling policy to something
1084 	 * different than SCHED_DEADLINE (through switched_from_dl()).
1085 	 */
1086 	if (!dl_task(p))
1087 		goto unlock;
1088 
1089 	/*
1090 	 * The task might have been boosted by someone else and might be in the
1091 	 * boosting/deboosting path, it's not throttled.
1092 	 */
1093 	if (is_dl_boosted(dl_se))
1094 		goto unlock;
1095 
1096 	/*
1097 	 * Spurious timer due to start_dl_timer() race; or we already received
1098 	 * a replenishment from rt_mutex_setprio().
1099 	 */
1100 	if (!dl_se->dl_throttled)
1101 		goto unlock;
1102 
1103 	sched_clock_tick();
1104 	update_rq_clock(rq);
1105 
1106 	/*
1107 	 * If the throttle happened during sched-out; like:
1108 	 *
1109 	 *   schedule()
1110 	 *     deactivate_task()
1111 	 *       dequeue_task_dl()
1112 	 *         update_curr_dl()
1113 	 *           start_dl_timer()
1114 	 *         __dequeue_task_dl()
1115 	 *     prev->on_rq = 0;
1116 	 *
1117 	 * We can be both throttled and !queued. Replenish the counter
1118 	 * but do not enqueue -- wait for our wakeup to do that.
1119 	 */
1120 	if (!task_on_rq_queued(p)) {
1121 		replenish_dl_entity(dl_se);
1122 		goto unlock;
1123 	}
1124 
1125 #ifdef CONFIG_SMP
1126 	if (unlikely(!rq->online)) {
1127 		/*
1128 		 * If the runqueue is no longer available, migrate the
1129 		 * task elsewhere. This necessarily changes rq.
1130 		 */
1131 		lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
1132 		rq = dl_task_offline_migration(rq, p);
1133 		rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
1134 		update_rq_clock(rq);
1135 
1136 		/*
1137 		 * Now that the task has been migrated to the new RQ and we
1138 		 * have that locked, proceed as normal and enqueue the task
1139 		 * there.
1140 		 */
1141 	}
1142 #endif
1143 
1144 	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1145 	if (dl_task(rq->curr))
1146 		check_preempt_curr_dl(rq, p, 0);
1147 	else
1148 		resched_curr(rq);
1149 
1150 #ifdef CONFIG_SMP
1151 	/*
1152 	 * Queueing this task back might have overloaded rq, check if we need
1153 	 * to kick someone away.
1154 	 */
1155 	if (has_pushable_dl_tasks(rq)) {
1156 		/*
1157 		 * Nothing relies on rq->lock after this, so it's safe to drop
1158 		 * rq->lock.
1159 		 */
1160 		rq_unpin_lock(rq, &rf);
1161 		push_dl_task(rq);
1162 		rq_repin_lock(rq, &rf);
1163 	}
1164 #endif
1165 
1166 unlock:
1167 	task_rq_unlock(rq, p, &rf);
1168 
1169 	/*
1170 	 * This can free the task_struct, including this hrtimer, do not touch
1171 	 * anything related to that after this.
1172 	 */
1173 	put_task_struct(p);
1174 
1175 	return HRTIMER_NORESTART;
1176 }
1177 
1178 void init_dl_task_timer(struct sched_dl_entity *dl_se)
1179 {
1180 	struct hrtimer *timer = &dl_se->dl_timer;
1181 
1182 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1183 	timer->function = dl_task_timer;
1184 }
1185 
1186 /*
1187  * During the activation, CBS checks if it can reuse the current task's
1188  * runtime and period. If the deadline of the task is in the past, CBS
1189  * cannot use the runtime, and so it replenishes the task. This rule
1190  * works fine for implicit deadline tasks (deadline == period), and the
1191  * CBS was designed for implicit deadline tasks. However, a task with
1192  * constrained deadline (deadline < period) might be awakened after the
1193  * deadline, but before the next period. In this case, replenishing the
1194  * task would allow it to run for runtime / deadline. As in this case
1195  * deadline < period, CBS enables a task to run for more than the
1196  * runtime / period. In a very loaded system, this can cause a domino
1197  * effect, making other tasks miss their deadlines.
1198  *
1199  * To avoid this problem, in the activation of a constrained deadline
1200  * task after the deadline but before the next period, throttle the
1201  * task and set the replenishing timer to the beginning of the next period,
1202  * unless it is boosted.
1203  */
1204 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1205 {
1206 	struct task_struct *p = dl_task_of(dl_se);
1207 	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1208 
1209 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1210 	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1211 		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
1212 			return;
1213 		dl_se->dl_throttled = 1;
1214 		if (dl_se->runtime > 0)
1215 			dl_se->runtime = 0;
1216 	}
1217 }
1218 
1219 static
1220 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1221 {
1222 	return (dl_se->runtime <= 0);
1223 }
1224 
1225 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1226 
1227 /*
1228  * This function implements the GRUB accounting rule:
1229  * according to the GRUB reclaiming algorithm, the runtime is
1230  * not decreased as "dq = -dt", but as
1231  * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1232  * where u is the utilization of the task, Umax is the maximum reclaimable
1233  * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1234  * as the difference between the "total runqueue utilization" and the
1235  * runqueue active utilization, and Uextra is the (per runqueue) extra
1236  * reclaimable utilization.
1237  * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1238  * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1239  * BW_SHIFT.
1240  * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
1241  * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1242  * Since delta is a 64 bit variable, to have an overflow its value
1243  * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1244  * So, overflow is not an issue here.
1245  */
1246 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1247 {
1248 	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1249 	u64 u_act;
1250 	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1251 
1252 	/*
1253 	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1254 	 * we compare u_inact + rq->dl.extra_bw with
1255 	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1256 	 * u_inact + rq->dl.extra_bw can be larger than
1257 	 * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative
1258 	 * leading to wrong results)
1259 	 */
1260 	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1261 		u_act = u_act_min;
1262 	else
1263 		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1264 
1265 	return (delta * u_act) >> BW_SHIFT;
1266 }
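/*
 * GRUB example with illustrative numbers (all utilizations in BW_UNIT
 * units): this_bw = 0.8, running_bw = 0.5, so u_inact = 0.3; with
 * extra_bw = 0.05 and a task of bandwidth 0.25 on a runqueue with
 * Umax = 0.95, u_act_min ~ 0.26. Since 0.3 + 0.05 < 1 - 0.26,
 * u_act = 1 - 0.3 - 0.05 = 0.65 and a 10ms delta depletes only 6.5ms
 * of runtime, reclaiming the unused bandwidth.
 */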
1267 
1268 /*
1269  * Update the current task's runtime statistics (provided it is still
1270  * a -deadline task and has not been removed from the dl_rq).
1271  */
1272 static void update_curr_dl(struct rq *rq)
1273 {
1274 	struct task_struct *curr = rq->curr;
1275 	struct sched_dl_entity *dl_se = &curr->dl;
1276 	u64 delta_exec, scaled_delta_exec;
1277 	int cpu = cpu_of(rq);
1278 	u64 now;
1279 
1280 	if (!dl_task(curr) || !on_dl_rq(dl_se))
1281 		return;
1282 
1283 	/*
1284 	 * Consumed budget is computed considering the time as
1285 	 * observed by schedulable tasks (excluding time spent
1286 	 * in hardirq context, etc.). Deadlines are instead
1287 	 * computed using hard walltime. This seems to be the more
1288 	 * natural solution, but the full ramifications of this
1289 	 * approach need further study.
1290 	 */
1291 	now = rq_clock_task(rq);
1292 	delta_exec = now - curr->se.exec_start;
1293 	if (unlikely((s64)delta_exec <= 0)) {
1294 		if (unlikely(dl_se->dl_yielded))
1295 			goto throttle;
1296 		return;
1297 	}
1298 
1299 	schedstat_set(curr->stats.exec_max,
1300 		      max(curr->stats.exec_max, delta_exec));
1301 
1302 	trace_sched_stat_runtime(curr, delta_exec, 0);
1303 
1304 	curr->se.sum_exec_runtime += delta_exec;
1305 	account_group_exec_runtime(curr, delta_exec);
1306 
1307 	curr->se.exec_start = now;
1308 	cgroup_account_cputime(curr, delta_exec);
1309 
1310 	if (dl_entity_is_special(dl_se))
1311 		return;
1312 
1313 	/*
1314 	 * For tasks that participate in GRUB, we implement GRUB-PA: the
1315 	 * spare reclaimed bandwidth is used to clock down frequency.
1316 	 *
1317 	 * For the others, we still need to scale reservation parameters
1318 	 * according to current frequency and CPU maximum capacity.
1319 	 */
1320 	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1321 		scaled_delta_exec = grub_reclaim(delta_exec,
1322 						 rq,
1323 						 &curr->dl);
1324 	} else {
1325 		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1326 		unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1327 
1328 		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1329 		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1330 	}
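	/*
	 * Example of the scaling above (illustrative numbers): at half of
	 * the maximum frequency (scale_freq = 512) on a full-capacity CPU
	 * (scale_cpu = 1024), a 2ms delta_exec is scaled down to 1ms, so
	 * the reservation is charged in "full-speed" CPU time and stays
	 * invariant w.r.t. frequency and CPU capacity.
	 */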
1331 
1332 	dl_se->runtime -= scaled_delta_exec;
1333 
1334 throttle:
1335 	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1336 		dl_se->dl_throttled = 1;
1337 
1338 		/* If requested, inform the user about runtime overruns. */
1339 		if (dl_runtime_exceeded(dl_se) &&
1340 		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1341 			dl_se->dl_overrun = 1;
1342 
1343 		__dequeue_task_dl(rq, curr, 0);
1344 		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
1345 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1346 
1347 		if (!is_leftmost(curr, &rq->dl))
1348 			resched_curr(rq);
1349 	}
1350 
1351 	/*
1352 	 * Because -- for now -- we share the rt bandwidth, we need to
1353 	 * account our runtime there too, otherwise actual rt tasks
1354 	 * would be able to exceed the shared quota.
1355 	 *
1356 	 * Account to the root rt group for now.
1357 	 *
1358 	 * The solution we're working towards is having the RT groups scheduled
1359 	 * using deadline servers -- however there are a few nasties to figure
1360 	 * out before that can happen.
1361 	 */
1362 	if (rt_bandwidth_enabled()) {
1363 		struct rt_rq *rt_rq = &rq->rt;
1364 
1365 		raw_spin_lock(&rt_rq->rt_runtime_lock);
1366 		/*
1367 		 * We'll let actual RT tasks worry about the overflow here, we
1368 		 * have our own CBS to keep us in line; only account when RT
1369 		 * bandwidth is relevant.
1370 		 */
1371 		if (sched_rt_bandwidth_account(rt_rq))
1372 			rt_rq->rt_time += delta_exec;
1373 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
1374 	}
1375 }
1376 
1377 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1378 {
1379 	struct sched_dl_entity *dl_se = container_of(timer,
1380 						     struct sched_dl_entity,
1381 						     inactive_timer);
1382 	struct task_struct *p = dl_task_of(dl_se);
1383 	struct rq_flags rf;
1384 	struct rq *rq;
1385 
1386 	rq = task_rq_lock(p, &rf);
1387 
1388 	sched_clock_tick();
1389 	update_rq_clock(rq);
1390 
1391 	if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
1392 		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1393 
1394 		if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
1395 			sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1396 			sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1397 			dl_se->dl_non_contending = 0;
1398 		}
1399 
1400 		raw_spin_lock(&dl_b->lock);
1401 		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1402 		raw_spin_unlock(&dl_b->lock);
1403 		__dl_clear_params(p);
1404 
1405 		goto unlock;
1406 	}
1407 	if (dl_se->dl_non_contending == 0)
1408 		goto unlock;
1409 
1410 	sub_running_bw(dl_se, &rq->dl);
1411 	dl_se->dl_non_contending = 0;
1412 unlock:
1413 	task_rq_unlock(rq, p, &rf);
1414 	put_task_struct(p);
1415 
1416 	return HRTIMER_NORESTART;
1417 }
1418 
1419 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1420 {
1421 	struct hrtimer *timer = &dl_se->inactive_timer;
1422 
1423 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1424 	timer->function = inactive_task_timer;
1425 }
1426 
1427 #define __node_2_dle(node) \
1428 	rb_entry((node), struct sched_dl_entity, rb_node)
1429 
1430 #ifdef CONFIG_SMP
1431 
1432 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1433 {
1434 	struct rq *rq = rq_of_dl_rq(dl_rq);
1435 
1436 	if (dl_rq->earliest_dl.curr == 0 ||
1437 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1438 		if (dl_rq->earliest_dl.curr == 0)
1439 			cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1440 		dl_rq->earliest_dl.curr = deadline;
1441 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1442 	}
1443 }
1444 
1445 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1446 {
1447 	struct rq *rq = rq_of_dl_rq(dl_rq);
1448 
1449 	/*
1450 	 * Since we may have removed our earliest (and/or next earliest)
1451 	 * task we must recompute them.
1452 	 */
1453 	if (!dl_rq->dl_nr_running) {
1454 		dl_rq->earliest_dl.curr = 0;
1455 		dl_rq->earliest_dl.next = 0;
1456 		cpudl_clear(&rq->rd->cpudl, rq->cpu);
1457 		cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1458 	} else {
1459 		struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
1460 		struct sched_dl_entity *entry = __node_2_dle(leftmost);
1461 
1462 		dl_rq->earliest_dl.curr = entry->deadline;
1463 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1464 	}
1465 }
1466 
1467 #else
1468 
1469 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1470 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1471 
1472 #endif /* CONFIG_SMP */
1473 
1474 static inline
1475 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1476 {
1477 	int prio = dl_task_of(dl_se)->prio;
1478 	u64 deadline = dl_se->deadline;
1479 
1480 	WARN_ON(!dl_prio(prio));
1481 	dl_rq->dl_nr_running++;
1482 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
1483 
1484 	inc_dl_deadline(dl_rq, deadline);
1485 	inc_dl_migration(dl_se, dl_rq);
1486 }
1487 
1488 static inline
1489 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1490 {
1491 	int prio = dl_task_of(dl_se)->prio;
1492 
1493 	WARN_ON(!dl_prio(prio));
1494 	WARN_ON(!dl_rq->dl_nr_running);
1495 	dl_rq->dl_nr_running--;
1496 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1497 
1498 	dec_dl_deadline(dl_rq, dl_se->deadline);
1499 	dec_dl_migration(dl_se, dl_rq);
1500 }
1501 
1502 static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
1503 {
1504 	return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
1505 }
1506 
1507 static inline struct sched_statistics *
1508 __schedstats_from_dl_se(struct sched_dl_entity *dl_se)
1509 {
1510 	return &dl_task_of(dl_se)->stats;
1511 }
1512 
1513 static inline void
1514 update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1515 {
1516 	struct sched_statistics *stats;
1517 
1518 	if (!schedstat_enabled())
1519 		return;
1520 
1521 	stats = __schedstats_from_dl_se(dl_se);
1522 	__update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1523 }
1524 
1525 static inline void
1526 update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1527 {
1528 	struct sched_statistics *stats;
1529 
1530 	if (!schedstat_enabled())
1531 		return;
1532 
1533 	stats = __schedstats_from_dl_se(dl_se);
1534 	__update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1535 }
1536 
1537 static inline void
1538 update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1539 {
1540 	struct sched_statistics *stats;
1541 
1542 	if (!schedstat_enabled())
1543 		return;
1544 
1545 	stats = __schedstats_from_dl_se(dl_se);
1546 	__update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1547 }
1548 
1549 static inline void
1550 update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1551 			int flags)
1552 {
1553 	if (!schedstat_enabled())
1554 		return;
1555 
1556 	if (flags & ENQUEUE_WAKEUP)
1557 		update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
1558 }
1559 
1560 static inline void
1561 update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1562 			int flags)
1563 {
1564 	struct task_struct *p = dl_task_of(dl_se);
1565 
1566 	if (!schedstat_enabled())
1567 		return;
1568 
1569 	if ((flags & DEQUEUE_SLEEP)) {
1570 		unsigned int state;
1571 
1572 		state = READ_ONCE(p->__state);
1573 		if (state & TASK_INTERRUPTIBLE)
1574 			__schedstat_set(p->stats.sleep_start,
1575 					rq_clock(rq_of_dl_rq(dl_rq)));
1576 
1577 		if (state & TASK_UNINTERRUPTIBLE)
1578 			__schedstat_set(p->stats.block_start,
1579 					rq_clock(rq_of_dl_rq(dl_rq)));
1580 	}
1581 }
1582 
1583 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1584 {
1585 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1586 
1587 	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1588 
1589 	rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
1590 
1591 	inc_dl_tasks(dl_se, dl_rq);
1592 }
1593 
1594 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1595 {
1596 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1597 
1598 	if (RB_EMPTY_NODE(&dl_se->rb_node))
1599 		return;
1600 
1601 	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1602 
1603 	RB_CLEAR_NODE(&dl_se->rb_node);
1604 
1605 	dec_dl_tasks(dl_se, dl_rq);
1606 }
1607 
1608 static void
1609 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1610 {
1611 	BUG_ON(on_dl_rq(dl_se));
1612 
1613 	update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
1614 
1615 	/*
1616 	 * If this is a wakeup or a new instance, the scheduling
1617 	 * parameters of the task might need updating. Otherwise,
1618 	 * we want a replenishment of its runtime.
1619 	 */
1620 	if (flags & ENQUEUE_WAKEUP) {
1621 		task_contending(dl_se, flags);
1622 		update_dl_entity(dl_se);
1623 	} else if (flags & ENQUEUE_REPLENISH) {
1624 		replenish_dl_entity(dl_se);
1625 	} else if ((flags & ENQUEUE_RESTORE) &&
1626 		  dl_time_before(dl_se->deadline,
1627 				 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1628 		setup_new_dl_entity(dl_se);
1629 	}
1630 
1631 	__enqueue_dl_entity(dl_se);
1632 }
1633 
1634 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1635 {
1636 	__dequeue_dl_entity(dl_se);
1637 }
1638 
1639 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1640 {
1641 	if (is_dl_boosted(&p->dl)) {
1642 		/*
1643 		 * Because of delays in the detection of the overrun of a
1644 		 * thread's runtime, it might be the case that a thread
1645 		 * goes to sleep in an rt mutex with negative runtime. As
1646 		 * a consequence, the thread will be throttled.
1647 		 *
1648 		 * While waiting for the mutex, this thread can also be
1649 		 * boosted via PI, resulting in a thread that is throttled
1650 		 * and boosted at the same time.
1651 		 *
1652 		 * In this case, the boost overrides the throttle.
1653 		 */
1654 		if (p->dl.dl_throttled) {
1655 			/*
1656 			 * The replenish timer needs to be canceled. No
1657 			 * problem if it fires concurrently: boosted threads
1658 			 * are ignored in dl_task_timer().
1659 			 */
1660 			hrtimer_try_to_cancel(&p->dl.dl_timer);
1661 			p->dl.dl_throttled = 0;
1662 		}
1663 	} else if (!dl_prio(p->normal_prio)) {
1664 		/*
1665 		 * Special case in which we have a !SCHED_DEADLINE task that is going
1666 		 * to be deboosted, but exceeds its runtime while doing so. No point in
1667 		 * replenishing it, as it's going to return back to its original
1668 		 * scheduling class after this. If it has been throttled, we need to
1669 		 * clear the flag, otherwise the task may wake up as throttled after
1670 		 * being boosted again with no means to replenish the runtime and clear
1671 		 * the throttle.
1672 		 */
1673 		p->dl.dl_throttled = 0;
1674 		BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
1675 		return;
1676 	}
1677 
1678 	/*
1679 	 * Check if a constrained deadline task was activated
1680 	 * after the deadline but before the next period.
1681 	 * If that is the case, the task will be throttled and
1682 	 * the replenishment timer will be set to the next period.
1683 	 */
1684 	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1685 		dl_check_constrained_dl(&p->dl);
1686 
1687 	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1688 		add_rq_bw(&p->dl, &rq->dl);
1689 		add_running_bw(&p->dl, &rq->dl);
1690 	}
1691 
1692 	/*
1693 	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
1694 	 * its budget it needs a replenishment and, since it now is on
1695 	 * its rq, the bandwidth timer callback (which clearly has not
1696 	 * run yet) will take care of this.
1697 	 * However, the active utilization does not depend on the fact
1698 	 * that the task is on the runqueue or not (but depends on the
1699 	 * task's state - in GRUB parlance, "inactive" vs "active contending").
1700 	 * In other words, even if a task is throttled its utilization must
1701 	 * be counted in the active utilization; hence, we need to call
1702 	 * add_running_bw().
1703 	 */
1704 	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1705 		if (flags & ENQUEUE_WAKEUP)
1706 			task_contending(&p->dl, flags);
1707 
1708 		return;
1709 	}
1710 
1711 	check_schedstat_required();
1712 	update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
1713 
1714 	enqueue_dl_entity(&p->dl, flags);
1715 
1716 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1717 		enqueue_pushable_dl_task(rq, p);
1718 }
1719 
1720 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1721 {
1722 	update_stats_dequeue_dl(&rq->dl, &p->dl, flags);
1723 	dequeue_dl_entity(&p->dl);
1724 	dequeue_pushable_dl_task(rq, p);
1725 }
1726 
1727 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1728 {
1729 	update_curr_dl(rq);
1730 	__dequeue_task_dl(rq, p, flags);
1731 
1732 	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1733 		sub_running_bw(&p->dl, &rq->dl);
1734 		sub_rq_bw(&p->dl, &rq->dl);
1735 	}
1736 
1737 	/*
1738 	 * This check allows us to start the inactive timer (or to immediately
1739 	 * decrease the active utilization, if needed) in two cases:
1740 	 * when the task blocks and when it is terminating
1741 	 * (p->state == TASK_DEAD). We can handle the two cases in the same
1742 	 * way, because from GRUB's point of view the same thing is happening
1743 	 * (the task moves from "active contending" to "active non contending"
1744 	 * or "inactive")
1745 	 */
1746 	if (flags & DEQUEUE_SLEEP)
1747 		task_non_contending(p);
1748 }
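
/*
 * In GRUB terms (the model the comments above refer to), the states
 * involved here are, roughly:
 *
 *	"active contending":	 the task is runnable and its bandwidth is
 *				 accounted in the rq's running_bw;
 *	"active non contending": the task has blocked but its 0-lag time has
 *				 not passed yet, so it is still accounted in
 *				 running_bw (the inactive timer is armed);
 *	"inactive":		 the 0-lag time has passed and the bandwidth
 *				 has been removed from running_bw.
 */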
1749 
1750 /*
1751  * Yield task semantic for -deadline tasks is:
1752  *
1753 	 *   get off the CPU until our next instance, with
1754  *   a new runtime. This is of little use now, since we
1755  *   don't have a bandwidth reclaiming mechanism. Anyway,
1756  *   bandwidth reclaiming is planned for the future, and
1757  *   yield_task_dl will indicate that some spare budget
1758 	 *   is available for other task instances to use.
1759  */
1760 static void yield_task_dl(struct rq *rq)
1761 {
1762 	/*
1763 	 * We make the task go to sleep until its current deadline by
1764 	 * forcing its runtime to zero. This way, update_curr_dl() stops
1765 	 * it and the bandwidth timer will wake it up and will give it
1766 	 * new scheduling parameters (thanks to dl_yielded=1).
1767 	 */
1768 	rq->curr->dl.dl_yielded = 1;
1769 
1770 	update_rq_clock(rq);
1771 	update_curr_dl(rq);
1772 	/*
1773 	 * Tell update_rq_clock() that we've just updated,
1774 	 * so we don't do microscopic update in schedule()
1775 	 * and double the fastpath cost.
1776 	 */
1777 	rq_clock_skip_update(rq);
1778 }
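
/*
 * Illustrative usage from the userspace side (example only, not part of
 * this file): a periodic -deadline worker that completes its job early
 * can give the rest of its runtime back with:
 *
 *	for (;;) {
 *		do_work();
 *		sched_yield();
 *	}
 *
 * The task then sleeps until its next period, where the bandwidth timer
 * hands it fresh parameters as described above.
 */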
1779 
1780 #ifdef CONFIG_SMP
1781 
1782 static int find_later_rq(struct task_struct *task);
1783 
1784 static int
1785 select_task_rq_dl(struct task_struct *p, int cpu, int flags)
1786 {
1787 	struct task_struct *curr;
1788 	bool select_rq;
1789 	struct rq *rq;
1790 
1791 	if (!(flags & WF_TTWU))
1792 		goto out;
1793 
1794 	rq = cpu_rq(cpu);
1795 
1796 	rcu_read_lock();
1797 	curr = READ_ONCE(rq->curr); /* unlocked access */
1798 
1799 	/*
1800 	 * If we are dealing with a -deadline task, we must
1801 	 * decide where to wake it up.
1802 	 * If it has a later deadline and the current task
1803 	 * on this rq can't move (provided the waking task
1804 	 * can!) we prefer to send it somewhere else. On the
1805 	 * other hand, if it has a shorter deadline, we
1806 	 * try to make it stay here, as it might be important.
1807 	 */
1808 	select_rq = unlikely(dl_task(curr)) &&
1809 		    (curr->nr_cpus_allowed < 2 ||
1810 		     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1811 		    p->nr_cpus_allowed > 1;
1812 
1813 	/*
1814 	 * Take the capacity of the CPU into account to
1815 	 * ensure it fits the requirement of the task.
1816 	 */
1817 	if (static_branch_unlikely(&sched_asym_cpucapacity))
1818 		select_rq |= !dl_task_fits_capacity(p, cpu);
1819 
1820 	if (select_rq) {
1821 		int target = find_later_rq(p);
1822 
1823 		if (target != -1 &&
1824 				(dl_time_before(p->dl.deadline,
1825 					cpu_rq(target)->dl.earliest_dl.curr) ||
1826 				(cpu_rq(target)->dl.dl_nr_running == 0)))
1827 			cpu = target;
1828 	}
1829 	rcu_read_unlock();
1830 
1831 out:
1832 	return cpu;
1833 }
1834 
1835 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1836 {
1837 	struct rq *rq;
1838 
1839 	if (READ_ONCE(p->__state) != TASK_WAKING)
1840 		return;
1841 
1842 	rq = task_rq(p);
1843 	/*
1844 	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1845 	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1846 	 * rq->lock is not... So, lock it
1847 	 */
1848 	raw_spin_rq_lock(rq);
1849 	if (p->dl.dl_non_contending) {
1850 		update_rq_clock(rq);
1851 		sub_running_bw(&p->dl, &rq->dl);
1852 		p->dl.dl_non_contending = 0;
1853 		/*
1854 		 * If the timer handler is currently running and the
1855 		 * timer cannot be canceled, inactive_task_timer()
1856 		 * will see that dl_non_contending is not set, and
1857 		 * will not touch the rq's active utilization,
1858 		 * so we are still safe.
1859 		 */
1860 		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1861 			put_task_struct(p);
1862 	}
1863 	sub_rq_bw(&p->dl, &rq->dl);
1864 	raw_spin_rq_unlock(rq);
1865 }
1866 
1867 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1868 {
1869 	/*
1870 	 * Current can't be migrated, useless to reschedule,
1871 	 * let's hope p can move out.
1872 	 */
1873 	if (rq->curr->nr_cpus_allowed == 1 ||
1874 	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1875 		return;
1876 
1877 	/*
1878 	 * p is migratable, so let's not schedule it and
1879 	 * see if it is pushed or pulled somewhere else.
1880 	 */
1881 	if (p->nr_cpus_allowed != 1 &&
1882 	    cpudl_find(&rq->rd->cpudl, p, NULL))
1883 		return;
1884 
1885 	resched_curr(rq);
1886 }
1887 
1888 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1889 {
1890 	if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1891 		/*
1892 		 * This is OK, because current is on_cpu, which avoids it being
1893 		 * picked for load-balance and preemption/IRQs are still
1894 		 * disabled avoiding further scheduler activity on it and we've
1895 		 * not yet started the picking loop.
1896 		 */
1897 		rq_unpin_lock(rq, rf);
1898 		pull_dl_task(rq);
1899 		rq_repin_lock(rq, rf);
1900 	}
1901 
1902 	return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1903 }
1904 #endif /* CONFIG_SMP */
1905 
1906 /*
1907  * Only called when both the current and waking task are -deadline
1908  * tasks.
1909  */
1910 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1911 				  int flags)
1912 {
1913 	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1914 		resched_curr(rq);
1915 		return;
1916 	}
1917 
1918 #ifdef CONFIG_SMP
1919 	/*
1920 	 * In the unlikely case current and p have the same deadline
1921 	 * let us try to decide what's the best thing to do...
1922 	 */
1923 	if ((p->dl.deadline == rq->curr->dl.deadline) &&
1924 	    !test_tsk_need_resched(rq->curr))
1925 		check_preempt_equal_dl(rq, p);
1926 #endif /* CONFIG_SMP */
1927 }
1928 
1929 #ifdef CONFIG_SCHED_HRTICK
1930 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1931 {
1932 	hrtick_start(rq, p->dl.runtime);
1933 }
1934 #else /* !CONFIG_SCHED_HRTICK */
1935 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1936 {
1937 }
1938 #endif
1939 
1940 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
1941 {
1942 	struct sched_dl_entity *dl_se = &p->dl;
1943 	struct dl_rq *dl_rq = &rq->dl;
1944 
1945 	p->se.exec_start = rq_clock_task(rq);
1946 	if (on_dl_rq(&p->dl))
1947 		update_stats_wait_end_dl(dl_rq, dl_se);
1948 
1949 	/* You can't push away the running task */
1950 	dequeue_pushable_dl_task(rq, p);
1951 
1952 	if (!first)
1953 		return;
1954 
1955 	if (hrtick_enabled_dl(rq))
1956 		start_hrtick_dl(rq, p);
1957 
1958 	if (rq->curr->sched_class != &dl_sched_class)
1959 		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1960 
1961 	deadline_queue_push_tasks(rq);
1962 }
1963 
1964 static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
1965 {
1966 	struct rb_node *left = rb_first_cached(&dl_rq->root);
1967 
1968 	if (!left)
1969 		return NULL;
1970 
1971 	return __node_2_dle(left);
1972 }
1973 
1974 static struct task_struct *pick_task_dl(struct rq *rq)
1975 {
1976 	struct sched_dl_entity *dl_se;
1977 	struct dl_rq *dl_rq = &rq->dl;
1978 	struct task_struct *p;
1979 
1980 	if (!sched_dl_runnable(rq))
1981 		return NULL;
1982 
1983 	dl_se = pick_next_dl_entity(dl_rq);
1984 	BUG_ON(!dl_se);
1985 	p = dl_task_of(dl_se);
1986 
1987 	return p;
1988 }
1989 
1990 static struct task_struct *pick_next_task_dl(struct rq *rq)
1991 {
1992 	struct task_struct *p;
1993 
1994 	p = pick_task_dl(rq);
1995 	if (p)
1996 		set_next_task_dl(rq, p, true);
1997 
1998 	return p;
1999 }
2000 
2001 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
2002 {
2003 	struct sched_dl_entity *dl_se = &p->dl;
2004 	struct dl_rq *dl_rq = &rq->dl;
2005 
2006 	if (on_dl_rq(&p->dl))
2007 		update_stats_wait_start_dl(dl_rq, dl_se);
2008 
2009 	update_curr_dl(rq);
2010 
2011 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2012 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
2013 		enqueue_pushable_dl_task(rq, p);
2014 }
2015 
2016 /*
2017  * scheduler tick hitting a task of our scheduling class.
2018  *
2019  * NOTE: This function can be called remotely by the tick offload that
2020  * goes along full dynticks. Therefore no local assumption can be made
2021  * and everything must be accessed through the @rq and @curr passed in
2022  * parameters.
2023  */
2024 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2025 {
2026 	update_curr_dl(rq);
2027 
2028 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2029 	/*
2030 	 * Even when we have runtime, update_curr_dl() might have resulted in us
2031 	 * not being the leftmost task anymore. In that case NEED_RESCHED will
2032 	 * be set and schedule() will start a new hrtick for the next task.
2033 	 */
2034 	if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
2035 	    is_leftmost(p, &rq->dl))
2036 		start_hrtick_dl(rq, p);
2037 }
2038 
2039 static void task_fork_dl(struct task_struct *p)
2040 {
2041 	/*
2042 	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
2043 	 * sched_fork()
2044 	 */
2045 }
2046 
2047 #ifdef CONFIG_SMP
2048 
2049 /* Only try algorithms three times */
2050 #define DL_MAX_TRIES 3
2051 
2052 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2053 {
2054 	if (!task_running(rq, p) &&
2055 	    cpumask_test_cpu(cpu, &p->cpus_mask))
2056 		return 1;
2057 	return 0;
2058 }
2059 
2060 /*
2061  * Return the earliest pushable task on this rq that is suitable to be
2062  * executed on the given CPU, or NULL if there is none:
2063  */
2064 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2065 {
2066 	struct task_struct *p = NULL;
2067 	struct rb_node *next_node;
2068 
2069 	if (!has_pushable_dl_tasks(rq))
2070 		return NULL;
2071 
2072 	next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
2073 
2074 next_node:
2075 	if (next_node) {
2076 		p = __node_2_pdl(next_node);
2077 
2078 		if (pick_dl_task(rq, p, cpu))
2079 			return p;
2080 
2081 		next_node = rb_next(next_node);
2082 		goto next_node;
2083 	}
2084 
2085 	return NULL;
2086 }
2087 
2088 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2089 
2090 static int find_later_rq(struct task_struct *task)
2091 {
2092 	struct sched_domain *sd;
2093 	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
2094 	int this_cpu = smp_processor_id();
2095 	int cpu = task_cpu(task);
2096 
2097 	/* Make sure the mask is initialized first */
2098 	if (unlikely(!later_mask))
2099 		return -1;
2100 
2101 	if (task->nr_cpus_allowed == 1)
2102 		return -1;
2103 
2104 	/*
2105 	 * We have to consider system topology and task affinity
2106 	 * first, then we can look for a suitable CPU.
2107 	 */
2108 	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
2109 		return -1;
2110 
2111 	/*
2112 	 * If we are here, some targets have been found; the most suitable
2113 	 * of them is the rq whose current task has the latest deadline,
2114 	 * among the runqueues whose current tasks have deadlines later
2115 	 * than our task's.
2116 	 *
2117 	 * Now we check how well this matches the task's
2118 	 * affinity and the system topology.
2119 	 *
2120 	 * The last CPU where the task ran is our first
2121 	 * guess, since it is most likely cache-hot there.
2122 	 */
2123 	if (cpumask_test_cpu(cpu, later_mask))
2124 		return cpu;
2125 	/*
2126 	 * Check if this_cpu is to be skipped (i.e., it is
2127 	 * not in the mask) or not.
2128 	 */
2129 	if (!cpumask_test_cpu(this_cpu, later_mask))
2130 		this_cpu = -1;
2131 
2132 	rcu_read_lock();
2133 	for_each_domain(cpu, sd) {
2134 		if (sd->flags & SD_WAKE_AFFINE) {
2135 			int best_cpu;
2136 
2137 			/*
2138 			 * If possible, preempting this_cpu is
2139 			 * cheaper than migrating.
2140 			 */
2141 			if (this_cpu != -1 &&
2142 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2143 				rcu_read_unlock();
2144 				return this_cpu;
2145 			}
2146 
2147 			best_cpu = cpumask_any_and_distribute(later_mask,
2148 							      sched_domain_span(sd));
2149 			/*
2150 			 * Last chance: if a CPU that is in both later_mask
2151 			 * and the current sd span is valid, that becomes our
2152 			 * choice. Of course, the latest possible CPU is
2153 			 * already under consideration through later_mask.
2154 			 */
2155 			if (best_cpu < nr_cpu_ids) {
2156 				rcu_read_unlock();
2157 				return best_cpu;
2158 			}
2159 		}
2160 	}
2161 	rcu_read_unlock();
2162 
2163 	/*
2164 	 * At this point, all our guesses failed; we just return
2165 	 * 'something' and let the caller sort things out.
2166 	 */
2167 	if (this_cpu != -1)
2168 		return this_cpu;
2169 
2170 	cpu = cpumask_any_distribute(later_mask);
2171 	if (cpu < nr_cpu_ids)
2172 		return cpu;
2173 
2174 	return -1;
2175 }
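
/*
 * In short, the preference order implemented above is:
 *
 *	1) the CPU the task last ran on, if it is in later_mask (cache hot);
 *	2) this_cpu, if it is in later_mask and shares a SD_WAKE_AFFINE
 *	   domain with the task's CPU (preempting locally beats migrating);
 *	3) any later_mask CPU inside such a domain;
 *	4) this_cpu, if it is in later_mask;
 *	5) any remaining later_mask CPU;
 *	6) -1, if everything above failed.
 */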
2176 
2177 /* Locks the rq it finds */
2178 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2179 {
2180 	struct rq *later_rq = NULL;
2181 	int tries;
2182 	int cpu;
2183 
2184 	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2185 		cpu = find_later_rq(task);
2186 
2187 		if ((cpu == -1) || (cpu == rq->cpu))
2188 			break;
2189 
2190 		later_rq = cpu_rq(cpu);
2191 
2192 		if (later_rq->dl.dl_nr_running &&
2193 		    !dl_time_before(task->dl.deadline,
2194 					later_rq->dl.earliest_dl.curr)) {
2195 			/*
2196 			 * Target rq has tasks of equal or earlier deadline;
2197 			 * retrying does not release any lock and is unlikely
2198 			 * to yield a different result.
2199 			 */
2200 			later_rq = NULL;
2201 			break;
2202 		}
2203 
2204 		/* Retry if something changed. */
2205 		if (double_lock_balance(rq, later_rq)) {
2206 			if (unlikely(task_rq(task) != rq ||
2207 				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
2208 				     task_running(rq, task) ||
2209 				     !dl_task(task) ||
2210 				     !task_on_rq_queued(task))) {
2211 				double_unlock_balance(rq, later_rq);
2212 				later_rq = NULL;
2213 				break;
2214 			}
2215 		}
2216 
2217 		/*
2218 		 * If the rq we found has no -deadline task, or
2219 		 * its earliest one has a later deadline than our
2220 		 * task, the rq is a good one.
2221 		 */
2222 		if (!later_rq->dl.dl_nr_running ||
2223 		    dl_time_before(task->dl.deadline,
2224 				   later_rq->dl.earliest_dl.curr))
2225 			break;
2226 
2227 		/* Otherwise we try again. */
2228 		double_unlock_balance(rq, later_rq);
2229 		later_rq = NULL;
2230 	}
2231 
2232 	return later_rq;
2233 }
2234 
2235 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2236 {
2237 	struct task_struct *p;
2238 
2239 	if (!has_pushable_dl_tasks(rq))
2240 		return NULL;
2241 
2242 	p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
2243 
2244 	BUG_ON(rq->cpu != task_cpu(p));
2245 	BUG_ON(task_current(rq, p));
2246 	BUG_ON(p->nr_cpus_allowed <= 1);
2247 
2248 	BUG_ON(!task_on_rq_queued(p));
2249 	BUG_ON(!dl_task(p));
2250 
2251 	return p;
2252 }
2253 
2254 /*
2255  * See if the non running -deadline tasks on this rq
2256  * can be sent to some other CPU where they can preempt
2257  * and start executing.
2258  */
2259 static int push_dl_task(struct rq *rq)
2260 {
2261 	struct task_struct *next_task;
2262 	struct rq *later_rq;
2263 	int ret = 0;
2264 
2265 	if (!rq->dl.overloaded)
2266 		return 0;
2267 
2268 	next_task = pick_next_pushable_dl_task(rq);
2269 	if (!next_task)
2270 		return 0;
2271 
2272 retry:
2273 	/*
2274 	 * If next_task preempts rq->curr, and rq->curr
2275 	 * can move away, it makes sense to just reschedule
2276 	 * without going further in pushing next_task.
2277 	 */
2278 	if (dl_task(rq->curr) &&
2279 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2280 	    rq->curr->nr_cpus_allowed > 1) {
2281 		resched_curr(rq);
2282 		return 0;
2283 	}
2284 
2285 	if (is_migration_disabled(next_task))
2286 		return 0;
2287 
2288 	if (WARN_ON(next_task == rq->curr))
2289 		return 0;
2290 
2291 	/* We might release rq lock */
2292 	get_task_struct(next_task);
2293 
2294 	/* Will lock the rq it'll find */
2295 	later_rq = find_lock_later_rq(next_task, rq);
2296 	if (!later_rq) {
2297 		struct task_struct *task;
2298 
2299 		/*
2300 		 * We must check all this again, since
2301 		 * find_lock_later_rq releases rq->lock and it is
2302 		 * then possible that next_task has migrated.
2303 		 */
2304 		task = pick_next_pushable_dl_task(rq);
2305 		if (task == next_task) {
2306 			/*
2307 			 * The task is still there. We don't try
2308 			 * again; some other CPU will pull it when ready.
2309 			 */
2310 			goto out;
2311 		}
2312 
2313 		if (!task)
2314 			/* No more tasks */
2315 			goto out;
2316 
2317 		put_task_struct(next_task);
2318 		next_task = task;
2319 		goto retry;
2320 	}
2321 
2322 	deactivate_task(rq, next_task, 0);
2323 	set_task_cpu(next_task, later_rq->cpu);
2324 
2325 	/*
2326 	 * Update the later_rq clock here, because the clock is used
2327 	 * by the cpufreq_update_util() inside __add_running_bw().
2328 	 */
2329 	update_rq_clock(later_rq);
2330 	activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2331 	ret = 1;
2332 
2333 	resched_curr(later_rq);
2334 
2335 	double_unlock_balance(rq, later_rq);
2336 
2337 out:
2338 	put_task_struct(next_task);
2339 
2340 	return ret;
2341 }
2342 
2343 static void push_dl_tasks(struct rq *rq)
2344 {
2345 	/* push_dl_task() will return true if it moved a -deadline task */
2346 	while (push_dl_task(rq))
2347 		;
2348 }
2349 
2350 static void pull_dl_task(struct rq *this_rq)
2351 {
2352 	int this_cpu = this_rq->cpu, cpu;
2353 	struct task_struct *p, *push_task;
2354 	bool resched = false;
2355 	struct rq *src_rq;
2356 	u64 dmin = LONG_MAX;
2357 
2358 	if (likely(!dl_overloaded(this_rq)))
2359 		return;
2360 
2361 	/*
2362 	 * Match the barrier from dl_set_overloaded; this guarantees that if we
2363 	 * see overloaded we must also see the dlo_mask bit.
2364 	 */
2365 	smp_rmb();
2366 
2367 	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2368 		if (this_cpu == cpu)
2369 			continue;
2370 
2371 		src_rq = cpu_rq(cpu);
2372 
2373 		/*
2374 		 * It looks racy, and it is! However, as in sched_rt.c,
2375 		 * we are fine with this.
2376 		 */
2377 		if (this_rq->dl.dl_nr_running &&
2378 		    dl_time_before(this_rq->dl.earliest_dl.curr,
2379 				   src_rq->dl.earliest_dl.next))
2380 			continue;
2381 
2382 		/* Might drop this_rq->lock */
2383 		push_task = NULL;
2384 		double_lock_balance(this_rq, src_rq);
2385 
2386 		/*
2387 		 * If there are no more pullable tasks on the
2388 		 * rq, we're done with it.
2389 		 */
2390 		if (src_rq->dl.dl_nr_running <= 1)
2391 			goto skip;
2392 
2393 		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2394 
2395 		/*
2396 		 * We found a task to be pulled if:
2397 		 *  - it preempts our current (if there's one),
2398 		 *  - it will preempt the last one we pulled (if any).
2399 		 */
2400 		if (p && dl_time_before(p->dl.deadline, dmin) &&
2401 		    (!this_rq->dl.dl_nr_running ||
2402 		     dl_time_before(p->dl.deadline,
2403 				    this_rq->dl.earliest_dl.curr))) {
2404 			WARN_ON(p == src_rq->curr);
2405 			WARN_ON(!task_on_rq_queued(p));
2406 
2407 			/*
2408 			 * We pull p only if its deadline is not earlier than
2409 			 * that of src_rq's current task (else p is about to run there).
2410 			 */
2411 			if (dl_time_before(p->dl.deadline,
2412 					   src_rq->curr->dl.deadline))
2413 				goto skip;
2414 
2415 			if (is_migration_disabled(p)) {
2416 				push_task = get_push_task(src_rq);
2417 			} else {
2418 				deactivate_task(src_rq, p, 0);
2419 				set_task_cpu(p, this_cpu);
2420 				activate_task(this_rq, p, 0);
2421 				dmin = p->dl.deadline;
2422 				resched = true;
2423 			}
2424 
2425 			/* Is there any other task even earlier? */
2426 		}
2427 skip:
2428 		double_unlock_balance(this_rq, src_rq);
2429 
2430 		if (push_task) {
2431 			raw_spin_rq_unlock(this_rq);
2432 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2433 					    push_task, &src_rq->push_work);
2434 			raw_spin_rq_lock(this_rq);
2435 		}
2436 	}
2437 
2438 	if (resched)
2439 		resched_curr(this_rq);
2440 }
2441 
2442 /*
2443  * Since the task is not running and a reschedule is not going to happen
2444  * anytime soon on its runqueue, we try pushing it away now.
2445  */
2446 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2447 {
2448 	if (!task_running(rq, p) &&
2449 	    !test_tsk_need_resched(rq->curr) &&
2450 	    p->nr_cpus_allowed > 1 &&
2451 	    dl_task(rq->curr) &&
2452 	    (rq->curr->nr_cpus_allowed < 2 ||
2453 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2454 		push_dl_tasks(rq);
2455 	}
2456 }
2457 
2458 static void set_cpus_allowed_dl(struct task_struct *p,
2459 				const struct cpumask *new_mask,
2460 				u32 flags)
2461 {
2462 	struct root_domain *src_rd;
2463 	struct rq *rq;
2464 
2465 	BUG_ON(!dl_task(p));
2466 
2467 	rq = task_rq(p);
2468 	src_rd = rq->rd;
2469 	/*
2470 	 * Migrating a SCHED_DEADLINE task between exclusive
2471 	 * cpusets (different root_domains) entails a bandwidth
2472 	 * update. We already made space for us in the destination
2473 	 * domain (see cpuset_can_attach()).
2474 	 */
2475 	if (!cpumask_intersects(src_rd->span, new_mask)) {
2476 		struct dl_bw *src_dl_b;
2477 
2478 		src_dl_b = dl_bw_of(cpu_of(rq));
2479 		/*
2480 		 * We now free resources of the root_domain we are migrating
2481 		 * off. In the worst case, sched_setattr() may temporarily fail
2482 		 * until we complete the update.
2483 		 */
2484 		raw_spin_lock(&src_dl_b->lock);
2485 		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2486 		raw_spin_unlock(&src_dl_b->lock);
2487 	}
2488 
2489 	set_cpus_allowed_common(p, new_mask, flags);
2490 }
2491 
2492 /* Assumes rq->lock is held */
2493 static void rq_online_dl(struct rq *rq)
2494 {
2495 	if (rq->dl.overloaded)
2496 		dl_set_overload(rq);
2497 
2498 	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2499 	if (rq->dl.dl_nr_running > 0)
2500 		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2501 }
2502 
2503 /* Assumes rq->lock is held */
2504 static void rq_offline_dl(struct rq *rq)
2505 {
2506 	if (rq->dl.overloaded)
2507 		dl_clear_overload(rq);
2508 
2509 	cpudl_clear(&rq->rd->cpudl, rq->cpu);
2510 	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2511 }
2512 
2513 void __init init_sched_dl_class(void)
2514 {
2515 	unsigned int i;
2516 
2517 	for_each_possible_cpu(i)
2518 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2519 					GFP_KERNEL, cpu_to_node(i));
2520 }
2521 
2522 void dl_add_task_root_domain(struct task_struct *p)
2523 {
2524 	struct rq_flags rf;
2525 	struct rq *rq;
2526 	struct dl_bw *dl_b;
2527 
2528 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2529 	if (!dl_task(p)) {
2530 		raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2531 		return;
2532 	}
2533 
2534 	rq = __task_rq_lock(p, &rf);
2535 
2536 	dl_b = &rq->rd->dl_bw;
2537 	raw_spin_lock(&dl_b->lock);
2538 
2539 	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2540 
2541 	raw_spin_unlock(&dl_b->lock);
2542 
2543 	task_rq_unlock(rq, p, &rf);
2544 }
2545 
2546 void dl_clear_root_domain(struct root_domain *rd)
2547 {
2548 	unsigned long flags;
2549 
2550 	raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2551 	rd->dl_bw.total_bw = 0;
2552 	raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2553 }
2554 
2555 #endif /* CONFIG_SMP */
2556 
2557 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2558 {
2559 	/*
2560 	 * task_non_contending() can start the "inactive timer" (if the 0-lag
2561 	 * time is in the future). If the task switches back to dl before
2562 	 * the "inactive timer" fires, it can continue to consume its current
2563 	 * runtime using its current deadline. If it stays outside of
2564 	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2565 	 * will reset the task parameters.
2566 	 */
2567 	if (task_on_rq_queued(p) && p->dl.dl_runtime)
2568 		task_non_contending(p);
2569 
2570 	if (!task_on_rq_queued(p)) {
2571 		/*
2572 		 * Inactive timer is armed. However, p is leaving DEADLINE and
2573 		 * might migrate away from this rq while continuing to run on
2574 		 * some other class. We need to remove its contribution from
2575 		 * this rq's running_bw now, or sub_rq_bw() (below) will complain.
2576 		 */
2577 		if (p->dl.dl_non_contending)
2578 			sub_running_bw(&p->dl, &rq->dl);
2579 		sub_rq_bw(&p->dl, &rq->dl);
2580 	}
2581 
2582 	/*
2583 	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2584 	 * at the 0-lag time, because the task could have been migrated
2585 	 * away in the meantime, while it was SCHED_OTHER.
2586 	 */
2587 	if (p->dl.dl_non_contending)
2588 		p->dl.dl_non_contending = 0;
2589 
2590 	/*
2591 	 * Since this might be the only -deadline task on the rq,
2592 	 * this is the right place to try to pull some other one
2593 	 * from an overloaded CPU, if any.
2594 	 */
2595 	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2596 		return;
2597 
2598 	deadline_queue_pull_task(rq);
2599 }
2600 
2601 /*
2602  * When switching to -deadline, we may overload the rq, so we
2603  * try to push someone off, if possible.
2604  */
2605 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2606 {
2607 	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2608 		put_task_struct(p);
2609 
2610 	/* If p is not queued we will update its parameters at next wakeup. */
2611 	if (!task_on_rq_queued(p)) {
2612 		add_rq_bw(&p->dl, &rq->dl);
2613 
2614 		return;
2615 	}
2616 
2617 	if (rq->curr != p) {
2618 #ifdef CONFIG_SMP
2619 		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2620 			deadline_queue_push_tasks(rq);
2621 #endif
2622 		if (dl_task(rq->curr))
2623 			check_preempt_curr_dl(rq, p, 0);
2624 		else
2625 			resched_curr(rq);
2626 	} else {
2627 		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2628 	}
2629 }
2630 
2631 /*
2632  * If the scheduling parameters of a -deadline task changed,
2633  * a push or pull operation might be needed.
2634  */
2635 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2636 			    int oldprio)
2637 {
2638 	if (task_on_rq_queued(p) || task_current(rq, p)) {
2639 #ifdef CONFIG_SMP
2640 		/*
2641 		 * This might be too much, but unfortunately
2642 		 * we don't have the old deadline value, and
2643 		 * we can't tell whether the task is increasing
2644 		 * or lowering its prio, so...
2645 		 */
2646 		if (!rq->dl.overloaded)
2647 			deadline_queue_pull_task(rq);
2648 
2649 		/*
2650 		 * If we now have an earlier deadline task than p,
2651 		 * then reschedule, provided p is still on this
2652 		 * runqueue.
2653 		 */
2654 		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2655 			resched_curr(rq);
2656 #else
2657 		/*
2658 		 * Again, we don't know if p has an earlier
2659 		 * or later deadline, so let's blindly set a
2660 		 * (maybe not needed) rescheduling point.
2661 		 */
2662 		resched_curr(rq);
2663 #endif /* CONFIG_SMP */
2664 	}
2665 }
2666 
2667 DEFINE_SCHED_CLASS(dl) = {
2668 
2669 	.enqueue_task		= enqueue_task_dl,
2670 	.dequeue_task		= dequeue_task_dl,
2671 	.yield_task		= yield_task_dl,
2672 
2673 	.check_preempt_curr	= check_preempt_curr_dl,
2674 
2675 	.pick_next_task		= pick_next_task_dl,
2676 	.put_prev_task		= put_prev_task_dl,
2677 	.set_next_task		= set_next_task_dl,
2678 
2679 #ifdef CONFIG_SMP
2680 	.balance		= balance_dl,
2681 	.pick_task		= pick_task_dl,
2682 	.select_task_rq		= select_task_rq_dl,
2683 	.migrate_task_rq	= migrate_task_rq_dl,
2684 	.set_cpus_allowed       = set_cpus_allowed_dl,
2685 	.rq_online              = rq_online_dl,
2686 	.rq_offline             = rq_offline_dl,
2687 	.task_woken		= task_woken_dl,
2688 	.find_lock_rq		= find_lock_later_rq,
2689 #endif
2690 
2691 	.task_tick		= task_tick_dl,
2692 	.task_fork              = task_fork_dl,
2693 
2694 	.prio_changed           = prio_changed_dl,
2695 	.switched_from		= switched_from_dl,
2696 	.switched_to		= switched_to_dl,
2697 
2698 	.update_curr		= update_curr_dl,
2699 };
2700 
2701 /* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
2702 static u64 dl_generation;
2703 
2704 int sched_dl_global_validate(void)
2705 {
2706 	u64 runtime = global_rt_runtime();
2707 	u64 period = global_rt_period();
2708 	u64 new_bw = to_ratio(period, runtime);
2709 	u64 gen = ++dl_generation;
2710 	struct dl_bw *dl_b;
2711 	int cpu, cpus, ret = 0;
2712 	unsigned long flags;
2713 
2714 	/*
2715 	 * Here we want to check that the bandwidth is not being set to some
2716 	 * value smaller than the currently allocated bandwidth in
2717 	 * any of the root_domains.
2718 	 */
2719 	for_each_possible_cpu(cpu) {
2720 		rcu_read_lock_sched();
2721 
2722 		if (dl_bw_visited(cpu, gen))
2723 			goto next;
2724 
2725 		dl_b = dl_bw_of(cpu);
2726 		cpus = dl_bw_cpus(cpu);
2727 
2728 		raw_spin_lock_irqsave(&dl_b->lock, flags);
2729 		if (new_bw * cpus < dl_b->total_bw)
2730 			ret = -EBUSY;
2731 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2732 
2733 next:
2734 		rcu_read_unlock_sched();
2735 
2736 		if (ret)
2737 			break;
2738 	}
2739 
2740 	return ret;
2741 }
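
/*
 * Worked example for the check above (illustrative numbers, assuming the
 * default sysctls): with sched_rt_runtime_us = 950000 and
 * sched_rt_period_us = 1000000, new_bw is ~0.95 in BW_SHIFT fixed point.
 * A root domain spanning 4 CPUs is then rejected with -EBUSY only if its
 * already admitted total_bw exceeds 4 * 0.95, i.e. 3.8 CPUs worth of
 * deadline bandwidth.
 */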
2742 
2743 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2744 {
2745 	if (global_rt_runtime() == RUNTIME_INF) {
2746 		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2747 		dl_rq->extra_bw = 1 << BW_SHIFT;
2748 	} else {
2749 		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2750 			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2751 		dl_rq->extra_bw = to_ratio(global_rt_period(),
2752 						    global_rt_runtime());
2753 	}
2754 }
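
/*
 * Illustrative values for the above, assuming the default 95% RT limit:
 *
 *	bw_ratio ~= 1000000/950000 ~= 1.05 in RATIO_SHIFT (8 bit) fixed
 *		    point, i.e. ~269;
 *	extra_bw ~= 950000/1000000 ~= 0.95 in BW_SHIFT (20 bit) fixed
 *		    point, i.e. ~996147.
 *
 * GRUB reclaiming uses bw_ratio to scale the reclaimed time, so that
 * reclaiming never exceeds the RT throttling limit of the CPU.
 */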
2755 
2756 void sched_dl_do_global(void)
2757 {
2758 	u64 new_bw = -1;
2759 	u64 gen = ++dl_generation;
2760 	struct dl_bw *dl_b;
2761 	int cpu;
2762 	unsigned long flags;
2763 
2764 	if (global_rt_runtime() != RUNTIME_INF)
2765 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2766 
2767 	for_each_possible_cpu(cpu) {
2768 		rcu_read_lock_sched();
2769 
2770 		if (dl_bw_visited(cpu, gen)) {
2771 			rcu_read_unlock_sched();
2772 			continue;
2773 		}
2774 
2775 		dl_b = dl_bw_of(cpu);
2776 
2777 		raw_spin_lock_irqsave(&dl_b->lock, flags);
2778 		dl_b->bw = new_bw;
2779 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2780 
2781 		rcu_read_unlock_sched();
2782 		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2783 	}
2784 }
2785 
2786 /*
2787  * We must be sure that accepting a new task (or allowing a change of the
2788  * parameters of an existing one) is consistent with the bandwidth
2789  * constraints. If so, this function also updates the currently
2790  * allocated bandwidth accordingly, to reflect the new situation.
2791  *
2792  * This function is called while holding p's rq->lock.
2793  */
2794 int sched_dl_overflow(struct task_struct *p, int policy,
2795 		      const struct sched_attr *attr)
2796 {
2797 	u64 period = attr->sched_period ?: attr->sched_deadline;
2798 	u64 runtime = attr->sched_runtime;
2799 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2800 	int cpus, err = -1, cpu = task_cpu(p);
2801 	struct dl_bw *dl_b = dl_bw_of(cpu);
2802 	unsigned long cap;
2803 
2804 	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2805 		return 0;
2806 
2807 	/* !deadline task may carry old deadline bandwidth */
2808 	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2809 		return 0;
2810 
2811 	/*
2812 	 * Whether a task enters, leaves, or stays -deadline but changes
2813 	 * its parameters, we may need to update the total allocated
2814 	 * bandwidth of the container accordingly.
2815 	 */
2816 	raw_spin_lock(&dl_b->lock);
2817 	cpus = dl_bw_cpus(cpu);
2818 	cap = dl_bw_capacity(cpu);
2819 
2820 	if (dl_policy(policy) && !task_has_dl_policy(p) &&
2821 	    !__dl_overflow(dl_b, cap, 0, new_bw)) {
2822 		if (hrtimer_active(&p->dl.inactive_timer))
2823 			__dl_sub(dl_b, p->dl.dl_bw, cpus);
2824 		__dl_add(dl_b, new_bw, cpus);
2825 		err = 0;
2826 	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
2827 		   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2828 		/*
2829 		 * XXX this is slightly incorrect: when the task
2830 		 * utilization decreases, we should delay the total
2831 		 * utilization change until the task's 0-lag point.
2832 		 * But this would require setting the task's "inactive
2833 		 * timer" when the task is not inactive.
2834 		 */
2835 		__dl_sub(dl_b, p->dl.dl_bw, cpus);
2836 		__dl_add(dl_b, new_bw, cpus);
2837 		dl_change_utilization(p, new_bw);
2838 		err = 0;
2839 	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2840 		/*
2841 		 * Do not decrease the total deadline utilization here;
2842 		 * switched_from_dl() will take care of it at the correct
2843 		 * (0-lag) time.
2844 		 */
2845 		err = 0;
2846 	}
2847 	raw_spin_unlock(&dl_b->lock);
2848 
2849 	return err;
2850 }
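
/*
 * Illustrative example (numbers are only an example): on a 4-CPU root
 * domain with symmetric, full-capacity CPUs and the default 95% limit, a
 * new task asking for runtime = 25ms every period = 100ms requests
 * new_bw ~= 0.25; the request is admitted as long as the resulting
 * total_bw stays within ~3.8, otherwise sched_setattr() fails with
 * -EBUSY.
 */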
2851 
2852 /*
2853  * This function initializes the sched_dl_entity of a task that is
2854  * becoming SCHED_DEADLINE.
2855  *
2856  * Only the static values are considered here, the actual runtime and the
2857  * absolute deadline will be properly calculated when the task is enqueued
2858  * for the first time with its new policy.
2859  */
2860 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2861 {
2862 	struct sched_dl_entity *dl_se = &p->dl;
2863 
2864 	dl_se->dl_runtime = attr->sched_runtime;
2865 	dl_se->dl_deadline = attr->sched_deadline;
2866 	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2867 	dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
2868 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2869 	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2870 }
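
/*
 * Illustrative example (values are only an example): a task requesting
 * sched_runtime = 10ms and sched_deadline = sched_period = 100ms ends up
 * with:
 *
 *	dl_runtime  = 10000000
 *	dl_deadline = dl_period = 100000000
 *	dl_bw       = dl_density ~= 0.1 in BW_SHIFT (20 bit) fixed point,
 *		      i.e. ~104857
 *
 * (dl_bw and dl_density coincide here because deadline == period.)
 */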
2871 
2872 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2873 {
2874 	struct sched_dl_entity *dl_se = &p->dl;
2875 
2876 	attr->sched_priority = p->rt_priority;
2877 	attr->sched_runtime = dl_se->dl_runtime;
2878 	attr->sched_deadline = dl_se->dl_deadline;
2879 	attr->sched_period = dl_se->dl_period;
2880 	attr->sched_flags &= ~SCHED_DL_FLAGS;
2881 	attr->sched_flags |= dl_se->flags;
2882 }
2883 
2884 /*
2885  * Default limits for DL period; on the top end we guard against small util
2886  * tasks still getting ridiculously long effective runtimes; on the bottom end we
2887  * guard against timer DoS.
2888  */
2889 unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
2890 unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
2891 
2892 /*
2893  * This function validates the new parameters of a -deadline task.
2894  * We require the deadline to be non-zero and greater than or equal
2895  * to the runtime, and the period to be either zero or greater than
2896  * or equal to the deadline. Furthermore, we have to be sure that
2897  * user parameters are above the internal resolution of 1us (we
2898  * check sched_runtime only since it is always the smaller one) and
2899  * below 2^63 ns (we have to check both sched_deadline and
2900  * sched_period, as the latter can be zero).
2901  */
2902 bool __checkparam_dl(const struct sched_attr *attr)
2903 {
2904 	u64 period, max, min;
2905 
2906 	/* special dl tasks don't actually use any parameter */
2907 	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2908 		return true;
2909 
2910 	/* deadline != 0 */
2911 	if (attr->sched_deadline == 0)
2912 		return false;
2913 
2914 	/*
2915 	 * Since we truncate DL_SCALE bits, make sure we're at least
2916 	 * that big.
2917 	 */
2918 	if (attr->sched_runtime < (1ULL << DL_SCALE))
2919 		return false;
2920 
2921 	/*
2922 	 * Since we use the MSB for wrap-around and sign issues, make
2923 	 * sure it's not set (mind that period can be equal to zero).
2924 	 */
2925 	if (attr->sched_deadline & (1ULL << 63) ||
2926 	    attr->sched_period & (1ULL << 63))
2927 		return false;
2928 
2929 	period = attr->sched_period;
2930 	if (!period)
2931 		period = attr->sched_deadline;
2932 
2933 	/* runtime <= deadline <= period (if period != 0) */
2934 	if (period < attr->sched_deadline ||
2935 	    attr->sched_deadline < attr->sched_runtime)
2936 		return false;
2937 
2938 	max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
2939 	min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
2940 
2941 	if (period < min || period > max)
2942 		return false;
2943 
2944 	return true;
2945 }
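
/*
 * Illustrative outcomes of the checks above (values are only examples,
 * assuming the default period limits):
 *
 *	runtime = 10ms, deadline = 100ms, period = 0     -> accepted
 *		(period defaults to the deadline);
 *	runtime = 10ms, deadline = 5ms, period = 100ms   -> rejected
 *		(deadline < runtime);
 *	runtime = 500ns, deadline = period = 100ms       -> rejected
 *		(runtime below 1ULL << DL_SCALE, ~1us);
 *	runtime = 10ms, deadline = period = 10s          -> rejected
 *		(period above sysctl_sched_dl_period_max, ~4.2s by default).
 */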
2946 
2947 /*
2948  * This function clears the sched_dl_entity static params.
2949  */
2950 void __dl_clear_params(struct task_struct *p)
2951 {
2952 	struct sched_dl_entity *dl_se = &p->dl;
2953 
2954 	dl_se->dl_runtime		= 0;
2955 	dl_se->dl_deadline		= 0;
2956 	dl_se->dl_period		= 0;
2957 	dl_se->flags			= 0;
2958 	dl_se->dl_bw			= 0;
2959 	dl_se->dl_density		= 0;
2960 
2961 	dl_se->dl_throttled		= 0;
2962 	dl_se->dl_yielded		= 0;
2963 	dl_se->dl_non_contending	= 0;
2964 	dl_se->dl_overrun		= 0;
2965 
2966 #ifdef CONFIG_RT_MUTEXES
2967 	dl_se->pi_se			= dl_se;
2968 #endif
2969 }
2970 
2971 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2972 {
2973 	struct sched_dl_entity *dl_se = &p->dl;
2974 
2975 	if (dl_se->dl_runtime != attr->sched_runtime ||
2976 	    dl_se->dl_deadline != attr->sched_deadline ||
2977 	    dl_se->dl_period != attr->sched_period ||
2978 	    dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
2979 		return true;
2980 
2981 	return false;
2982 }
2983 
2984 #ifdef CONFIG_SMP
2985 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2986 				 const struct cpumask *trial)
2987 {
2988 	int ret = 1, trial_cpus;
2989 	struct dl_bw *cur_dl_b;
2990 	unsigned long flags;
2991 
2992 	rcu_read_lock_sched();
2993 	cur_dl_b = dl_bw_of(cpumask_any(cur));
2994 	trial_cpus = cpumask_weight(trial);
2995 
2996 	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2997 	if (cur_dl_b->bw != -1 &&
2998 	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2999 		ret = 0;
3000 	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
3001 	rcu_read_unlock_sched();
3002 
3003 	return ret;
3004 }
3005 
3006 int dl_cpu_busy(int cpu, struct task_struct *p)
3007 {
3008 	unsigned long flags, cap;
3009 	struct dl_bw *dl_b;
3010 	bool overflow;
3011 
3012 	rcu_read_lock_sched();
3013 	dl_b = dl_bw_of(cpu);
3014 	raw_spin_lock_irqsave(&dl_b->lock, flags);
3015 	cap = dl_bw_capacity(cpu);
3016 	overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
3017 
3018 	if (!overflow && p) {
3019 		/*
3020 		 * We reserve space for this task in the destination
3021 		 * root_domain, as we can't fail after this point.
3022 		 * We will free resources in the source root_domain
3023 		 * later on (see set_cpus_allowed_dl()).
3024 		 */
3025 		__dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
3026 	}
3027 
3028 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3029 	rcu_read_unlock_sched();
3030 
3031 	return overflow ? -EBUSY : 0;
3032 }
3033 #endif
3034 
3035 #ifdef CONFIG_SCHED_DEBUG
3036 void print_dl_stats(struct seq_file *m, int cpu)
3037 {
3038 	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
3039 }
3040 #endif /* CONFIG_SCHED_DEBUG */
3041