xref: /openbmc/linux/kernel/sched/deadline.c (revision aa0dc6a7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Deadline Scheduling Class (SCHED_DEADLINE)
4  *
5  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6  *
7  * Tasks that periodically executes their instances for less than their
8  * runtime won't miss any of their deadlines.
9  * Tasks that are not periodic or sporadic or that tries to execute more
10  * than their reserved bandwidth will be slowed down (and may potentially
11  * miss some of their deadlines), and won't affect any other task.
12  *
13  * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
14  *                    Juri Lelli <juri.lelli@gmail.com>,
15  *                    Michael Trimarchi <michael@amarulasolutions.com>,
16  *                    Fabio Checconi <fchecconi@gmail.com>
17  */
18 #include "sched.h"
19 #include "pelt.h"
20 
21 struct dl_bandwidth def_dl_bandwidth;
22 
23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24 {
25 	return container_of(dl_se, struct task_struct, dl);
26 }
27 
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29 {
30 	return container_of(dl_rq, struct rq, dl);
31 }
32 
33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34 {
35 	struct task_struct *p = dl_task_of(dl_se);
36 	struct rq *rq = task_rq(p);
37 
38 	return &rq->dl;
39 }
40 
41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42 {
43 	return !RB_EMPTY_NODE(&dl_se->rb_node);
44 }
45 
46 #ifdef CONFIG_RT_MUTEXES
47 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
48 {
49 	return dl_se->pi_se;
50 }
51 
52 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
53 {
54 	return pi_of(dl_se) != dl_se;
55 }
56 #else
57 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
58 {
59 	return dl_se;
60 }
61 
62 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
63 {
64 	return false;
65 }
66 #endif
67 
68 #ifdef CONFIG_SMP
69 static inline struct dl_bw *dl_bw_of(int i)
70 {
71 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
72 			 "sched RCU must be held");
73 	return &cpu_rq(i)->rd->dl_bw;
74 }
75 
76 static inline int dl_bw_cpus(int i)
77 {
78 	struct root_domain *rd = cpu_rq(i)->rd;
79 	int cpus;
80 
81 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
82 			 "sched RCU must be held");
83 
84 	if (cpumask_subset(rd->span, cpu_active_mask))
85 		return cpumask_weight(rd->span);
86 
87 	cpus = 0;
88 
89 	for_each_cpu_and(i, rd->span, cpu_active_mask)
90 		cpus++;
91 
92 	return cpus;
93 }
94 
95 static inline unsigned long __dl_bw_capacity(int i)
96 {
97 	struct root_domain *rd = cpu_rq(i)->rd;
98 	unsigned long cap = 0;
99 
100 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
101 			 "sched RCU must be held");
102 
103 	for_each_cpu_and(i, rd->span, cpu_active_mask)
104 		cap += capacity_orig_of(i);
105 
106 	return cap;
107 }
108 
109 /*
110  * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
111  * of the CPU the task is running on rather rd's \Sum CPU capacity.
112  */
113 static inline unsigned long dl_bw_capacity(int i)
114 {
115 	if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
116 	    capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
117 		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
118 	} else {
119 		return __dl_bw_capacity(i);
120 	}
121 }
122 
123 static inline bool dl_bw_visited(int cpu, u64 gen)
124 {
125 	struct root_domain *rd = cpu_rq(cpu)->rd;
126 
127 	if (rd->visit_gen == gen)
128 		return true;
129 
130 	rd->visit_gen = gen;
131 	return false;
132 }
133 #else
134 static inline struct dl_bw *dl_bw_of(int i)
135 {
136 	return &cpu_rq(i)->dl.dl_bw;
137 }
138 
139 static inline int dl_bw_cpus(int i)
140 {
141 	return 1;
142 }
143 
144 static inline unsigned long dl_bw_capacity(int i)
145 {
146 	return SCHED_CAPACITY_SCALE;
147 }
148 
149 static inline bool dl_bw_visited(int cpu, u64 gen)
150 {
151 	return false;
152 }
153 #endif
154 
155 static inline
156 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
157 {
158 	u64 old = dl_rq->running_bw;
159 
160 	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
161 	dl_rq->running_bw += dl_bw;
162 	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
163 	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
164 	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
165 	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
166 }
167 
168 static inline
169 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
170 {
171 	u64 old = dl_rq->running_bw;
172 
173 	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
174 	dl_rq->running_bw -= dl_bw;
175 	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
176 	if (dl_rq->running_bw > old)
177 		dl_rq->running_bw = 0;
178 	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
179 	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
180 }
181 
182 static inline
183 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
184 {
185 	u64 old = dl_rq->this_bw;
186 
187 	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
188 	dl_rq->this_bw += dl_bw;
189 	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
190 }
191 
192 static inline
193 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
194 {
195 	u64 old = dl_rq->this_bw;
196 
197 	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
198 	dl_rq->this_bw -= dl_bw;
199 	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
200 	if (dl_rq->this_bw > old)
201 		dl_rq->this_bw = 0;
202 	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
203 }
204 
205 static inline
206 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
207 {
208 	if (!dl_entity_is_special(dl_se))
209 		__add_rq_bw(dl_se->dl_bw, dl_rq);
210 }
211 
212 static inline
213 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
214 {
215 	if (!dl_entity_is_special(dl_se))
216 		__sub_rq_bw(dl_se->dl_bw, dl_rq);
217 }
218 
219 static inline
220 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
221 {
222 	if (!dl_entity_is_special(dl_se))
223 		__add_running_bw(dl_se->dl_bw, dl_rq);
224 }
225 
226 static inline
227 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
228 {
229 	if (!dl_entity_is_special(dl_se))
230 		__sub_running_bw(dl_se->dl_bw, dl_rq);
231 }
232 
233 static void dl_change_utilization(struct task_struct *p, u64 new_bw)
234 {
235 	struct rq *rq;
236 
237 	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
238 
239 	if (task_on_rq_queued(p))
240 		return;
241 
242 	rq = task_rq(p);
243 	if (p->dl.dl_non_contending) {
244 		sub_running_bw(&p->dl, &rq->dl);
245 		p->dl.dl_non_contending = 0;
246 		/*
247 		 * If the timer handler is currently running and the
248 		 * timer cannot be canceled, inactive_task_timer()
249 		 * will see that dl_not_contending is not set, and
250 		 * will not touch the rq's active utilization,
251 		 * so we are still safe.
252 		 */
253 		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
254 			put_task_struct(p);
255 	}
256 	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
257 	__add_rq_bw(new_bw, &rq->dl);
258 }
259 
260 /*
261  * The utilization of a task cannot be immediately removed from
262  * the rq active utilization (running_bw) when the task blocks.
263  * Instead, we have to wait for the so called "0-lag time".
264  *
265  * If a task blocks before the "0-lag time", a timer (the inactive
266  * timer) is armed, and running_bw is decreased when the timer
267  * fires.
268  *
269  * If the task wakes up again before the inactive timer fires,
270  * the timer is canceled, whereas if the task wakes up after the
271  * inactive timer fired (and running_bw has been decreased) the
272  * task's utilization has to be added to running_bw again.
273  * A flag in the deadline scheduling entity (dl_non_contending)
274  * is used to avoid race conditions between the inactive timer handler
275  * and task wakeups.
276  *
277  * The following diagram shows how running_bw is updated. A task is
278  * "ACTIVE" when its utilization contributes to running_bw; an
279  * "ACTIVE contending" task is in the TASK_RUNNING state, while an
280  * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
281  * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
282  * time already passed, which does not contribute to running_bw anymore.
283  *                              +------------------+
284  *             wakeup           |    ACTIVE        |
285  *          +------------------>+   contending     |
286  *          | add_running_bw    |                  |
287  *          |                   +----+------+------+
288  *          |                        |      ^
289  *          |                dequeue |      |
290  * +--------+-------+                |      |
291  * |                |   t >= 0-lag   |      | wakeup
292  * |    INACTIVE    |<---------------+      |
293  * |                | sub_running_bw |      |
294  * +--------+-------+                |      |
295  *          ^                        |      |
296  *          |              t < 0-lag |      |
297  *          |                        |      |
298  *          |                        V      |
299  *          |                   +----+------+------+
300  *          | sub_running_bw    |    ACTIVE        |
301  *          +-------------------+                  |
302  *            inactive timer    |  non contending  |
303  *            fired             +------------------+
304  *
305  * The task_non_contending() function is invoked when a task
306  * blocks, and checks if the 0-lag time already passed or
307  * not (in the first case, it directly updates running_bw;
308  * in the second case, it arms the inactive timer).
309  *
310  * The task_contending() function is invoked when a task wakes
311  * up, and checks if the task is still in the "ACTIVE non contending"
312  * state or not (in the second case, it updates running_bw).
313  */
314 static void task_non_contending(struct task_struct *p)
315 {
316 	struct sched_dl_entity *dl_se = &p->dl;
317 	struct hrtimer *timer = &dl_se->inactive_timer;
318 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
319 	struct rq *rq = rq_of_dl_rq(dl_rq);
320 	s64 zerolag_time;
321 
322 	/*
323 	 * If this is a non-deadline task that has been boosted,
324 	 * do nothing
325 	 */
326 	if (dl_se->dl_runtime == 0)
327 		return;
328 
329 	if (dl_entity_is_special(dl_se))
330 		return;
331 
332 	WARN_ON(dl_se->dl_non_contending);
333 
334 	zerolag_time = dl_se->deadline -
335 		 div64_long((dl_se->runtime * dl_se->dl_period),
336 			dl_se->dl_runtime);
337 
338 	/*
339 	 * Using relative times instead of the absolute "0-lag time"
340 	 * allows to simplify the code
341 	 */
342 	zerolag_time -= rq_clock(rq);
343 
344 	/*
345 	 * If the "0-lag time" already passed, decrease the active
346 	 * utilization now, instead of starting a timer
347 	 */
348 	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
349 		if (dl_task(p))
350 			sub_running_bw(dl_se, dl_rq);
351 		if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
352 			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
353 
354 			if (READ_ONCE(p->__state) == TASK_DEAD)
355 				sub_rq_bw(&p->dl, &rq->dl);
356 			raw_spin_lock(&dl_b->lock);
357 			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
358 			__dl_clear_params(p);
359 			raw_spin_unlock(&dl_b->lock);
360 		}
361 
362 		return;
363 	}
364 
365 	dl_se->dl_non_contending = 1;
366 	get_task_struct(p);
367 	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
368 }
369 
370 static void task_contending(struct sched_dl_entity *dl_se, int flags)
371 {
372 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
373 
374 	/*
375 	 * If this is a non-deadline task that has been boosted,
376 	 * do nothing
377 	 */
378 	if (dl_se->dl_runtime == 0)
379 		return;
380 
381 	if (flags & ENQUEUE_MIGRATED)
382 		add_rq_bw(dl_se, dl_rq);
383 
384 	if (dl_se->dl_non_contending) {
385 		dl_se->dl_non_contending = 0;
386 		/*
387 		 * If the timer handler is currently running and the
388 		 * timer cannot be canceled, inactive_task_timer()
389 		 * will see that dl_not_contending is not set, and
390 		 * will not touch the rq's active utilization,
391 		 * so we are still safe.
392 		 */
393 		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
394 			put_task_struct(dl_task_of(dl_se));
395 	} else {
396 		/*
397 		 * Since "dl_non_contending" is not set, the
398 		 * task's utilization has already been removed from
399 		 * active utilization (either when the task blocked,
400 		 * when the "inactive timer" fired).
401 		 * So, add it back.
402 		 */
403 		add_running_bw(dl_se, dl_rq);
404 	}
405 }
406 
407 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
408 {
409 	struct sched_dl_entity *dl_se = &p->dl;
410 
411 	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
412 }
413 
414 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
415 
416 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
417 {
418 	raw_spin_lock_init(&dl_b->dl_runtime_lock);
419 	dl_b->dl_period = period;
420 	dl_b->dl_runtime = runtime;
421 }
422 
423 void init_dl_bw(struct dl_bw *dl_b)
424 {
425 	raw_spin_lock_init(&dl_b->lock);
426 	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
427 	if (global_rt_runtime() == RUNTIME_INF)
428 		dl_b->bw = -1;
429 	else
430 		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
431 	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
432 	dl_b->total_bw = 0;
433 }
434 
435 void init_dl_rq(struct dl_rq *dl_rq)
436 {
437 	dl_rq->root = RB_ROOT_CACHED;
438 
439 #ifdef CONFIG_SMP
440 	/* zero means no -deadline tasks */
441 	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
442 
443 	dl_rq->dl_nr_migratory = 0;
444 	dl_rq->overloaded = 0;
445 	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
446 #else
447 	init_dl_bw(&dl_rq->dl_bw);
448 #endif
449 
450 	dl_rq->running_bw = 0;
451 	dl_rq->this_bw = 0;
452 	init_dl_rq_bw_ratio(dl_rq);
453 }
454 
455 #ifdef CONFIG_SMP
456 
457 static inline int dl_overloaded(struct rq *rq)
458 {
459 	return atomic_read(&rq->rd->dlo_count);
460 }
461 
462 static inline void dl_set_overload(struct rq *rq)
463 {
464 	if (!rq->online)
465 		return;
466 
467 	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
468 	/*
469 	 * Must be visible before the overload count is
470 	 * set (as in sched_rt.c).
471 	 *
472 	 * Matched by the barrier in pull_dl_task().
473 	 */
474 	smp_wmb();
475 	atomic_inc(&rq->rd->dlo_count);
476 }
477 
478 static inline void dl_clear_overload(struct rq *rq)
479 {
480 	if (!rq->online)
481 		return;
482 
483 	atomic_dec(&rq->rd->dlo_count);
484 	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
485 }
486 
487 static void update_dl_migration(struct dl_rq *dl_rq)
488 {
489 	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
490 		if (!dl_rq->overloaded) {
491 			dl_set_overload(rq_of_dl_rq(dl_rq));
492 			dl_rq->overloaded = 1;
493 		}
494 	} else if (dl_rq->overloaded) {
495 		dl_clear_overload(rq_of_dl_rq(dl_rq));
496 		dl_rq->overloaded = 0;
497 	}
498 }
499 
500 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
501 {
502 	struct task_struct *p = dl_task_of(dl_se);
503 
504 	if (p->nr_cpus_allowed > 1)
505 		dl_rq->dl_nr_migratory++;
506 
507 	update_dl_migration(dl_rq);
508 }
509 
510 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
511 {
512 	struct task_struct *p = dl_task_of(dl_se);
513 
514 	if (p->nr_cpus_allowed > 1)
515 		dl_rq->dl_nr_migratory--;
516 
517 	update_dl_migration(dl_rq);
518 }
519 
520 #define __node_2_pdl(node) \
521 	rb_entry((node), struct task_struct, pushable_dl_tasks)
522 
523 static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
524 {
525 	return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
526 }
527 
528 /*
529  * The list of pushable -deadline task is not a plist, like in
530  * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
531  */
532 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
533 {
534 	struct rb_node *leftmost;
535 
536 	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
537 
538 	leftmost = rb_add_cached(&p->pushable_dl_tasks,
539 				 &rq->dl.pushable_dl_tasks_root,
540 				 __pushable_less);
541 	if (leftmost)
542 		rq->dl.earliest_dl.next = p->dl.deadline;
543 }
544 
545 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
546 {
547 	struct dl_rq *dl_rq = &rq->dl;
548 	struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
549 	struct rb_node *leftmost;
550 
551 	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
552 		return;
553 
554 	leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
555 	if (leftmost)
556 		dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
557 
558 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
559 }
560 
561 static inline int has_pushable_dl_tasks(struct rq *rq)
562 {
563 	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
564 }
565 
566 static int push_dl_task(struct rq *rq);
567 
568 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
569 {
570 	return rq->online && dl_task(prev);
571 }
572 
573 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
574 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
575 
576 static void push_dl_tasks(struct rq *);
577 static void pull_dl_task(struct rq *);
578 
579 static inline void deadline_queue_push_tasks(struct rq *rq)
580 {
581 	if (!has_pushable_dl_tasks(rq))
582 		return;
583 
584 	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
585 }
586 
587 static inline void deadline_queue_pull_task(struct rq *rq)
588 {
589 	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
590 }
591 
592 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
593 
594 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
595 {
596 	struct rq *later_rq = NULL;
597 	struct dl_bw *dl_b;
598 
599 	later_rq = find_lock_later_rq(p, rq);
600 	if (!later_rq) {
601 		int cpu;
602 
603 		/*
604 		 * If we cannot preempt any rq, fall back to pick any
605 		 * online CPU:
606 		 */
607 		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
608 		if (cpu >= nr_cpu_ids) {
609 			/*
610 			 * Failed to find any suitable CPU.
611 			 * The task will never come back!
612 			 */
613 			BUG_ON(dl_bandwidth_enabled());
614 
615 			/*
616 			 * If admission control is disabled we
617 			 * try a little harder to let the task
618 			 * run.
619 			 */
620 			cpu = cpumask_any(cpu_active_mask);
621 		}
622 		later_rq = cpu_rq(cpu);
623 		double_lock_balance(rq, later_rq);
624 	}
625 
626 	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
627 		/*
628 		 * Inactive timer is armed (or callback is running, but
629 		 * waiting for us to release rq locks). In any case, when it
630 		 * will fire (or continue), it will see running_bw of this
631 		 * task migrated to later_rq (and correctly handle it).
632 		 */
633 		sub_running_bw(&p->dl, &rq->dl);
634 		sub_rq_bw(&p->dl, &rq->dl);
635 
636 		add_rq_bw(&p->dl, &later_rq->dl);
637 		add_running_bw(&p->dl, &later_rq->dl);
638 	} else {
639 		sub_rq_bw(&p->dl, &rq->dl);
640 		add_rq_bw(&p->dl, &later_rq->dl);
641 	}
642 
643 	/*
644 	 * And we finally need to fixup root_domain(s) bandwidth accounting,
645 	 * since p is still hanging out in the old (now moved to default) root
646 	 * domain.
647 	 */
648 	dl_b = &rq->rd->dl_bw;
649 	raw_spin_lock(&dl_b->lock);
650 	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
651 	raw_spin_unlock(&dl_b->lock);
652 
653 	dl_b = &later_rq->rd->dl_bw;
654 	raw_spin_lock(&dl_b->lock);
655 	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
656 	raw_spin_unlock(&dl_b->lock);
657 
658 	set_task_cpu(p, later_rq->cpu);
659 	double_unlock_balance(later_rq, rq);
660 
661 	return later_rq;
662 }
663 
664 #else
665 
666 static inline
667 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
668 {
669 }
670 
671 static inline
672 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
673 {
674 }
675 
676 static inline
677 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
678 {
679 }
680 
681 static inline
682 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
683 {
684 }
685 
686 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
687 {
688 	return false;
689 }
690 
691 static inline void pull_dl_task(struct rq *rq)
692 {
693 }
694 
695 static inline void deadline_queue_push_tasks(struct rq *rq)
696 {
697 }
698 
699 static inline void deadline_queue_pull_task(struct rq *rq)
700 {
701 }
702 #endif /* CONFIG_SMP */
703 
704 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
705 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
706 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
707 
708 /*
709  * We are being explicitly informed that a new instance is starting,
710  * and this means that:
711  *  - the absolute deadline of the entity has to be placed at
712  *    current time + relative deadline;
713  *  - the runtime of the entity has to be set to the maximum value.
714  *
715  * The capability of specifying such event is useful whenever a -deadline
716  * entity wants to (try to!) synchronize its behaviour with the scheduler's
717  * one, and to (try to!) reconcile itself with its own scheduling
718  * parameters.
719  */
720 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
721 {
722 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
723 	struct rq *rq = rq_of_dl_rq(dl_rq);
724 
725 	WARN_ON(is_dl_boosted(dl_se));
726 	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
727 
728 	/*
729 	 * We are racing with the deadline timer. So, do nothing because
730 	 * the deadline timer handler will take care of properly recharging
731 	 * the runtime and postponing the deadline
732 	 */
733 	if (dl_se->dl_throttled)
734 		return;
735 
736 	/*
737 	 * We use the regular wall clock time to set deadlines in the
738 	 * future; in fact, we must consider execution overheads (time
739 	 * spent on hardirq context, etc.).
740 	 */
741 	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
742 	dl_se->runtime = dl_se->dl_runtime;
743 }
744 
745 /*
746  * Pure Earliest Deadline First (EDF) scheduling does not deal with the
747  * possibility of a entity lasting more than what it declared, and thus
748  * exhausting its runtime.
749  *
750  * Here we are interested in making runtime overrun possible, but we do
751  * not want a entity which is misbehaving to affect the scheduling of all
752  * other entities.
753  * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
754  * is used, in order to confine each entity within its own bandwidth.
755  *
756  * This function deals exactly with that, and ensures that when the runtime
757  * of a entity is replenished, its deadline is also postponed. That ensures
758  * the overrunning entity can't interfere with other entity in the system and
759  * can't make them miss their deadlines. Reasons why this kind of overruns
760  * could happen are, typically, a entity voluntarily trying to overcome its
761  * runtime, or it just underestimated it during sched_setattr().
762  */
763 static void replenish_dl_entity(struct sched_dl_entity *dl_se)
764 {
765 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
766 	struct rq *rq = rq_of_dl_rq(dl_rq);
767 
768 	BUG_ON(pi_of(dl_se)->dl_runtime <= 0);
769 
770 	/*
771 	 * This could be the case for a !-dl task that is boosted.
772 	 * Just go with full inherited parameters.
773 	 */
774 	if (dl_se->dl_deadline == 0) {
775 		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
776 		dl_se->runtime = pi_of(dl_se)->dl_runtime;
777 	}
778 
779 	if (dl_se->dl_yielded && dl_se->runtime > 0)
780 		dl_se->runtime = 0;
781 
782 	/*
783 	 * We keep moving the deadline away until we get some
784 	 * available runtime for the entity. This ensures correct
785 	 * handling of situations where the runtime overrun is
786 	 * arbitrary large.
787 	 */
788 	while (dl_se->runtime <= 0) {
789 		dl_se->deadline += pi_of(dl_se)->dl_period;
790 		dl_se->runtime += pi_of(dl_se)->dl_runtime;
791 	}
792 
793 	/*
794 	 * At this point, the deadline really should be "in
795 	 * the future" with respect to rq->clock. If it's
796 	 * not, we are, for some reason, lagging too much!
797 	 * Anyway, after having warn userspace abut that,
798 	 * we still try to keep the things running by
799 	 * resetting the deadline and the budget of the
800 	 * entity.
801 	 */
802 	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
803 		printk_deferred_once("sched: DL replenish lagged too much\n");
804 		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
805 		dl_se->runtime = pi_of(dl_se)->dl_runtime;
806 	}
807 
808 	if (dl_se->dl_yielded)
809 		dl_se->dl_yielded = 0;
810 	if (dl_se->dl_throttled)
811 		dl_se->dl_throttled = 0;
812 }
813 
814 /*
815  * Here we check if --at time t-- an entity (which is probably being
816  * [re]activated or, in general, enqueued) can use its remaining runtime
817  * and its current deadline _without_ exceeding the bandwidth it is
818  * assigned (function returns true if it can't). We are in fact applying
819  * one of the CBS rules: when a task wakes up, if the residual runtime
820  * over residual deadline fits within the allocated bandwidth, then we
821  * can keep the current (absolute) deadline and residual budget without
822  * disrupting the schedulability of the system. Otherwise, we should
823  * refill the runtime and set the deadline a period in the future,
824  * because keeping the current (absolute) deadline of the task would
825  * result in breaking guarantees promised to other tasks (refer to
826  * Documentation/scheduler/sched-deadline.rst for more information).
827  *
828  * This function returns true if:
829  *
830  *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
831  *
832  * IOW we can't recycle current parameters.
833  *
834  * Notice that the bandwidth check is done against the deadline. For
835  * task with deadline equal to period this is the same of using
836  * dl_period instead of dl_deadline in the equation above.
837  */
838 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
839 {
840 	u64 left, right;
841 
842 	/*
843 	 * left and right are the two sides of the equation above,
844 	 * after a bit of shuffling to use multiplications instead
845 	 * of divisions.
846 	 *
847 	 * Note that none of the time values involved in the two
848 	 * multiplications are absolute: dl_deadline and dl_runtime
849 	 * are the relative deadline and the maximum runtime of each
850 	 * instance, runtime is the runtime left for the last instance
851 	 * and (deadline - t), since t is rq->clock, is the time left
852 	 * to the (absolute) deadline. Even if overflowing the u64 type
853 	 * is very unlikely to occur in both cases, here we scale down
854 	 * as we want to avoid that risk at all. Scaling down by 10
855 	 * means that we reduce granularity to 1us. We are fine with it,
856 	 * since this is only a true/false check and, anyway, thinking
857 	 * of anything below microseconds resolution is actually fiction
858 	 * (but still we want to give the user that illusion >;).
859 	 */
860 	left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
861 	right = ((dl_se->deadline - t) >> DL_SCALE) *
862 		(pi_of(dl_se)->dl_runtime >> DL_SCALE);
863 
864 	return dl_time_before(right, left);
865 }
866 
867 /*
868  * Revised wakeup rule [1]: For self-suspending tasks, rather then
869  * re-initializing task's runtime and deadline, the revised wakeup
870  * rule adjusts the task's runtime to avoid the task to overrun its
871  * density.
872  *
873  * Reasoning: a task may overrun the density if:
874  *    runtime / (deadline - t) > dl_runtime / dl_deadline
875  *
876  * Therefore, runtime can be adjusted to:
877  *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
878  *
879  * In such way that runtime will be equal to the maximum density
880  * the task can use without breaking any rule.
881  *
882  * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
883  * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
884  */
885 static void
886 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
887 {
888 	u64 laxity = dl_se->deadline - rq_clock(rq);
889 
890 	/*
891 	 * If the task has deadline < period, and the deadline is in the past,
892 	 * it should already be throttled before this check.
893 	 *
894 	 * See update_dl_entity() comments for further details.
895 	 */
896 	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
897 
898 	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
899 }
900 
901 /*
902  * Regarding the deadline, a task with implicit deadline has a relative
903  * deadline == relative period. A task with constrained deadline has a
904  * relative deadline <= relative period.
905  *
906  * We support constrained deadline tasks. However, there are some restrictions
907  * applied only for tasks which do not have an implicit deadline. See
908  * update_dl_entity() to know more about such restrictions.
909  *
910  * The dl_is_implicit() returns true if the task has an implicit deadline.
911  */
912 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
913 {
914 	return dl_se->dl_deadline == dl_se->dl_period;
915 }
916 
917 /*
918  * When a deadline entity is placed in the runqueue, its runtime and deadline
919  * might need to be updated. This is done by a CBS wake up rule. There are two
920  * different rules: 1) the original CBS; and 2) the Revisited CBS.
921  *
922  * When the task is starting a new period, the Original CBS is used. In this
923  * case, the runtime is replenished and a new absolute deadline is set.
924  *
925  * When a task is queued before the begin of the next period, using the
926  * remaining runtime and deadline could make the entity to overflow, see
927  * dl_entity_overflow() to find more about runtime overflow. When such case
928  * is detected, the runtime and deadline need to be updated.
929  *
930  * If the task has an implicit deadline, i.e., deadline == period, the Original
931  * CBS is applied. the runtime is replenished and a new absolute deadline is
932  * set, as in the previous cases.
933  *
934  * However, the Original CBS does not work properly for tasks with
935  * deadline < period, which are said to have a constrained deadline. By
936  * applying the Original CBS, a constrained deadline task would be able to run
937  * runtime/deadline in a period. With deadline < period, the task would
938  * overrun the runtime/period allowed bandwidth, breaking the admission test.
939  *
940  * In order to prevent this misbehave, the Revisited CBS is used for
941  * constrained deadline tasks when a runtime overflow is detected. In the
942  * Revisited CBS, rather than replenishing & setting a new absolute deadline,
943  * the remaining runtime of the task is reduced to avoid runtime overflow.
944  * Please refer to the comments update_dl_revised_wakeup() function to find
945  * more about the Revised CBS rule.
946  */
947 static void update_dl_entity(struct sched_dl_entity *dl_se)
948 {
949 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
950 	struct rq *rq = rq_of_dl_rq(dl_rq);
951 
952 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
953 	    dl_entity_overflow(dl_se, rq_clock(rq))) {
954 
955 		if (unlikely(!dl_is_implicit(dl_se) &&
956 			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
957 			     !is_dl_boosted(dl_se))) {
958 			update_dl_revised_wakeup(dl_se, rq);
959 			return;
960 		}
961 
962 		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
963 		dl_se->runtime = pi_of(dl_se)->dl_runtime;
964 	}
965 }
966 
967 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
968 {
969 	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
970 }
971 
972 /*
973  * If the entity depleted all its runtime, and if we want it to sleep
974  * while waiting for some new execution time to become available, we
975  * set the bandwidth replenishment timer to the replenishment instant
976  * and try to activate it.
977  *
978  * Notice that it is important for the caller to know if the timer
979  * actually started or not (i.e., the replenishment instant is in
980  * the future or in the past).
981  */
982 static int start_dl_timer(struct task_struct *p)
983 {
984 	struct sched_dl_entity *dl_se = &p->dl;
985 	struct hrtimer *timer = &dl_se->dl_timer;
986 	struct rq *rq = task_rq(p);
987 	ktime_t now, act;
988 	s64 delta;
989 
990 	lockdep_assert_rq_held(rq);
991 
992 	/*
993 	 * We want the timer to fire at the deadline, but considering
994 	 * that it is actually coming from rq->clock and not from
995 	 * hrtimer's time base reading.
996 	 */
997 	act = ns_to_ktime(dl_next_period(dl_se));
998 	now = hrtimer_cb_get_time(timer);
999 	delta = ktime_to_ns(now) - rq_clock(rq);
1000 	act = ktime_add_ns(act, delta);
1001 
1002 	/*
1003 	 * If the expiry time already passed, e.g., because the value
1004 	 * chosen as the deadline is too small, don't even try to
1005 	 * start the timer in the past!
1006 	 */
1007 	if (ktime_us_delta(act, now) < 0)
1008 		return 0;
1009 
1010 	/*
1011 	 * !enqueued will guarantee another callback; even if one is already in
1012 	 * progress. This ensures a balanced {get,put}_task_struct().
1013 	 *
1014 	 * The race against __run_timer() clearing the enqueued state is
1015 	 * harmless because we're holding task_rq()->lock, therefore the timer
1016 	 * expiring after we've done the check will wait on its task_rq_lock()
1017 	 * and observe our state.
1018 	 */
1019 	if (!hrtimer_is_queued(timer)) {
1020 		get_task_struct(p);
1021 		hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
1022 	}
1023 
1024 	return 1;
1025 }
1026 
1027 /*
1028  * This is the bandwidth enforcement timer callback. If here, we know
1029  * a task is not on its dl_rq, since the fact that the timer was running
1030  * means the task is throttled and needs a runtime replenishment.
1031  *
1032  * However, what we actually do depends on the fact the task is active,
1033  * (it is on its rq) or has been removed from there by a call to
1034  * dequeue_task_dl(). In the former case we must issue the runtime
1035  * replenishment and add the task back to the dl_rq; in the latter, we just
1036  * do nothing but clearing dl_throttled, so that runtime and deadline
1037  * updating (and the queueing back to dl_rq) will be done by the
1038  * next call to enqueue_task_dl().
1039  */
1040 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1041 {
1042 	struct sched_dl_entity *dl_se = container_of(timer,
1043 						     struct sched_dl_entity,
1044 						     dl_timer);
1045 	struct task_struct *p = dl_task_of(dl_se);
1046 	struct rq_flags rf;
1047 	struct rq *rq;
1048 
1049 	rq = task_rq_lock(p, &rf);
1050 
1051 	/*
1052 	 * The task might have changed its scheduling policy to something
1053 	 * different than SCHED_DEADLINE (through switched_from_dl()).
1054 	 */
1055 	if (!dl_task(p))
1056 		goto unlock;
1057 
1058 	/*
1059 	 * The task might have been boosted by someone else and might be in the
1060 	 * boosting/deboosting path, its not throttled.
1061 	 */
1062 	if (is_dl_boosted(dl_se))
1063 		goto unlock;
1064 
1065 	/*
1066 	 * Spurious timer due to start_dl_timer() race; or we already received
1067 	 * a replenishment from rt_mutex_setprio().
1068 	 */
1069 	if (!dl_se->dl_throttled)
1070 		goto unlock;
1071 
1072 	sched_clock_tick();
1073 	update_rq_clock(rq);
1074 
1075 	/*
1076 	 * If the throttle happened during sched-out; like:
1077 	 *
1078 	 *   schedule()
1079 	 *     deactivate_task()
1080 	 *       dequeue_task_dl()
1081 	 *         update_curr_dl()
1082 	 *           start_dl_timer()
1083 	 *         __dequeue_task_dl()
1084 	 *     prev->on_rq = 0;
1085 	 *
1086 	 * We can be both throttled and !queued. Replenish the counter
1087 	 * but do not enqueue -- wait for our wakeup to do that.
1088 	 */
1089 	if (!task_on_rq_queued(p)) {
1090 		replenish_dl_entity(dl_se);
1091 		goto unlock;
1092 	}
1093 
1094 #ifdef CONFIG_SMP
1095 	if (unlikely(!rq->online)) {
1096 		/*
1097 		 * If the runqueue is no longer available, migrate the
1098 		 * task elsewhere. This necessarily changes rq.
1099 		 */
1100 		lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
1101 		rq = dl_task_offline_migration(rq, p);
1102 		rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
1103 		update_rq_clock(rq);
1104 
1105 		/*
1106 		 * Now that the task has been migrated to the new RQ and we
1107 		 * have that locked, proceed as normal and enqueue the task
1108 		 * there.
1109 		 */
1110 	}
1111 #endif
1112 
1113 	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1114 	if (dl_task(rq->curr))
1115 		check_preempt_curr_dl(rq, p, 0);
1116 	else
1117 		resched_curr(rq);
1118 
1119 #ifdef CONFIG_SMP
1120 	/*
1121 	 * Queueing this task back might have overloaded rq, check if we need
1122 	 * to kick someone away.
1123 	 */
1124 	if (has_pushable_dl_tasks(rq)) {
1125 		/*
1126 		 * Nothing relies on rq->lock after this, so its safe to drop
1127 		 * rq->lock.
1128 		 */
1129 		rq_unpin_lock(rq, &rf);
1130 		push_dl_task(rq);
1131 		rq_repin_lock(rq, &rf);
1132 	}
1133 #endif
1134 
1135 unlock:
1136 	task_rq_unlock(rq, p, &rf);
1137 
1138 	/*
1139 	 * This can free the task_struct, including this hrtimer, do not touch
1140 	 * anything related to that after this.
1141 	 */
1142 	put_task_struct(p);
1143 
1144 	return HRTIMER_NORESTART;
1145 }
1146 
1147 void init_dl_task_timer(struct sched_dl_entity *dl_se)
1148 {
1149 	struct hrtimer *timer = &dl_se->dl_timer;
1150 
1151 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1152 	timer->function = dl_task_timer;
1153 }
1154 
1155 /*
1156  * During the activation, CBS checks if it can reuse the current task's
1157  * runtime and period. If the deadline of the task is in the past, CBS
1158  * cannot use the runtime, and so it replenishes the task. This rule
1159  * works fine for implicit deadline tasks (deadline == period), and the
1160  * CBS was designed for implicit deadline tasks. However, a task with
1161  * constrained deadline (deadline < period) might be awakened after the
1162  * deadline, but before the next period. In this case, replenishing the
1163  * task would allow it to run for runtime / deadline. As in this case
1164  * deadline < period, CBS enables a task to run for more than the
1165  * runtime / period. In a very loaded system, this can cause a domino
1166  * effect, making other tasks miss their deadlines.
1167  *
1168  * To avoid this problem, in the activation of a constrained deadline
1169  * task after the deadline but before the next period, throttle the
1170  * task and set the replenishing timer to the begin of the next period,
1171  * unless it is boosted.
1172  */
1173 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1174 {
1175 	struct task_struct *p = dl_task_of(dl_se);
1176 	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1177 
1178 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1179 	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1180 		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
1181 			return;
1182 		dl_se->dl_throttled = 1;
1183 		if (dl_se->runtime > 0)
1184 			dl_se->runtime = 0;
1185 	}
1186 }
1187 
1188 static
1189 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1190 {
1191 	return (dl_se->runtime <= 0);
1192 }
1193 
1194 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1195 
1196 /*
1197  * This function implements the GRUB accounting rule:
1198  * according to the GRUB reclaiming algorithm, the runtime is
1199  * not decreased as "dq = -dt", but as
1200  * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1201  * where u is the utilization of the task, Umax is the maximum reclaimable
1202  * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1203  * as the difference between the "total runqueue utilization" and the
1204  * runqueue active utilization, and Uextra is the (per runqueue) extra
1205  * reclaimable utilization.
1206  * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1207  * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1208  * BW_SHIFT.
1209  * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
1210  * dl_bw is multiped by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1211  * Since delta is a 64 bit variable, to have an overflow its value
1212  * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1213  * So, overflow is not an issue here.
1214  */
1215 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1216 {
1217 	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1218 	u64 u_act;
1219 	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1220 
1221 	/*
1222 	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1223 	 * we compare u_inact + rq->dl.extra_bw with
1224 	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1225 	 * u_inact + rq->dl.extra_bw can be larger than
1226 	 * 1 * (so, 1 - u_inact - rq->dl.extra_bw would be negative
1227 	 * leading to wrong results)
1228 	 */
1229 	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1230 		u_act = u_act_min;
1231 	else
1232 		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1233 
1234 	return (delta * u_act) >> BW_SHIFT;
1235 }
1236 
1237 /*
1238  * Update the current task's runtime statistics (provided it is still
1239  * a -deadline task and has not been removed from the dl_rq).
1240  */
1241 static void update_curr_dl(struct rq *rq)
1242 {
1243 	struct task_struct *curr = rq->curr;
1244 	struct sched_dl_entity *dl_se = &curr->dl;
1245 	u64 delta_exec, scaled_delta_exec;
1246 	int cpu = cpu_of(rq);
1247 	u64 now;
1248 
1249 	if (!dl_task(curr) || !on_dl_rq(dl_se))
1250 		return;
1251 
1252 	/*
1253 	 * Consumed budget is computed considering the time as
1254 	 * observed by schedulable tasks (excluding time spent
1255 	 * in hardirq context, etc.). Deadlines are instead
1256 	 * computed using hard walltime. This seems to be the more
1257 	 * natural solution, but the full ramifications of this
1258 	 * approach need further study.
1259 	 */
1260 	now = rq_clock_task(rq);
1261 	delta_exec = now - curr->se.exec_start;
1262 	if (unlikely((s64)delta_exec <= 0)) {
1263 		if (unlikely(dl_se->dl_yielded))
1264 			goto throttle;
1265 		return;
1266 	}
1267 
1268 	schedstat_set(curr->se.statistics.exec_max,
1269 		      max(curr->se.statistics.exec_max, delta_exec));
1270 
1271 	curr->se.sum_exec_runtime += delta_exec;
1272 	account_group_exec_runtime(curr, delta_exec);
1273 
1274 	curr->se.exec_start = now;
1275 	cgroup_account_cputime(curr, delta_exec);
1276 
1277 	if (dl_entity_is_special(dl_se))
1278 		return;
1279 
1280 	/*
1281 	 * For tasks that participate in GRUB, we implement GRUB-PA: the
1282 	 * spare reclaimed bandwidth is used to clock down frequency.
1283 	 *
1284 	 * For the others, we still need to scale reservation parameters
1285 	 * according to current frequency and CPU maximum capacity.
1286 	 */
1287 	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1288 		scaled_delta_exec = grub_reclaim(delta_exec,
1289 						 rq,
1290 						 &curr->dl);
1291 	} else {
1292 		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1293 		unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1294 
1295 		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1296 		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1297 	}
1298 
1299 	dl_se->runtime -= scaled_delta_exec;
1300 
1301 throttle:
1302 	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1303 		dl_se->dl_throttled = 1;
1304 
1305 		/* If requested, inform the user about runtime overruns. */
1306 		if (dl_runtime_exceeded(dl_se) &&
1307 		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1308 			dl_se->dl_overrun = 1;
1309 
1310 		__dequeue_task_dl(rq, curr, 0);
1311 		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
1312 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1313 
1314 		if (!is_leftmost(curr, &rq->dl))
1315 			resched_curr(rq);
1316 	}
1317 
1318 	/*
1319 	 * Because -- for now -- we share the rt bandwidth, we need to
1320 	 * account our runtime there too, otherwise actual rt tasks
1321 	 * would be able to exceed the shared quota.
1322 	 *
1323 	 * Account to the root rt group for now.
1324 	 *
1325 	 * The solution we're working towards is having the RT groups scheduled
1326 	 * using deadline servers -- however there's a few nasties to figure
1327 	 * out before that can happen.
1328 	 */
1329 	if (rt_bandwidth_enabled()) {
1330 		struct rt_rq *rt_rq = &rq->rt;
1331 
1332 		raw_spin_lock(&rt_rq->rt_runtime_lock);
1333 		/*
1334 		 * We'll let actual RT tasks worry about the overflow here, we
1335 		 * have our own CBS to keep us inline; only account when RT
1336 		 * bandwidth is relevant.
1337 		 */
1338 		if (sched_rt_bandwidth_account(rt_rq))
1339 			rt_rq->rt_time += delta_exec;
1340 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
1341 	}
1342 }
1343 
1344 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1345 {
1346 	struct sched_dl_entity *dl_se = container_of(timer,
1347 						     struct sched_dl_entity,
1348 						     inactive_timer);
1349 	struct task_struct *p = dl_task_of(dl_se);
1350 	struct rq_flags rf;
1351 	struct rq *rq;
1352 
1353 	rq = task_rq_lock(p, &rf);
1354 
1355 	sched_clock_tick();
1356 	update_rq_clock(rq);
1357 
1358 	if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
1359 		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1360 
1361 		if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
1362 			sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1363 			sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1364 			dl_se->dl_non_contending = 0;
1365 		}
1366 
1367 		raw_spin_lock(&dl_b->lock);
1368 		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1369 		raw_spin_unlock(&dl_b->lock);
1370 		__dl_clear_params(p);
1371 
1372 		goto unlock;
1373 	}
1374 	if (dl_se->dl_non_contending == 0)
1375 		goto unlock;
1376 
1377 	sub_running_bw(dl_se, &rq->dl);
1378 	dl_se->dl_non_contending = 0;
1379 unlock:
1380 	task_rq_unlock(rq, p, &rf);
1381 	put_task_struct(p);
1382 
1383 	return HRTIMER_NORESTART;
1384 }
1385 
1386 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1387 {
1388 	struct hrtimer *timer = &dl_se->inactive_timer;
1389 
1390 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1391 	timer->function = inactive_task_timer;
1392 }
1393 
1394 #ifdef CONFIG_SMP
1395 
1396 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1397 {
1398 	struct rq *rq = rq_of_dl_rq(dl_rq);
1399 
1400 	if (dl_rq->earliest_dl.curr == 0 ||
1401 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1402 		if (dl_rq->earliest_dl.curr == 0)
1403 			cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1404 		dl_rq->earliest_dl.curr = deadline;
1405 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1406 	}
1407 }
1408 
1409 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1410 {
1411 	struct rq *rq = rq_of_dl_rq(dl_rq);
1412 
1413 	/*
1414 	 * Since we may have removed our earliest (and/or next earliest)
1415 	 * task we must recompute them.
1416 	 */
1417 	if (!dl_rq->dl_nr_running) {
1418 		dl_rq->earliest_dl.curr = 0;
1419 		dl_rq->earliest_dl.next = 0;
1420 		cpudl_clear(&rq->rd->cpudl, rq->cpu);
1421 		cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1422 	} else {
1423 		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1424 		struct sched_dl_entity *entry;
1425 
1426 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1427 		dl_rq->earliest_dl.curr = entry->deadline;
1428 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1429 	}
1430 }
1431 
1432 #else
1433 
1434 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1435 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1436 
1437 #endif /* CONFIG_SMP */
1438 
1439 static inline
1440 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1441 {
1442 	int prio = dl_task_of(dl_se)->prio;
1443 	u64 deadline = dl_se->deadline;
1444 
1445 	WARN_ON(!dl_prio(prio));
1446 	dl_rq->dl_nr_running++;
1447 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
1448 
1449 	inc_dl_deadline(dl_rq, deadline);
1450 	inc_dl_migration(dl_se, dl_rq);
1451 }
1452 
1453 static inline
1454 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1455 {
1456 	int prio = dl_task_of(dl_se)->prio;
1457 
1458 	WARN_ON(!dl_prio(prio));
1459 	WARN_ON(!dl_rq->dl_nr_running);
1460 	dl_rq->dl_nr_running--;
1461 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1462 
1463 	dec_dl_deadline(dl_rq, dl_se->deadline);
1464 	dec_dl_migration(dl_se, dl_rq);
1465 }
1466 
1467 #define __node_2_dle(node) \
1468 	rb_entry((node), struct sched_dl_entity, rb_node)
1469 
1470 static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
1471 {
1472 	return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
1473 }
1474 
1475 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1476 {
1477 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1478 
1479 	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1480 
1481 	rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
1482 
1483 	inc_dl_tasks(dl_se, dl_rq);
1484 }
1485 
1486 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1487 {
1488 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1489 
1490 	if (RB_EMPTY_NODE(&dl_se->rb_node))
1491 		return;
1492 
1493 	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1494 
1495 	RB_CLEAR_NODE(&dl_se->rb_node);
1496 
1497 	dec_dl_tasks(dl_se, dl_rq);
1498 }
1499 
1500 static void
1501 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1502 {
1503 	BUG_ON(on_dl_rq(dl_se));
1504 
1505 	/*
1506 	 * If this is a wakeup or a new instance, the scheduling
1507 	 * parameters of the task might need updating. Otherwise,
1508 	 * we want a replenishment of its runtime.
1509 	 */
1510 	if (flags & ENQUEUE_WAKEUP) {
1511 		task_contending(dl_se, flags);
1512 		update_dl_entity(dl_se);
1513 	} else if (flags & ENQUEUE_REPLENISH) {
1514 		replenish_dl_entity(dl_se);
1515 	} else if ((flags & ENQUEUE_RESTORE) &&
1516 		  dl_time_before(dl_se->deadline,
1517 				 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1518 		setup_new_dl_entity(dl_se);
1519 	}
1520 
1521 	__enqueue_dl_entity(dl_se);
1522 }
1523 
1524 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1525 {
1526 	__dequeue_dl_entity(dl_se);
1527 }
1528 
1529 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1530 {
1531 	if (is_dl_boosted(&p->dl)) {
1532 		/*
1533 		 * Because of delays in the detection of the overrun of a
1534 		 * thread's runtime, it might be the case that a thread
1535 		 * goes to sleep in a rt mutex with negative runtime. As
1536 		 * a consequence, the thread will be throttled.
1537 		 *
1538 		 * While waiting for the mutex, this thread can also be
1539 		 * boosted via PI, resulting in a thread that is throttled
1540 		 * and boosted at the same time.
1541 		 *
1542 		 * In this case, the boost overrides the throttle.
1543 		 */
1544 		if (p->dl.dl_throttled) {
1545 			/*
1546 			 * The replenish timer needs to be canceled. No
1547 			 * problem if it fires concurrently: boosted threads
1548 			 * are ignored in dl_task_timer().
1549 			 */
1550 			hrtimer_try_to_cancel(&p->dl.dl_timer);
1551 			p->dl.dl_throttled = 0;
1552 		}
1553 	} else if (!dl_prio(p->normal_prio)) {
1554 		/*
1555 		 * Special case in which we have a !SCHED_DEADLINE task that is going
1556 		 * to be deboosted, but exceeds its runtime while doing so. No point in
1557 		 * replenishing it, as it's going to return back to its original
1558 		 * scheduling class after this. If it has been throttled, we need to
1559 		 * clear the flag, otherwise the task may wake up as throttled after
1560 		 * being boosted again with no means to replenish the runtime and clear
1561 		 * the throttle.
1562 		 */
1563 		p->dl.dl_throttled = 0;
1564 		BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
1565 		return;
1566 	}
1567 
1568 	/*
1569 	 * Check if a constrained deadline task was activated
1570 	 * after the deadline but before the next period.
1571 	 * If that is the case, the task will be throttled and
1572 	 * the replenishment timer will be set to the next period.
1573 	 */
1574 	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1575 		dl_check_constrained_dl(&p->dl);
1576 
1577 	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1578 		add_rq_bw(&p->dl, &rq->dl);
1579 		add_running_bw(&p->dl, &rq->dl);
1580 	}
1581 
1582 	/*
1583 	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
1584 	 * its budget it needs a replenishment and, since it now is on
1585 	 * its rq, the bandwidth timer callback (which clearly has not
1586 	 * run yet) will take care of this.
1587 	 * However, the active utilization does not depend on the fact
1588 	 * that the task is on the runqueue or not (but depends on the
1589 	 * task's state - in GRUB parlance, "inactive" vs "active contending").
1590 	 * In other words, even if a task is throttled its utilization must
1591 	 * be counted in the active utilization; hence, we need to call
1592 	 * add_running_bw().
1593 	 */
1594 	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1595 		if (flags & ENQUEUE_WAKEUP)
1596 			task_contending(&p->dl, flags);
1597 
1598 		return;
1599 	}
1600 
1601 	enqueue_dl_entity(&p->dl, flags);
1602 
1603 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1604 		enqueue_pushable_dl_task(rq, p);
1605 }
1606 
1607 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1608 {
1609 	dequeue_dl_entity(&p->dl);
1610 	dequeue_pushable_dl_task(rq, p);
1611 }
1612 
1613 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1614 {
1615 	update_curr_dl(rq);
1616 	__dequeue_task_dl(rq, p, flags);
1617 
1618 	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1619 		sub_running_bw(&p->dl, &rq->dl);
1620 		sub_rq_bw(&p->dl, &rq->dl);
1621 	}
1622 
1623 	/*
1624 	 * This check allows to start the inactive timer (or to immediately
1625 	 * decrease the active utilization, if needed) in two cases:
1626 	 * when the task blocks and when it is terminating
1627 	 * (p->state == TASK_DEAD). We can handle the two cases in the same
1628 	 * way, because from GRUB's point of view the same thing is happening
1629 	 * (the task moves from "active contending" to "active non contending"
1630 	 * or "inactive")
1631 	 */
1632 	if (flags & DEQUEUE_SLEEP)
1633 		task_non_contending(p);
1634 }
1635 
1636 /*
1637  * Yield task semantic for -deadline tasks is:
1638  *
1639  *   get off from the CPU until our next instance, with
1640  *   a new runtime. This is of little use now, since we
1641  *   don't have a bandwidth reclaiming mechanism. Anyway,
1642  *   bandwidth reclaiming is planned for the future, and
1643  *   yield_task_dl will indicate that some spare budget
1644  *   is available for other task instances to use it.
1645  */
1646 static void yield_task_dl(struct rq *rq)
1647 {
1648 	/*
1649 	 * We make the task go to sleep until its current deadline by
1650 	 * forcing its runtime to zero. This way, update_curr_dl() stops
1651 	 * it and the bandwidth timer will wake it up and will give it
1652 	 * new scheduling parameters (thanks to dl_yielded=1).
1653 	 */
1654 	rq->curr->dl.dl_yielded = 1;
1655 
1656 	update_rq_clock(rq);
1657 	update_curr_dl(rq);
1658 	/*
1659 	 * Tell update_rq_clock() that we've just updated,
1660 	 * so we don't do microscopic update in schedule()
1661 	 * and double the fastpath cost.
1662 	 */
1663 	rq_clock_skip_update(rq);
1664 }
1665 
1666 #ifdef CONFIG_SMP
1667 
1668 static int find_later_rq(struct task_struct *task);
1669 
1670 static int
1671 select_task_rq_dl(struct task_struct *p, int cpu, int flags)
1672 {
1673 	struct task_struct *curr;
1674 	bool select_rq;
1675 	struct rq *rq;
1676 
1677 	if (!(flags & WF_TTWU))
1678 		goto out;
1679 
1680 	rq = cpu_rq(cpu);
1681 
1682 	rcu_read_lock();
1683 	curr = READ_ONCE(rq->curr); /* unlocked access */
1684 
1685 	/*
1686 	 * If we are dealing with a -deadline task, we must
1687 	 * decide where to wake it up.
1688 	 * If it has a later deadline and the current task
1689 	 * on this rq can't move (provided the waking task
1690 	 * can!) we prefer to send it somewhere else. On the
1691 	 * other hand, if it has a shorter deadline, we
1692 	 * try to make it stay here, it might be important.
1693 	 */
1694 	select_rq = unlikely(dl_task(curr)) &&
1695 		    (curr->nr_cpus_allowed < 2 ||
1696 		     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1697 		    p->nr_cpus_allowed > 1;
1698 
1699 	/*
1700 	 * Take the capacity of the CPU into account to
1701 	 * ensure it fits the requirement of the task.
1702 	 */
1703 	if (static_branch_unlikely(&sched_asym_cpucapacity))
1704 		select_rq |= !dl_task_fits_capacity(p, cpu);
1705 
1706 	if (select_rq) {
1707 		int target = find_later_rq(p);
1708 
1709 		if (target != -1 &&
1710 				(dl_time_before(p->dl.deadline,
1711 					cpu_rq(target)->dl.earliest_dl.curr) ||
1712 				(cpu_rq(target)->dl.dl_nr_running == 0)))
1713 			cpu = target;
1714 	}
1715 	rcu_read_unlock();
1716 
1717 out:
1718 	return cpu;
1719 }
1720 
1721 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1722 {
1723 	struct rq *rq;
1724 
1725 	if (READ_ONCE(p->__state) != TASK_WAKING)
1726 		return;
1727 
1728 	rq = task_rq(p);
1729 	/*
1730 	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1731 	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1732 	 * rq->lock is not... So, lock it
1733 	 */
1734 	raw_spin_rq_lock(rq);
1735 	if (p->dl.dl_non_contending) {
1736 		sub_running_bw(&p->dl, &rq->dl);
1737 		p->dl.dl_non_contending = 0;
1738 		/*
1739 		 * If the timer handler is currently running and the
1740 		 * timer cannot be canceled, inactive_task_timer()
1741 		 * will see that dl_not_contending is not set, and
1742 		 * will not touch the rq's active utilization,
1743 		 * so we are still safe.
1744 		 */
1745 		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1746 			put_task_struct(p);
1747 	}
1748 	sub_rq_bw(&p->dl, &rq->dl);
1749 	raw_spin_rq_unlock(rq);
1750 }
1751 
1752 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1753 {
1754 	/*
1755 	 * Current can't be migrated, useless to reschedule,
1756 	 * let's hope p can move out.
1757 	 */
1758 	if (rq->curr->nr_cpus_allowed == 1 ||
1759 	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1760 		return;
1761 
1762 	/*
1763 	 * p is migratable, so let's not schedule it and
1764 	 * see if it is pushed or pulled somewhere else.
1765 	 */
1766 	if (p->nr_cpus_allowed != 1 &&
1767 	    cpudl_find(&rq->rd->cpudl, p, NULL))
1768 		return;
1769 
1770 	resched_curr(rq);
1771 }
1772 
1773 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1774 {
1775 	if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1776 		/*
1777 		 * This is OK, because current is on_cpu, which avoids it being
1778 		 * picked for load-balance and preemption/IRQs are still
1779 		 * disabled avoiding further scheduler activity on it and we've
1780 		 * not yet started the picking loop.
1781 		 */
1782 		rq_unpin_lock(rq, rf);
1783 		pull_dl_task(rq);
1784 		rq_repin_lock(rq, rf);
1785 	}
1786 
1787 	return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1788 }
1789 #endif /* CONFIG_SMP */
1790 
1791 /*
1792  * Only called when both the current and waking task are -deadline
1793  * tasks.
1794  */
1795 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1796 				  int flags)
1797 {
1798 	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1799 		resched_curr(rq);
1800 		return;
1801 	}
1802 
1803 #ifdef CONFIG_SMP
1804 	/*
1805 	 * In the unlikely case current and p have the same deadline
1806 	 * let us try to decide what's the best thing to do...
1807 	 */
1808 	if ((p->dl.deadline == rq->curr->dl.deadline) &&
1809 	    !test_tsk_need_resched(rq->curr))
1810 		check_preempt_equal_dl(rq, p);
1811 #endif /* CONFIG_SMP */
1812 }
1813 
1814 #ifdef CONFIG_SCHED_HRTICK
1815 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1816 {
1817 	hrtick_start(rq, p->dl.runtime);
1818 }
1819 #else /* !CONFIG_SCHED_HRTICK */
1820 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1821 {
1822 }
1823 #endif
1824 
1825 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
1826 {
1827 	p->se.exec_start = rq_clock_task(rq);
1828 
1829 	/* You can't push away the running task */
1830 	dequeue_pushable_dl_task(rq, p);
1831 
1832 	if (!first)
1833 		return;
1834 
1835 	if (hrtick_enabled_dl(rq))
1836 		start_hrtick_dl(rq, p);
1837 
1838 	if (rq->curr->sched_class != &dl_sched_class)
1839 		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1840 
1841 	deadline_queue_push_tasks(rq);
1842 }
1843 
1844 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1845 						   struct dl_rq *dl_rq)
1846 {
1847 	struct rb_node *left = rb_first_cached(&dl_rq->root);
1848 
1849 	if (!left)
1850 		return NULL;
1851 
1852 	return rb_entry(left, struct sched_dl_entity, rb_node);
1853 }
1854 
1855 static struct task_struct *pick_task_dl(struct rq *rq)
1856 {
1857 	struct sched_dl_entity *dl_se;
1858 	struct dl_rq *dl_rq = &rq->dl;
1859 	struct task_struct *p;
1860 
1861 	if (!sched_dl_runnable(rq))
1862 		return NULL;
1863 
1864 	dl_se = pick_next_dl_entity(rq, dl_rq);
1865 	BUG_ON(!dl_se);
1866 	p = dl_task_of(dl_se);
1867 
1868 	return p;
1869 }
1870 
1871 static struct task_struct *pick_next_task_dl(struct rq *rq)
1872 {
1873 	struct task_struct *p;
1874 
1875 	p = pick_task_dl(rq);
1876 	if (p)
1877 		set_next_task_dl(rq, p, true);
1878 
1879 	return p;
1880 }
1881 
1882 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1883 {
1884 	update_curr_dl(rq);
1885 
1886 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1887 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1888 		enqueue_pushable_dl_task(rq, p);
1889 }
1890 
1891 /*
1892  * scheduler tick hitting a task of our scheduling class.
1893  *
1894  * NOTE: This function can be called remotely by the tick offload that
1895  * goes along full dynticks. Therefore no local assumption can be made
1896  * and everything must be accessed through the @rq and @curr passed in
1897  * parameters.
1898  */
1899 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1900 {
1901 	update_curr_dl(rq);
1902 
1903 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1904 	/*
1905 	 * Even when we have runtime, update_curr_dl() might have resulted in us
1906 	 * not being the leftmost task anymore. In that case NEED_RESCHED will
1907 	 * be set and schedule() will start a new hrtick for the next task.
1908 	 */
1909 	if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
1910 	    is_leftmost(p, &rq->dl))
1911 		start_hrtick_dl(rq, p);
1912 }
1913 
1914 static void task_fork_dl(struct task_struct *p)
1915 {
1916 	/*
1917 	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1918 	 * sched_fork()
1919 	 */
1920 }
1921 
1922 #ifdef CONFIG_SMP
1923 
1924 /* Only try algorithms three times */
1925 #define DL_MAX_TRIES 3
1926 
1927 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1928 {
1929 	if (!task_running(rq, p) &&
1930 	    cpumask_test_cpu(cpu, &p->cpus_mask))
1931 		return 1;
1932 	return 0;
1933 }
1934 
1935 /*
1936  * Return the earliest pushable rq's task, which is suitable to be executed
1937  * on the CPU, NULL otherwise:
1938  */
1939 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1940 {
1941 	struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1942 	struct task_struct *p = NULL;
1943 
1944 	if (!has_pushable_dl_tasks(rq))
1945 		return NULL;
1946 
1947 next_node:
1948 	if (next_node) {
1949 		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1950 
1951 		if (pick_dl_task(rq, p, cpu))
1952 			return p;
1953 
1954 		next_node = rb_next(next_node);
1955 		goto next_node;
1956 	}
1957 
1958 	return NULL;
1959 }
1960 
1961 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1962 
1963 static int find_later_rq(struct task_struct *task)
1964 {
1965 	struct sched_domain *sd;
1966 	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1967 	int this_cpu = smp_processor_id();
1968 	int cpu = task_cpu(task);
1969 
1970 	/* Make sure the mask is initialized first */
1971 	if (unlikely(!later_mask))
1972 		return -1;
1973 
1974 	if (task->nr_cpus_allowed == 1)
1975 		return -1;
1976 
1977 	/*
1978 	 * We have to consider system topology and task affinity
1979 	 * first, then we can look for a suitable CPU.
1980 	 */
1981 	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1982 		return -1;
1983 
1984 	/*
1985 	 * If we are here, some targets have been found, including
1986 	 * the most suitable which is, among the runqueues where the
1987 	 * current tasks have later deadlines than the task's one, the
1988 	 * rq with the latest possible one.
1989 	 *
1990 	 * Now we check how well this matches with task's
1991 	 * affinity and system topology.
1992 	 *
1993 	 * The last CPU where the task run is our first
1994 	 * guess, since it is most likely cache-hot there.
1995 	 */
1996 	if (cpumask_test_cpu(cpu, later_mask))
1997 		return cpu;
1998 	/*
1999 	 * Check if this_cpu is to be skipped (i.e., it is
2000 	 * not in the mask) or not.
2001 	 */
2002 	if (!cpumask_test_cpu(this_cpu, later_mask))
2003 		this_cpu = -1;
2004 
2005 	rcu_read_lock();
2006 	for_each_domain(cpu, sd) {
2007 		if (sd->flags & SD_WAKE_AFFINE) {
2008 			int best_cpu;
2009 
2010 			/*
2011 			 * If possible, preempting this_cpu is
2012 			 * cheaper than migrating.
2013 			 */
2014 			if (this_cpu != -1 &&
2015 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2016 				rcu_read_unlock();
2017 				return this_cpu;
2018 			}
2019 
2020 			best_cpu = cpumask_any_and_distribute(later_mask,
2021 							      sched_domain_span(sd));
2022 			/*
2023 			 * Last chance: if a CPU being in both later_mask
2024 			 * and current sd span is valid, that becomes our
2025 			 * choice. Of course, the latest possible CPU is
2026 			 * already under consideration through later_mask.
2027 			 */
2028 			if (best_cpu < nr_cpu_ids) {
2029 				rcu_read_unlock();
2030 				return best_cpu;
2031 			}
2032 		}
2033 	}
2034 	rcu_read_unlock();
2035 
2036 	/*
2037 	 * At this point, all our guesses failed, we just return
2038 	 * 'something', and let the caller sort the things out.
2039 	 */
2040 	if (this_cpu != -1)
2041 		return this_cpu;
2042 
2043 	cpu = cpumask_any_distribute(later_mask);
2044 	if (cpu < nr_cpu_ids)
2045 		return cpu;
2046 
2047 	return -1;
2048 }
2049 
2050 /* Locks the rq it finds */
2051 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2052 {
2053 	struct rq *later_rq = NULL;
2054 	int tries;
2055 	int cpu;
2056 
2057 	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2058 		cpu = find_later_rq(task);
2059 
2060 		if ((cpu == -1) || (cpu == rq->cpu))
2061 			break;
2062 
2063 		later_rq = cpu_rq(cpu);
2064 
2065 		if (later_rq->dl.dl_nr_running &&
2066 		    !dl_time_before(task->dl.deadline,
2067 					later_rq->dl.earliest_dl.curr)) {
2068 			/*
2069 			 * Target rq has tasks of equal or earlier deadline,
2070 			 * retrying does not release any lock and is unlikely
2071 			 * to yield a different result.
2072 			 */
2073 			later_rq = NULL;
2074 			break;
2075 		}
2076 
2077 		/* Retry if something changed. */
2078 		if (double_lock_balance(rq, later_rq)) {
2079 			if (unlikely(task_rq(task) != rq ||
2080 				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
2081 				     task_running(rq, task) ||
2082 				     !dl_task(task) ||
2083 				     !task_on_rq_queued(task))) {
2084 				double_unlock_balance(rq, later_rq);
2085 				later_rq = NULL;
2086 				break;
2087 			}
2088 		}
2089 
2090 		/*
2091 		 * If the rq we found has no -deadline task, or
2092 		 * its earliest one has a later deadline than our
2093 		 * task, the rq is a good one.
2094 		 */
2095 		if (!later_rq->dl.dl_nr_running ||
2096 		    dl_time_before(task->dl.deadline,
2097 				   later_rq->dl.earliest_dl.curr))
2098 			break;
2099 
2100 		/* Otherwise we try again. */
2101 		double_unlock_balance(rq, later_rq);
2102 		later_rq = NULL;
2103 	}
2104 
2105 	return later_rq;
2106 }
2107 
2108 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2109 {
2110 	struct task_struct *p;
2111 
2112 	if (!has_pushable_dl_tasks(rq))
2113 		return NULL;
2114 
2115 	p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2116 		     struct task_struct, pushable_dl_tasks);
2117 
2118 	BUG_ON(rq->cpu != task_cpu(p));
2119 	BUG_ON(task_current(rq, p));
2120 	BUG_ON(p->nr_cpus_allowed <= 1);
2121 
2122 	BUG_ON(!task_on_rq_queued(p));
2123 	BUG_ON(!dl_task(p));
2124 
2125 	return p;
2126 }
2127 
2128 /*
2129  * See if the non running -deadline tasks on this rq
2130  * can be sent to some other CPU where they can preempt
2131  * and start executing.
2132  */
2133 static int push_dl_task(struct rq *rq)
2134 {
2135 	struct task_struct *next_task;
2136 	struct rq *later_rq;
2137 	int ret = 0;
2138 
2139 	if (!rq->dl.overloaded)
2140 		return 0;
2141 
2142 	next_task = pick_next_pushable_dl_task(rq);
2143 	if (!next_task)
2144 		return 0;
2145 
2146 retry:
2147 	if (is_migration_disabled(next_task))
2148 		return 0;
2149 
2150 	if (WARN_ON(next_task == rq->curr))
2151 		return 0;
2152 
2153 	/*
2154 	 * If next_task preempts rq->curr, and rq->curr
2155 	 * can move away, it makes sense to just reschedule
2156 	 * without going further in pushing next_task.
2157 	 */
2158 	if (dl_task(rq->curr) &&
2159 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2160 	    rq->curr->nr_cpus_allowed > 1) {
2161 		resched_curr(rq);
2162 		return 0;
2163 	}
2164 
2165 	/* We might release rq lock */
2166 	get_task_struct(next_task);
2167 
2168 	/* Will lock the rq it'll find */
2169 	later_rq = find_lock_later_rq(next_task, rq);
2170 	if (!later_rq) {
2171 		struct task_struct *task;
2172 
2173 		/*
2174 		 * We must check all this again, since
2175 		 * find_lock_later_rq releases rq->lock and it is
2176 		 * then possible that next_task has migrated.
2177 		 */
2178 		task = pick_next_pushable_dl_task(rq);
2179 		if (task == next_task) {
2180 			/*
2181 			 * The task is still there. We don't try
2182 			 * again, some other CPU will pull it when ready.
2183 			 */
2184 			goto out;
2185 		}
2186 
2187 		if (!task)
2188 			/* No more tasks */
2189 			goto out;
2190 
2191 		put_task_struct(next_task);
2192 		next_task = task;
2193 		goto retry;
2194 	}
2195 
2196 	deactivate_task(rq, next_task, 0);
2197 	set_task_cpu(next_task, later_rq->cpu);
2198 
2199 	/*
2200 	 * Update the later_rq clock here, because the clock is used
2201 	 * by the cpufreq_update_util() inside __add_running_bw().
2202 	 */
2203 	update_rq_clock(later_rq);
2204 	activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2205 	ret = 1;
2206 
2207 	resched_curr(later_rq);
2208 
2209 	double_unlock_balance(rq, later_rq);
2210 
2211 out:
2212 	put_task_struct(next_task);
2213 
2214 	return ret;
2215 }
2216 
2217 static void push_dl_tasks(struct rq *rq)
2218 {
2219 	/* push_dl_task() will return true if it moved a -deadline task */
2220 	while (push_dl_task(rq))
2221 		;
2222 }
2223 
2224 static void pull_dl_task(struct rq *this_rq)
2225 {
2226 	int this_cpu = this_rq->cpu, cpu;
2227 	struct task_struct *p, *push_task;
2228 	bool resched = false;
2229 	struct rq *src_rq;
2230 	u64 dmin = LONG_MAX;
2231 
2232 	if (likely(!dl_overloaded(this_rq)))
2233 		return;
2234 
2235 	/*
2236 	 * Match the barrier from dl_set_overloaded; this guarantees that if we
2237 	 * see overloaded we must also see the dlo_mask bit.
2238 	 */
2239 	smp_rmb();
2240 
2241 	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2242 		if (this_cpu == cpu)
2243 			continue;
2244 
2245 		src_rq = cpu_rq(cpu);
2246 
2247 		/*
2248 		 * It looks racy, abd it is! However, as in sched_rt.c,
2249 		 * we are fine with this.
2250 		 */
2251 		if (this_rq->dl.dl_nr_running &&
2252 		    dl_time_before(this_rq->dl.earliest_dl.curr,
2253 				   src_rq->dl.earliest_dl.next))
2254 			continue;
2255 
2256 		/* Might drop this_rq->lock */
2257 		push_task = NULL;
2258 		double_lock_balance(this_rq, src_rq);
2259 
2260 		/*
2261 		 * If there are no more pullable tasks on the
2262 		 * rq, we're done with it.
2263 		 */
2264 		if (src_rq->dl.dl_nr_running <= 1)
2265 			goto skip;
2266 
2267 		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2268 
2269 		/*
2270 		 * We found a task to be pulled if:
2271 		 *  - it preempts our current (if there's one),
2272 		 *  - it will preempt the last one we pulled (if any).
2273 		 */
2274 		if (p && dl_time_before(p->dl.deadline, dmin) &&
2275 		    (!this_rq->dl.dl_nr_running ||
2276 		     dl_time_before(p->dl.deadline,
2277 				    this_rq->dl.earliest_dl.curr))) {
2278 			WARN_ON(p == src_rq->curr);
2279 			WARN_ON(!task_on_rq_queued(p));
2280 
2281 			/*
2282 			 * Then we pull iff p has actually an earlier
2283 			 * deadline than the current task of its runqueue.
2284 			 */
2285 			if (dl_time_before(p->dl.deadline,
2286 					   src_rq->curr->dl.deadline))
2287 				goto skip;
2288 
2289 			if (is_migration_disabled(p)) {
2290 				push_task = get_push_task(src_rq);
2291 			} else {
2292 				deactivate_task(src_rq, p, 0);
2293 				set_task_cpu(p, this_cpu);
2294 				activate_task(this_rq, p, 0);
2295 				dmin = p->dl.deadline;
2296 				resched = true;
2297 			}
2298 
2299 			/* Is there any other task even earlier? */
2300 		}
2301 skip:
2302 		double_unlock_balance(this_rq, src_rq);
2303 
2304 		if (push_task) {
2305 			raw_spin_rq_unlock(this_rq);
2306 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2307 					    push_task, &src_rq->push_work);
2308 			raw_spin_rq_lock(this_rq);
2309 		}
2310 	}
2311 
2312 	if (resched)
2313 		resched_curr(this_rq);
2314 }
2315 
2316 /*
2317  * Since the task is not running and a reschedule is not going to happen
2318  * anytime soon on its runqueue, we try pushing it away now.
2319  */
2320 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2321 {
2322 	if (!task_running(rq, p) &&
2323 	    !test_tsk_need_resched(rq->curr) &&
2324 	    p->nr_cpus_allowed > 1 &&
2325 	    dl_task(rq->curr) &&
2326 	    (rq->curr->nr_cpus_allowed < 2 ||
2327 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2328 		push_dl_tasks(rq);
2329 	}
2330 }
2331 
2332 static void set_cpus_allowed_dl(struct task_struct *p,
2333 				const struct cpumask *new_mask,
2334 				u32 flags)
2335 {
2336 	struct root_domain *src_rd;
2337 	struct rq *rq;
2338 
2339 	BUG_ON(!dl_task(p));
2340 
2341 	rq = task_rq(p);
2342 	src_rd = rq->rd;
2343 	/*
2344 	 * Migrating a SCHED_DEADLINE task between exclusive
2345 	 * cpusets (different root_domains) entails a bandwidth
2346 	 * update. We already made space for us in the destination
2347 	 * domain (see cpuset_can_attach()).
2348 	 */
2349 	if (!cpumask_intersects(src_rd->span, new_mask)) {
2350 		struct dl_bw *src_dl_b;
2351 
2352 		src_dl_b = dl_bw_of(cpu_of(rq));
2353 		/*
2354 		 * We now free resources of the root_domain we are migrating
2355 		 * off. In the worst case, sched_setattr() may temporary fail
2356 		 * until we complete the update.
2357 		 */
2358 		raw_spin_lock(&src_dl_b->lock);
2359 		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2360 		raw_spin_unlock(&src_dl_b->lock);
2361 	}
2362 
2363 	set_cpus_allowed_common(p, new_mask, flags);
2364 }
2365 
2366 /* Assumes rq->lock is held */
2367 static void rq_online_dl(struct rq *rq)
2368 {
2369 	if (rq->dl.overloaded)
2370 		dl_set_overload(rq);
2371 
2372 	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2373 	if (rq->dl.dl_nr_running > 0)
2374 		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2375 }
2376 
2377 /* Assumes rq->lock is held */
2378 static void rq_offline_dl(struct rq *rq)
2379 {
2380 	if (rq->dl.overloaded)
2381 		dl_clear_overload(rq);
2382 
2383 	cpudl_clear(&rq->rd->cpudl, rq->cpu);
2384 	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2385 }
2386 
2387 void __init init_sched_dl_class(void)
2388 {
2389 	unsigned int i;
2390 
2391 	for_each_possible_cpu(i)
2392 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2393 					GFP_KERNEL, cpu_to_node(i));
2394 }
2395 
2396 void dl_add_task_root_domain(struct task_struct *p)
2397 {
2398 	struct rq_flags rf;
2399 	struct rq *rq;
2400 	struct dl_bw *dl_b;
2401 
2402 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2403 	if (!dl_task(p)) {
2404 		raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2405 		return;
2406 	}
2407 
2408 	rq = __task_rq_lock(p, &rf);
2409 
2410 	dl_b = &rq->rd->dl_bw;
2411 	raw_spin_lock(&dl_b->lock);
2412 
2413 	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2414 
2415 	raw_spin_unlock(&dl_b->lock);
2416 
2417 	task_rq_unlock(rq, p, &rf);
2418 }
2419 
2420 void dl_clear_root_domain(struct root_domain *rd)
2421 {
2422 	unsigned long flags;
2423 
2424 	raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2425 	rd->dl_bw.total_bw = 0;
2426 	raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2427 }
2428 
2429 #endif /* CONFIG_SMP */
2430 
2431 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2432 {
2433 	/*
2434 	 * task_non_contending() can start the "inactive timer" (if the 0-lag
2435 	 * time is in the future). If the task switches back to dl before
2436 	 * the "inactive timer" fires, it can continue to consume its current
2437 	 * runtime using its current deadline. If it stays outside of
2438 	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2439 	 * will reset the task parameters.
2440 	 */
2441 	if (task_on_rq_queued(p) && p->dl.dl_runtime)
2442 		task_non_contending(p);
2443 
2444 	if (!task_on_rq_queued(p)) {
2445 		/*
2446 		 * Inactive timer is armed. However, p is leaving DEADLINE and
2447 		 * might migrate away from this rq while continuing to run on
2448 		 * some other class. We need to remove its contribution from
2449 		 * this rq running_bw now, or sub_rq_bw (below) will complain.
2450 		 */
2451 		if (p->dl.dl_non_contending)
2452 			sub_running_bw(&p->dl, &rq->dl);
2453 		sub_rq_bw(&p->dl, &rq->dl);
2454 	}
2455 
2456 	/*
2457 	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2458 	 * at the 0-lag time, because the task could have been migrated
2459 	 * while SCHED_OTHER in the meanwhile.
2460 	 */
2461 	if (p->dl.dl_non_contending)
2462 		p->dl.dl_non_contending = 0;
2463 
2464 	/*
2465 	 * Since this might be the only -deadline task on the rq,
2466 	 * this is the right place to try to pull some other one
2467 	 * from an overloaded CPU, if any.
2468 	 */
2469 	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2470 		return;
2471 
2472 	deadline_queue_pull_task(rq);
2473 }
2474 
2475 /*
2476  * When switching to -deadline, we may overload the rq, then
2477  * we try to push someone off, if possible.
2478  */
2479 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2480 {
2481 	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2482 		put_task_struct(p);
2483 
2484 	/* If p is not queued we will update its parameters at next wakeup. */
2485 	if (!task_on_rq_queued(p)) {
2486 		add_rq_bw(&p->dl, &rq->dl);
2487 
2488 		return;
2489 	}
2490 
2491 	if (rq->curr != p) {
2492 #ifdef CONFIG_SMP
2493 		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2494 			deadline_queue_push_tasks(rq);
2495 #endif
2496 		if (dl_task(rq->curr))
2497 			check_preempt_curr_dl(rq, p, 0);
2498 		else
2499 			resched_curr(rq);
2500 	} else {
2501 		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2502 	}
2503 }
2504 
2505 /*
2506  * If the scheduling parameters of a -deadline task changed,
2507  * a push or pull operation might be needed.
2508  */
2509 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2510 			    int oldprio)
2511 {
2512 	if (task_on_rq_queued(p) || task_current(rq, p)) {
2513 #ifdef CONFIG_SMP
2514 		/*
2515 		 * This might be too much, but unfortunately
2516 		 * we don't have the old deadline value, and
2517 		 * we can't argue if the task is increasing
2518 		 * or lowering its prio, so...
2519 		 */
2520 		if (!rq->dl.overloaded)
2521 			deadline_queue_pull_task(rq);
2522 
2523 		/*
2524 		 * If we now have a earlier deadline task than p,
2525 		 * then reschedule, provided p is still on this
2526 		 * runqueue.
2527 		 */
2528 		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2529 			resched_curr(rq);
2530 #else
2531 		/*
2532 		 * Again, we don't know if p has a earlier
2533 		 * or later deadline, so let's blindly set a
2534 		 * (maybe not needed) rescheduling point.
2535 		 */
2536 		resched_curr(rq);
2537 #endif /* CONFIG_SMP */
2538 	}
2539 }
2540 
2541 DEFINE_SCHED_CLASS(dl) = {
2542 
2543 	.enqueue_task		= enqueue_task_dl,
2544 	.dequeue_task		= dequeue_task_dl,
2545 	.yield_task		= yield_task_dl,
2546 
2547 	.check_preempt_curr	= check_preempt_curr_dl,
2548 
2549 	.pick_next_task		= pick_next_task_dl,
2550 	.put_prev_task		= put_prev_task_dl,
2551 	.set_next_task		= set_next_task_dl,
2552 
2553 #ifdef CONFIG_SMP
2554 	.balance		= balance_dl,
2555 	.pick_task		= pick_task_dl,
2556 	.select_task_rq		= select_task_rq_dl,
2557 	.migrate_task_rq	= migrate_task_rq_dl,
2558 	.set_cpus_allowed       = set_cpus_allowed_dl,
2559 	.rq_online              = rq_online_dl,
2560 	.rq_offline             = rq_offline_dl,
2561 	.task_woken		= task_woken_dl,
2562 	.find_lock_rq		= find_lock_later_rq,
2563 #endif
2564 
2565 	.task_tick		= task_tick_dl,
2566 	.task_fork              = task_fork_dl,
2567 
2568 	.prio_changed           = prio_changed_dl,
2569 	.switched_from		= switched_from_dl,
2570 	.switched_to		= switched_to_dl,
2571 
2572 	.update_curr		= update_curr_dl,
2573 };
2574 
2575 /* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
2576 static u64 dl_generation;
2577 
2578 int sched_dl_global_validate(void)
2579 {
2580 	u64 runtime = global_rt_runtime();
2581 	u64 period = global_rt_period();
2582 	u64 new_bw = to_ratio(period, runtime);
2583 	u64 gen = ++dl_generation;
2584 	struct dl_bw *dl_b;
2585 	int cpu, cpus, ret = 0;
2586 	unsigned long flags;
2587 
2588 	/*
2589 	 * Here we want to check the bandwidth not being set to some
2590 	 * value smaller than the currently allocated bandwidth in
2591 	 * any of the root_domains.
2592 	 */
2593 	for_each_possible_cpu(cpu) {
2594 		rcu_read_lock_sched();
2595 
2596 		if (dl_bw_visited(cpu, gen))
2597 			goto next;
2598 
2599 		dl_b = dl_bw_of(cpu);
2600 		cpus = dl_bw_cpus(cpu);
2601 
2602 		raw_spin_lock_irqsave(&dl_b->lock, flags);
2603 		if (new_bw * cpus < dl_b->total_bw)
2604 			ret = -EBUSY;
2605 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2606 
2607 next:
2608 		rcu_read_unlock_sched();
2609 
2610 		if (ret)
2611 			break;
2612 	}
2613 
2614 	return ret;
2615 }
2616 
2617 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2618 {
2619 	if (global_rt_runtime() == RUNTIME_INF) {
2620 		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2621 		dl_rq->extra_bw = 1 << BW_SHIFT;
2622 	} else {
2623 		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2624 			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2625 		dl_rq->extra_bw = to_ratio(global_rt_period(),
2626 						    global_rt_runtime());
2627 	}
2628 }
2629 
2630 void sched_dl_do_global(void)
2631 {
2632 	u64 new_bw = -1;
2633 	u64 gen = ++dl_generation;
2634 	struct dl_bw *dl_b;
2635 	int cpu;
2636 	unsigned long flags;
2637 
2638 	def_dl_bandwidth.dl_period = global_rt_period();
2639 	def_dl_bandwidth.dl_runtime = global_rt_runtime();
2640 
2641 	if (global_rt_runtime() != RUNTIME_INF)
2642 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2643 
2644 	for_each_possible_cpu(cpu) {
2645 		rcu_read_lock_sched();
2646 
2647 		if (dl_bw_visited(cpu, gen)) {
2648 			rcu_read_unlock_sched();
2649 			continue;
2650 		}
2651 
2652 		dl_b = dl_bw_of(cpu);
2653 
2654 		raw_spin_lock_irqsave(&dl_b->lock, flags);
2655 		dl_b->bw = new_bw;
2656 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2657 
2658 		rcu_read_unlock_sched();
2659 		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2660 	}
2661 }
2662 
2663 /*
2664  * We must be sure that accepting a new task (or allowing changing the
2665  * parameters of an existing one) is consistent with the bandwidth
2666  * constraints. If yes, this function also accordingly updates the currently
2667  * allocated bandwidth to reflect the new situation.
2668  *
2669  * This function is called while holding p's rq->lock.
2670  */
2671 int sched_dl_overflow(struct task_struct *p, int policy,
2672 		      const struct sched_attr *attr)
2673 {
2674 	u64 period = attr->sched_period ?: attr->sched_deadline;
2675 	u64 runtime = attr->sched_runtime;
2676 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2677 	int cpus, err = -1, cpu = task_cpu(p);
2678 	struct dl_bw *dl_b = dl_bw_of(cpu);
2679 	unsigned long cap;
2680 
2681 	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2682 		return 0;
2683 
2684 	/* !deadline task may carry old deadline bandwidth */
2685 	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2686 		return 0;
2687 
2688 	/*
2689 	 * Either if a task, enters, leave, or stays -deadline but changes
2690 	 * its parameters, we may need to update accordingly the total
2691 	 * allocated bandwidth of the container.
2692 	 */
2693 	raw_spin_lock(&dl_b->lock);
2694 	cpus = dl_bw_cpus(cpu);
2695 	cap = dl_bw_capacity(cpu);
2696 
2697 	if (dl_policy(policy) && !task_has_dl_policy(p) &&
2698 	    !__dl_overflow(dl_b, cap, 0, new_bw)) {
2699 		if (hrtimer_active(&p->dl.inactive_timer))
2700 			__dl_sub(dl_b, p->dl.dl_bw, cpus);
2701 		__dl_add(dl_b, new_bw, cpus);
2702 		err = 0;
2703 	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
2704 		   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2705 		/*
2706 		 * XXX this is slightly incorrect: when the task
2707 		 * utilization decreases, we should delay the total
2708 		 * utilization change until the task's 0-lag point.
2709 		 * But this would require to set the task's "inactive
2710 		 * timer" when the task is not inactive.
2711 		 */
2712 		__dl_sub(dl_b, p->dl.dl_bw, cpus);
2713 		__dl_add(dl_b, new_bw, cpus);
2714 		dl_change_utilization(p, new_bw);
2715 		err = 0;
2716 	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2717 		/*
2718 		 * Do not decrease the total deadline utilization here,
2719 		 * switched_from_dl() will take care to do it at the correct
2720 		 * (0-lag) time.
2721 		 */
2722 		err = 0;
2723 	}
2724 	raw_spin_unlock(&dl_b->lock);
2725 
2726 	return err;
2727 }
2728 
2729 /*
2730  * This function initializes the sched_dl_entity of a newly becoming
2731  * SCHED_DEADLINE task.
2732  *
2733  * Only the static values are considered here, the actual runtime and the
2734  * absolute deadline will be properly calculated when the task is enqueued
2735  * for the first time with its new policy.
2736  */
2737 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2738 {
2739 	struct sched_dl_entity *dl_se = &p->dl;
2740 
2741 	dl_se->dl_runtime = attr->sched_runtime;
2742 	dl_se->dl_deadline = attr->sched_deadline;
2743 	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2744 	dl_se->flags = attr->sched_flags;
2745 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2746 	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2747 }
2748 
2749 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2750 {
2751 	struct sched_dl_entity *dl_se = &p->dl;
2752 
2753 	attr->sched_priority = p->rt_priority;
2754 	attr->sched_runtime = dl_se->dl_runtime;
2755 	attr->sched_deadline = dl_se->dl_deadline;
2756 	attr->sched_period = dl_se->dl_period;
2757 	attr->sched_flags = dl_se->flags;
2758 }
2759 
2760 /*
2761  * Default limits for DL period; on the top end we guard against small util
2762  * tasks still getting ridiculously long effective runtimes, on the bottom end we
2763  * guard against timer DoS.
2764  */
2765 unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
2766 unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
2767 
2768 /*
2769  * This function validates the new parameters of a -deadline task.
2770  * We ask for the deadline not being zero, and greater or equal
2771  * than the runtime, as well as the period of being zero or
2772  * greater than deadline. Furthermore, we have to be sure that
2773  * user parameters are above the internal resolution of 1us (we
2774  * check sched_runtime only since it is always the smaller one) and
2775  * below 2^63 ns (we have to check both sched_deadline and
2776  * sched_period, as the latter can be zero).
2777  */
2778 bool __checkparam_dl(const struct sched_attr *attr)
2779 {
2780 	u64 period, max, min;
2781 
2782 	/* special dl tasks don't actually use any parameter */
2783 	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2784 		return true;
2785 
2786 	/* deadline != 0 */
2787 	if (attr->sched_deadline == 0)
2788 		return false;
2789 
2790 	/*
2791 	 * Since we truncate DL_SCALE bits, make sure we're at least
2792 	 * that big.
2793 	 */
2794 	if (attr->sched_runtime < (1ULL << DL_SCALE))
2795 		return false;
2796 
2797 	/*
2798 	 * Since we use the MSB for wrap-around and sign issues, make
2799 	 * sure it's not set (mind that period can be equal to zero).
2800 	 */
2801 	if (attr->sched_deadline & (1ULL << 63) ||
2802 	    attr->sched_period & (1ULL << 63))
2803 		return false;
2804 
2805 	period = attr->sched_period;
2806 	if (!period)
2807 		period = attr->sched_deadline;
2808 
2809 	/* runtime <= deadline <= period (if period != 0) */
2810 	if (period < attr->sched_deadline ||
2811 	    attr->sched_deadline < attr->sched_runtime)
2812 		return false;
2813 
2814 	max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
2815 	min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
2816 
2817 	if (period < min || period > max)
2818 		return false;
2819 
2820 	return true;
2821 }
2822 
2823 /*
2824  * This function clears the sched_dl_entity static params.
2825  */
2826 void __dl_clear_params(struct task_struct *p)
2827 {
2828 	struct sched_dl_entity *dl_se = &p->dl;
2829 
2830 	dl_se->dl_runtime		= 0;
2831 	dl_se->dl_deadline		= 0;
2832 	dl_se->dl_period		= 0;
2833 	dl_se->flags			= 0;
2834 	dl_se->dl_bw			= 0;
2835 	dl_se->dl_density		= 0;
2836 
2837 	dl_se->dl_throttled		= 0;
2838 	dl_se->dl_yielded		= 0;
2839 	dl_se->dl_non_contending	= 0;
2840 	dl_se->dl_overrun		= 0;
2841 
2842 #ifdef CONFIG_RT_MUTEXES
2843 	dl_se->pi_se			= dl_se;
2844 #endif
2845 }
2846 
2847 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2848 {
2849 	struct sched_dl_entity *dl_se = &p->dl;
2850 
2851 	if (dl_se->dl_runtime != attr->sched_runtime ||
2852 	    dl_se->dl_deadline != attr->sched_deadline ||
2853 	    dl_se->dl_period != attr->sched_period ||
2854 	    dl_se->flags != attr->sched_flags)
2855 		return true;
2856 
2857 	return false;
2858 }
2859 
2860 #ifdef CONFIG_SMP
2861 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2862 {
2863 	unsigned long flags, cap;
2864 	unsigned int dest_cpu;
2865 	struct dl_bw *dl_b;
2866 	bool overflow;
2867 	int ret;
2868 
2869 	dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2870 
2871 	rcu_read_lock_sched();
2872 	dl_b = dl_bw_of(dest_cpu);
2873 	raw_spin_lock_irqsave(&dl_b->lock, flags);
2874 	cap = dl_bw_capacity(dest_cpu);
2875 	overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
2876 	if (overflow) {
2877 		ret = -EBUSY;
2878 	} else {
2879 		/*
2880 		 * We reserve space for this task in the destination
2881 		 * root_domain, as we can't fail after this point.
2882 		 * We will free resources in the source root_domain
2883 		 * later on (see set_cpus_allowed_dl()).
2884 		 */
2885 		int cpus = dl_bw_cpus(dest_cpu);
2886 
2887 		__dl_add(dl_b, p->dl.dl_bw, cpus);
2888 		ret = 0;
2889 	}
2890 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2891 	rcu_read_unlock_sched();
2892 
2893 	return ret;
2894 }
2895 
2896 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2897 				 const struct cpumask *trial)
2898 {
2899 	int ret = 1, trial_cpus;
2900 	struct dl_bw *cur_dl_b;
2901 	unsigned long flags;
2902 
2903 	rcu_read_lock_sched();
2904 	cur_dl_b = dl_bw_of(cpumask_any(cur));
2905 	trial_cpus = cpumask_weight(trial);
2906 
2907 	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2908 	if (cur_dl_b->bw != -1 &&
2909 	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2910 		ret = 0;
2911 	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2912 	rcu_read_unlock_sched();
2913 
2914 	return ret;
2915 }
2916 
2917 bool dl_cpu_busy(unsigned int cpu)
2918 {
2919 	unsigned long flags, cap;
2920 	struct dl_bw *dl_b;
2921 	bool overflow;
2922 
2923 	rcu_read_lock_sched();
2924 	dl_b = dl_bw_of(cpu);
2925 	raw_spin_lock_irqsave(&dl_b->lock, flags);
2926 	cap = dl_bw_capacity(cpu);
2927 	overflow = __dl_overflow(dl_b, cap, 0, 0);
2928 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2929 	rcu_read_unlock_sched();
2930 
2931 	return overflow;
2932 }
2933 #endif
2934 
2935 #ifdef CONFIG_SCHED_DEBUG
2936 void print_dl_stats(struct seq_file *m, int cpu)
2937 {
2938 	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2939 }
2940 #endif /* CONFIG_SCHED_DEBUG */
2941