xref: /openbmc/linux/kernel/sched/deadline.c (revision 160b8e75)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Deadline Scheduling Class (SCHED_DEADLINE)
4  *
5  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6  *
7  * Tasks that periodically execute their instances for less than their
8  * runtime won't miss any of their deadlines.
9  * Tasks that are not periodic or sporadic, or that try to execute more
10  * than their reserved bandwidth will be slowed down (and may potentially
11  * miss some of their deadlines), and won't affect any other task.
12  *
13  * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
14  *                    Juri Lelli <juri.lelli@gmail.com>,
15  *                    Michael Trimarchi <michael@amarulasolutions.com>,
16  *                    Fabio Checconi <fchecconi@gmail.com>
17  */
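As an aside, the runtime/deadline/period reservation described above is what userspace requests through sched_setattr(2). The sketch below is illustrative only and not part of this file: it follows the example pattern from Documentation/scheduler/sched-deadline.txt, defines struct sched_attr by hand and invokes the raw syscall, since glibc provides no wrapper.

	/* Illustrative userspace sketch: ask for 10ms of runtime every 100ms. */
	#define _GNU_SOURCE
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#define SCHED_DEADLINE	6	/* from include/uapi/linux/sched.h */

	struct sched_attr {
		uint32_t size;
		uint32_t sched_policy;
		uint64_t sched_flags;
		int32_t  sched_nice;
		uint32_t sched_priority;
		uint64_t sched_runtime;		/* all times in nanoseconds */
		uint64_t sched_deadline;
		uint64_t sched_period;
	};

	int main(void)
	{
		struct sched_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size	    = sizeof(attr);
		attr.sched_policy   = SCHED_DEADLINE;
		attr.sched_runtime  =  10 * 1000 * 1000;	/*  10ms */
		attr.sched_deadline = 100 * 1000 * 1000;	/* 100ms */
		attr.sched_period   = 100 * 1000 * 1000;	/* 100ms */

		/* No glibc wrapper: invoke the syscall directly for this thread. */
		if (syscall(__NR_sched_setattr, 0, &attr, 0)) {
			perror("sched_setattr");
			return 1;
		}
		/* From here on this task owns a 10% bandwidth reservation. */
		return 0;
	}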
18 #include "sched.h"
19 
20 #include <linux/slab.h>
21 #include <uapi/linux/sched/types.h>
22 
23 struct dl_bandwidth def_dl_bandwidth;
24 
25 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
26 {
27 	return container_of(dl_se, struct task_struct, dl);
28 }
29 
30 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
31 {
32 	return container_of(dl_rq, struct rq, dl);
33 }
34 
35 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
36 {
37 	struct task_struct *p = dl_task_of(dl_se);
38 	struct rq *rq = task_rq(p);
39 
40 	return &rq->dl;
41 }
42 
43 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
44 {
45 	return !RB_EMPTY_NODE(&dl_se->rb_node);
46 }
47 
48 #ifdef CONFIG_SMP
49 static inline struct dl_bw *dl_bw_of(int i)
50 {
51 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
52 			 "sched RCU must be held");
53 	return &cpu_rq(i)->rd->dl_bw;
54 }
55 
56 static inline int dl_bw_cpus(int i)
57 {
58 	struct root_domain *rd = cpu_rq(i)->rd;
59 	int cpus = 0;
60 
61 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
62 			 "sched RCU must be held");
63 	for_each_cpu_and(i, rd->span, cpu_active_mask)
64 		cpus++;
65 
66 	return cpus;
67 }
68 #else
69 static inline struct dl_bw *dl_bw_of(int i)
70 {
71 	return &cpu_rq(i)->dl.dl_bw;
72 }
73 
74 static inline int dl_bw_cpus(int i)
75 {
76 	return 1;
77 }
78 #endif
79 
80 static inline
81 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
82 {
83 	u64 old = dl_rq->running_bw;
84 
85 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
86 	dl_rq->running_bw += dl_bw;
87 	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
88 	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
89 	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
90 	cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL);
91 }
92 
93 static inline
94 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
95 {
96 	u64 old = dl_rq->running_bw;
97 
98 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
99 	dl_rq->running_bw -= dl_bw;
100 	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
101 	if (dl_rq->running_bw > old)
102 		dl_rq->running_bw = 0;
103 	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
104 	cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL);
105 }
106 
107 static inline
108 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
109 {
110 	u64 old = dl_rq->this_bw;
111 
112 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
113 	dl_rq->this_bw += dl_bw;
114 	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
115 }
116 
117 static inline
118 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
119 {
120 	u64 old = dl_rq->this_bw;
121 
122 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
123 	dl_rq->this_bw -= dl_bw;
124 	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
125 	if (dl_rq->this_bw > old)
126 		dl_rq->this_bw = 0;
127 	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
128 }
129 
130 static inline
131 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
132 {
133 	if (!dl_entity_is_special(dl_se))
134 		__add_rq_bw(dl_se->dl_bw, dl_rq);
135 }
136 
137 static inline
138 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
139 {
140 	if (!dl_entity_is_special(dl_se))
141 		__sub_rq_bw(dl_se->dl_bw, dl_rq);
142 }
143 
144 static inline
145 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
146 {
147 	if (!dl_entity_is_special(dl_se))
148 		__add_running_bw(dl_se->dl_bw, dl_rq);
149 }
150 
151 static inline
152 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
153 {
154 	if (!dl_entity_is_special(dl_se))
155 		__sub_running_bw(dl_se->dl_bw, dl_rq);
156 }
157 
158 void dl_change_utilization(struct task_struct *p, u64 new_bw)
159 {
160 	struct rq *rq;
161 
162 	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
163 
164 	if (task_on_rq_queued(p))
165 		return;
166 
167 	rq = task_rq(p);
168 	if (p->dl.dl_non_contending) {
169 		sub_running_bw(&p->dl, &rq->dl);
170 		p->dl.dl_non_contending = 0;
171 		/*
172 		 * If the timer handler is currently running and the
173 		 * timer cannot be cancelled, inactive_task_timer()
174 		 * will see that dl_non_contending is not set, and
175 		 * will not touch the rq's active utilization,
176 		 * so we are still safe.
177 		 */
178 		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
179 			put_task_struct(p);
180 	}
181 	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
182 	__add_rq_bw(new_bw, &rq->dl);
183 }
184 
185 /*
186  * The utilization of a task cannot be immediately removed from
187  * the rq active utilization (running_bw) when the task blocks.
188  * Instead, we have to wait for the so called "0-lag time".
189  *
190  * If a task blocks before the "0-lag time", a timer (the inactive
191  * timer) is armed, and running_bw is decreased when the timer
192  * fires.
193  *
194  * If the task wakes up again before the inactive timer fires,
195  * the timer is cancelled, whereas if the task wakes up after the
196  * inactive timer fired (and running_bw has been decreased) the
197  * task's utilization has to be added to running_bw again.
198  * A flag in the deadline scheduling entity (dl_non_contending)
199  * is used to avoid race conditions between the inactive timer handler
200  * and task wakeups.
201  *
202  * The following diagram shows how running_bw is updated. A task is
203  * "ACTIVE" when its utilization contributes to running_bw; an
204  * "ACTIVE contending" task is in the TASK_RUNNING state, while an
205  * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
206  * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
207  * time has already passed and which no longer contributes to running_bw.
208  *                              +------------------+
209  *             wakeup           |    ACTIVE        |
210  *          +------------------>+   contending     |
211  *          | add_running_bw    |                  |
212  *          |                   +----+------+------+
213  *          |                        |      ^
214  *          |                dequeue |      |
215  * +--------+-------+                |      |
216  * |                |   t >= 0-lag   |      | wakeup
217  * |    INACTIVE    |<---------------+      |
218  * |                | sub_running_bw |      |
219  * +--------+-------+                |      |
220  *          ^                        |      |
221  *          |              t < 0-lag |      |
222  *          |                        |      |
223  *          |                        V      |
224  *          |                   +----+------+------+
225  *          | sub_running_bw    |    ACTIVE        |
226  *          +-------------------+                  |
227  *            inactive timer    |  non contending  |
228  *            fired             +------------------+
229  *
230  * The task_non_contending() function is invoked when a task
231  * blocks, and checks if the 0-lag time already passed or
232  * not (in the first case, it directly updates running_bw;
233  * in the second case, it arms the inactive timer).
234  *
235  * The task_contending() function is invoked when a task wakes
236  * up, and checks if the task is still in the "ACTIVE non contending"
237  * state or not (in the second case, it updates running_bw).
238  */
239 static void task_non_contending(struct task_struct *p)
240 {
241 	struct sched_dl_entity *dl_se = &p->dl;
242 	struct hrtimer *timer = &dl_se->inactive_timer;
243 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
244 	struct rq *rq = rq_of_dl_rq(dl_rq);
245 	s64 zerolag_time;
246 
247 	/*
248 	 * If this is a non-deadline task that has been boosted,
249 	 * do nothing
250 	 */
251 	if (dl_se->dl_runtime == 0)
252 		return;
253 
254 	if (dl_entity_is_special(dl_se))
255 		return;
256 
257 	WARN_ON(hrtimer_active(&dl_se->inactive_timer));
258 	WARN_ON(dl_se->dl_non_contending);
259 
260 	zerolag_time = dl_se->deadline -
261 		 div64_long((dl_se->runtime * dl_se->dl_period),
262 			dl_se->dl_runtime);
263 
264 	/*
265 	 * Using relative times instead of the absolute "0-lag time"
266 	 * allows us to simplify the code
267 	 */
268 	zerolag_time -= rq_clock(rq);
269 
270 	/*
271 	 * If the "0-lag time" already passed, decrease the active
272 	 * utilization now, instead of starting a timer
273 	 */
274 	if (zerolag_time < 0) {
275 		if (dl_task(p))
276 			sub_running_bw(dl_se, dl_rq);
277 		if (!dl_task(p) || p->state == TASK_DEAD) {
278 			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
279 
280 			if (p->state == TASK_DEAD)
281 				sub_rq_bw(&p->dl, &rq->dl);
282 			raw_spin_lock(&dl_b->lock);
283 			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
284 			__dl_clear_params(p);
285 			raw_spin_unlock(&dl_b->lock);
286 		}
287 
288 		return;
289 	}
290 
291 	dl_se->dl_non_contending = 1;
292 	get_task_struct(p);
293 	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
294 }
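For illustration, the 0-lag computation performed by task_non_contending() above can be reproduced in a standalone sketch; the nanosecond values below are invented, and a plain 64-bit division stands in for div64_long().

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t dl_runtime =  10000000ULL;	/* 10ms of reserved runtime  */
		uint64_t dl_period  = 100000000ULL;	/* 100ms period              */
		uint64_t deadline   = 500000000ULL;	/* current absolute deadline */
		int64_t  runtime    =   4000000;	/* 4ms of budget left        */
		uint64_t now        = 450000000ULL;	/* current rq_clock()        */

		/* zerolag = deadline - runtime * dl_period / dl_runtime */
		int64_t zerolag = (int64_t)deadline -
				  (int64_t)(runtime * dl_period / dl_runtime);

		/* Work with a relative value, exactly as the kernel does. */
		zerolag -= (int64_t)now;

		if (zerolag < 0)
			printf("0-lag already passed: drop running_bw immediately\n");
		else
			printf("arm inactive_timer to fire in %lld ns\n",
			       (long long)zerolag);
		return 0;
	}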
295 
296 static void task_contending(struct sched_dl_entity *dl_se, int flags)
297 {
298 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
299 
300 	/*
301 	 * If this is a non-deadline task that has been boosted,
302 	 * do nothing
303 	 */
304 	if (dl_se->dl_runtime == 0)
305 		return;
306 
307 	if (flags & ENQUEUE_MIGRATED)
308 		add_rq_bw(dl_se, dl_rq);
309 
310 	if (dl_se->dl_non_contending) {
311 		dl_se->dl_non_contending = 0;
312 		/*
313 		 * If the timer handler is currently running and the
314 		 * timer cannot be cancelled, inactive_task_timer()
315 		 * will see that dl_non_contending is not set, and
316 		 * will not touch the rq's active utilization,
317 		 * so we are still safe.
318 		 */
319 		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
320 			put_task_struct(dl_task_of(dl_se));
321 	} else {
322 		/*
323 		 * Since "dl_non_contending" is not set, the
324 		 * task's utilization has already been removed from
325 		 * active utilization (either when the task blocked,
326 		 * or when the "inactive timer" fired).
327 		 * So, add it back.
328 		 */
329 		add_running_bw(dl_se, dl_rq);
330 	}
331 }
332 
333 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
334 {
335 	struct sched_dl_entity *dl_se = &p->dl;
336 
337 	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
338 }
339 
340 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
341 {
342 	raw_spin_lock_init(&dl_b->dl_runtime_lock);
343 	dl_b->dl_period = period;
344 	dl_b->dl_runtime = runtime;
345 }
346 
347 void init_dl_bw(struct dl_bw *dl_b)
348 {
349 	raw_spin_lock_init(&dl_b->lock);
350 	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
351 	if (global_rt_runtime() == RUNTIME_INF)
352 		dl_b->bw = -1;
353 	else
354 		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
355 	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
356 	dl_b->total_bw = 0;
357 }
358 
359 void init_dl_rq(struct dl_rq *dl_rq)
360 {
361 	dl_rq->root = RB_ROOT_CACHED;
362 
363 #ifdef CONFIG_SMP
364 	/* zero means no -deadline tasks */
365 	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
366 
367 	dl_rq->dl_nr_migratory = 0;
368 	dl_rq->overloaded = 0;
369 	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
370 #else
371 	init_dl_bw(&dl_rq->dl_bw);
372 #endif
373 
374 	dl_rq->running_bw = 0;
375 	dl_rq->this_bw = 0;
376 	init_dl_rq_bw_ratio(dl_rq);
377 }
378 
379 #ifdef CONFIG_SMP
380 
381 static inline int dl_overloaded(struct rq *rq)
382 {
383 	return atomic_read(&rq->rd->dlo_count);
384 }
385 
386 static inline void dl_set_overload(struct rq *rq)
387 {
388 	if (!rq->online)
389 		return;
390 
391 	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
392 	/*
393 	 * Must be visible before the overload count is
394 	 * set (as in sched_rt.c).
395 	 *
396 	 * Matched by the barrier in pull_dl_task().
397 	 */
398 	smp_wmb();
399 	atomic_inc(&rq->rd->dlo_count);
400 }
401 
402 static inline void dl_clear_overload(struct rq *rq)
403 {
404 	if (!rq->online)
405 		return;
406 
407 	atomic_dec(&rq->rd->dlo_count);
408 	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
409 }
410 
411 static void update_dl_migration(struct dl_rq *dl_rq)
412 {
413 	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
414 		if (!dl_rq->overloaded) {
415 			dl_set_overload(rq_of_dl_rq(dl_rq));
416 			dl_rq->overloaded = 1;
417 		}
418 	} else if (dl_rq->overloaded) {
419 		dl_clear_overload(rq_of_dl_rq(dl_rq));
420 		dl_rq->overloaded = 0;
421 	}
422 }
423 
424 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
425 {
426 	struct task_struct *p = dl_task_of(dl_se);
427 
428 	if (p->nr_cpus_allowed > 1)
429 		dl_rq->dl_nr_migratory++;
430 
431 	update_dl_migration(dl_rq);
432 }
433 
434 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
435 {
436 	struct task_struct *p = dl_task_of(dl_se);
437 
438 	if (p->nr_cpus_allowed > 1)
439 		dl_rq->dl_nr_migratory--;
440 
441 	update_dl_migration(dl_rq);
442 }
443 
444 /*
445  * The list of pushable -deadline tasks is not a plist, like in
446  * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
447  */
448 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
449 {
450 	struct dl_rq *dl_rq = &rq->dl;
451 	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
452 	struct rb_node *parent = NULL;
453 	struct task_struct *entry;
454 	bool leftmost = true;
455 
456 	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
457 
458 	while (*link) {
459 		parent = *link;
460 		entry = rb_entry(parent, struct task_struct,
461 				 pushable_dl_tasks);
462 		if (dl_entity_preempt(&p->dl, &entry->dl))
463 			link = &parent->rb_left;
464 		else {
465 			link = &parent->rb_right;
466 			leftmost = false;
467 		}
468 	}
469 
470 	if (leftmost)
471 		dl_rq->earliest_dl.next = p->dl.deadline;
472 
473 	rb_link_node(&p->pushable_dl_tasks, parent, link);
474 	rb_insert_color_cached(&p->pushable_dl_tasks,
475 			       &dl_rq->pushable_dl_tasks_root, leftmost);
476 }
477 
478 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
479 {
480 	struct dl_rq *dl_rq = &rq->dl;
481 
482 	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
483 		return;
484 
485 	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
486 		struct rb_node *next_node;
487 
488 		next_node = rb_next(&p->pushable_dl_tasks);
489 		if (next_node) {
490 			dl_rq->earliest_dl.next = rb_entry(next_node,
491 				struct task_struct, pushable_dl_tasks)->dl.deadline;
492 		}
493 	}
494 
495 	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
496 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
497 }
498 
499 static inline int has_pushable_dl_tasks(struct rq *rq)
500 {
501 	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
502 }
503 
504 static int push_dl_task(struct rq *rq);
505 
506 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
507 {
508 	return dl_task(prev);
509 }
510 
511 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
512 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
513 
514 static void push_dl_tasks(struct rq *);
515 static void pull_dl_task(struct rq *);
516 
517 static inline void queue_push_tasks(struct rq *rq)
518 {
519 	if (!has_pushable_dl_tasks(rq))
520 		return;
521 
522 	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
523 }
524 
525 static inline void queue_pull_task(struct rq *rq)
526 {
527 	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
528 }
529 
530 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
531 
532 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
533 {
534 	struct rq *later_rq = NULL;
535 
536 	later_rq = find_lock_later_rq(p, rq);
537 	if (!later_rq) {
538 		int cpu;
539 
540 		/*
541 		 * If we cannot preempt any rq, fall back to pick any
542 		 * online cpu.
543 		 */
544 		cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
545 		if (cpu >= nr_cpu_ids) {
546 			/*
547 			 * Failed to find any suitable cpu.
548 			 * The task will never come back!
549 			 */
550 			BUG_ON(dl_bandwidth_enabled());
551 
552 			/*
553 			 * If admission control is disabled we
554 			 * try a little harder to let the task
555 			 * run.
556 			 */
557 			cpu = cpumask_any(cpu_active_mask);
558 		}
559 		later_rq = cpu_rq(cpu);
560 		double_lock_balance(rq, later_rq);
561 	}
562 
563 	set_task_cpu(p, later_rq->cpu);
564 	double_unlock_balance(later_rq, rq);
565 
566 	return later_rq;
567 }
568 
569 #else
570 
571 static inline
572 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
573 {
574 }
575 
576 static inline
577 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
578 {
579 }
580 
581 static inline
582 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
583 {
584 }
585 
586 static inline
587 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
588 {
589 }
590 
591 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
592 {
593 	return false;
594 }
595 
596 static inline void pull_dl_task(struct rq *rq)
597 {
598 }
599 
600 static inline void queue_push_tasks(struct rq *rq)
601 {
602 }
603 
604 static inline void queue_pull_task(struct rq *rq)
605 {
606 }
607 #endif /* CONFIG_SMP */
608 
609 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
610 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
611 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
612 				  int flags);
613 
614 /*
615  * We are being explicitly informed that a new instance is starting,
616  * and this means that:
617  *  - the absolute deadline of the entity has to be placed at
618  *    current time + relative deadline;
619  *  - the runtime of the entity has to be set to the maximum value.
620  *
621  * The capability of specifying such an event is useful whenever a -deadline
622  * entity wants to (try to!) synchronize its behaviour with that of the
623  * scheduler, and to (try to!) reconcile itself with its own scheduling
624  * parameters.
625  */
626 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
627 {
628 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
629 	struct rq *rq = rq_of_dl_rq(dl_rq);
630 
631 	WARN_ON(dl_se->dl_boosted);
632 	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
633 
634 	/*
635 	 * We are racing with the deadline timer. So, do nothing because
636 	 * the deadline timer handler will take care of properly recharging
637 	 * the runtime and postponing the deadline
638 	 */
639 	if (dl_se->dl_throttled)
640 		return;
641 
642 	/*
643 	 * We use the regular wall clock time to set deadlines in the
644 	 * future; in fact, we must consider execution overheads (time
645 	 * spent on hardirq context, etc.).
646 	 */
647 	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
648 	dl_se->runtime = dl_se->dl_runtime;
649 }
650 
651 /*
652  * Pure Earliest Deadline First (EDF) scheduling does not deal with the
653  * possibility of an entity lasting more than what it declared, and thus
654  * exhausting its runtime.
655  *
656  * Here we are interested in making runtime overrun possible, but we do
657  * not want an entity which is misbehaving to affect the scheduling of all
658  * other entities.
659  * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
660  * is used, in order to confine each entity within its own bandwidth.
661  *
662  * This function deals exactly with that, and ensures that when the runtime
663  * of an entity is replenished, its deadline is also postponed. That ensures
664  * the overrunning entity can't interfere with other entities in the system and
665  * can't make them miss their deadlines. Typical reasons why this kind of
666  * overrun can happen are an entity voluntarily trying to exceed its
667  * runtime, or one that underestimated it during sched_setattr().
668  */
669 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
670 				struct sched_dl_entity *pi_se)
671 {
672 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
673 	struct rq *rq = rq_of_dl_rq(dl_rq);
674 
675 	BUG_ON(pi_se->dl_runtime <= 0);
676 
677 	/*
678 	 * This could be the case for a !-dl task that is boosted.
679 	 * Just go with full inherited parameters.
680 	 */
681 	if (dl_se->dl_deadline == 0) {
682 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
683 		dl_se->runtime = pi_se->dl_runtime;
684 	}
685 
686 	if (dl_se->dl_yielded && dl_se->runtime > 0)
687 		dl_se->runtime = 0;
688 
689 	/*
690 	 * We keep moving the deadline away until we get some
691 	 * available runtime for the entity. This ensures correct
692 	 * handling of situations where the runtime overrun is
693 	 * arbitrarily large.
694 	 */
695 	while (dl_se->runtime <= 0) {
696 		dl_se->deadline += pi_se->dl_period;
697 		dl_se->runtime += pi_se->dl_runtime;
698 	}
699 
700 	/*
701 	 * At this point, the deadline really should be "in
702 	 * the future" with respect to rq->clock. If it's
703 	 * not, we are, for some reason, lagging too much!
704 	 * Anyway, after having warned userspace about that,
705 	 * we still try to keep things running by
706 	 * resetting the deadline and the budget of the
707 	 * entity.
708 	 */
709 	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
710 		printk_deferred_once("sched: DL replenish lagged too much\n");
711 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
712 		dl_se->runtime = pi_se->dl_runtime;
713 	}
714 
715 	if (dl_se->dl_yielded)
716 		dl_se->dl_yielded = 0;
717 	if (dl_se->dl_throttled)
718 		dl_se->dl_throttled = 0;
719 }
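A standalone sketch of the replenishment loop above, with invented microsecond values, shows why the deadline may need to be pushed away by several periods when the accumulated overrun is large.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t  runtime  = -25000;		/* 25ms of accumulated overrun */
		uint64_t deadline = 100000;		/* current absolute deadline   */
		const int64_t dl_runtime = 10000;	/* 10ms of runtime per period  */
		const int64_t dl_period  = 50000;	/* 50ms period                 */

		/* Keep moving the deadline away until some runtime is available. */
		while (runtime <= 0) {
			deadline += dl_period;
			runtime  += dl_runtime;
		}

		/* Three periods are skipped before the entity may run again. */
		printf("new deadline=%llu runtime=%lld\n",
		       (unsigned long long)deadline, (long long)runtime);
		return 0;
	}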
720 
721 /*
722  * Here we check if --at time t-- an entity (which is probably being
723  * [re]activated or, in general, enqueued) can use its remaining runtime
724  * and its current deadline _without_ exceeding the bandwidth it is
725  * assigned (function returns true if it can't). We are in fact applying
726  * one of the CBS rules: when a task wakes up, if the residual runtime
727  * over residual deadline fits within the allocated bandwidth, then we
728  * can keep the current (absolute) deadline and residual budget without
729  * disrupting the schedulability of the system. Otherwise, we should
730  * refill the runtime and set the deadline a period in the future,
731  * because keeping the current (absolute) deadline of the task would
732  * result in breaking guarantees promised to other tasks (refer to
733  * Documentation/scheduler/sched-deadline.txt for more information).
734  *
735  * This function returns true if:
736  *
737  *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
738  *
739  * IOW we can't recycle current parameters.
740  *
741  * Notice that the bandwidth check is done against the deadline. For
742  * tasks with deadline equal to period this is the same as using
743  * dl_period instead of dl_deadline in the equation above.
744  */
745 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
746 			       struct sched_dl_entity *pi_se, u64 t)
747 {
748 	u64 left, right;
749 
750 	/*
751 	 * left and right are the two sides of the equation above,
752 	 * after a bit of shuffling to use multiplications instead
753 	 * of divisions.
754 	 *
755 	 * Note that none of the time values involved in the two
756 	 * multiplications are absolute: dl_deadline and dl_runtime
757 	 * are the relative deadline and the maximum runtime of each
758 	 * instance, runtime is the runtime left for the last instance
759 	 * and (deadline - t), since t is rq->clock, is the time left
760 	 * to the (absolute) deadline. Even if overflowing the u64 type
761 	 * is very unlikely to occur in both cases, here we scale down
762 	 * as we want to avoid that risk at all. Scaling down by 10
763 	 * means that we reduce granularity to 1us. We are fine with it,
764 	 * since this is only a true/false check and, anyway, thinking
765 	 * of anything below microseconds resolution is actually fiction
766 	 * (but still we want to give the user that illusion >;).
767 	 */
768 	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
769 	right = ((dl_se->deadline - t) >> DL_SCALE) *
770 		(pi_se->dl_runtime >> DL_SCALE);
771 
772 	return dl_time_before(right, left);
773 }
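For illustration, the overflow test above can be reproduced outside the kernel with invented values; DL_SCALE is assumed to be 10 as in this kernel, so both sides are compared at roughly microsecond granularity.

	#include <stdint.h>
	#include <stdio.h>

	#define DL_SCALE 10	/* as in kernel/sched/sched.h */

	int main(void)
	{
		uint64_t dl_deadline = 100000000ULL;	/* relative deadline: 100ms  */
		uint64_t dl_runtime  =  10000000ULL;	/* reserved runtime:   10ms  */
		uint64_t deadline    = 530000000ULL;	/* current absolute deadline */
		uint64_t runtime     =   8000000ULL;	/* residual runtime:    8ms  */
		uint64_t t           = 500000000ULL;	/* "now", i.e. rq_clock()    */

		/* Does runtime / (deadline - t) exceed dl_runtime / dl_deadline? */
		uint64_t left  = (dl_deadline >> DL_SCALE) * (runtime >> DL_SCALE);
		uint64_t right = ((deadline - t) >> DL_SCALE) *
				 (dl_runtime >> DL_SCALE);

		/* "yes" means the current parameters cannot be recycled. */
		printf("overflow: %s\n", right < left ? "yes" : "no");
		return 0;
	}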
774 
775 /*
776  * Revised wakeup rule [1]: For self-suspending tasks, rather than
777  * re-initializing the task's runtime and deadline, the revised wakeup
778  * rule adjusts the task's runtime to prevent the task from overrunning
779  * its density.
780  *
781  * Reasoning: a task may overrun the density if:
782  *    runtime / (deadline - t) > dl_runtime / dl_deadline
783  *
784  * Therefore, runtime can be adjusted to:
785  *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
786  *
787  * In such a way that runtime will be equal to the maximum density
788  * the task can use without breaking any rule.
789  *
790  * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
791  * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
792  */
793 static void
794 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
795 {
796 	u64 laxity = dl_se->deadline - rq_clock(rq);
797 
798 	/*
799 	 * If the task has deadline < period, and the deadline is in the past,
800 	 * it should already be throttled before this check.
801 	 *
802 	 * See update_dl_entity() comments for further details.
803 	 */
804 	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
805 
806 	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
807 }
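A standalone sketch of the revised-wakeup adjustment above; dl_density is assumed to be dl_runtime / dl_deadline stored as a fixed-point fraction shifted by BW_SHIFT (20 in this kernel, rounding ignored), and all the values are invented.

	#include <stdint.h>
	#include <stdio.h>

	#define BW_SHIFT 20	/* as in kernel/sched/sched.h */

	int main(void)
	{
		uint64_t dl_runtime  =  10000000ULL;	/* 10ms                        */
		uint64_t dl_deadline = 100000000ULL;	/* 100ms                       */
		uint64_t laxity      =  40000000ULL;	/* deadline - rq_clock(): 40ms */

		/* dl_density ~= (dl_runtime << BW_SHIFT) / dl_deadline */
		uint64_t dl_density = (dl_runtime << BW_SHIFT) / dl_deadline;

		/* runtime = (dl_runtime / dl_deadline) * (deadline - t) */
		uint64_t runtime = (dl_density * laxity) >> BW_SHIFT;

		/* A 10% density over a 40ms laxity: roughly 4ms of runtime. */
		printf("adjusted runtime = %llu ns\n", (unsigned long long)runtime);
		return 0;
	}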
808 
809 /*
810  * Regarding the deadline, a task with implicit deadline has a relative
811  * deadline == relative period. A task with constrained deadline has a
812  * relative deadline <= relative period.
813  *
814  * We support constrained deadline tasks. However, there are some restrictions
815  * applied only for tasks which do not have an implicit deadline. See
816  * update_dl_entity() to know more about such restrictions.
817  *
818  * dl_is_implicit() returns true if the task has an implicit deadline.
819  */
820 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
821 {
822 	return dl_se->dl_deadline == dl_se->dl_period;
823 }
824 
825 /*
826  * When a deadline entity is placed in the runqueue, its runtime and deadline
827  * might need to be updated. This is done by a CBS wake up rule. There are two
828  * different rules: 1) the original CBS; and 2) the Revised CBS.
829  *
830  * When the task is starting a new period, the Original CBS is used. In this
831  * case, the runtime is replenished and a new absolute deadline is set.
832  *
833  * When a task is queued before the beginning of the next period, using the
834  * remaining runtime and deadline could make the entity overflow, see
835  * dl_entity_overflow() to learn more about runtime overflow. When such a
836  * case is detected, the runtime and deadline need to be updated.
837  *
838  * If the task has an implicit deadline, i.e., deadline == period, the Original
839  * CBS is applied: the runtime is replenished and a new absolute deadline is
840  * set, as in the previous cases.
841  *
842  * However, the Original CBS does not work properly for tasks with
843  * deadline < period, which are said to have a constrained deadline. By
844  * applying the Original CBS, a constrained deadline task would be able to run
845  * runtime/deadline in a period. With deadline < period, the task would
846  * overrun the runtime/period allowed bandwidth, breaking the admission test.
847  *
848  * In order to prevent this misbehaviour, the Revised CBS is used for
849  * constrained deadline tasks when a runtime overflow is detected. In the
850  * Revised CBS, rather than replenishing & setting a new absolute deadline,
851  * the remaining runtime of the task is reduced to avoid runtime overflow.
852  * Please refer to the comments of the update_dl_revised_wakeup() function
853  * to learn more about the Revised CBS rule.
854  */
855 static void update_dl_entity(struct sched_dl_entity *dl_se,
856 			     struct sched_dl_entity *pi_se)
857 {
858 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
859 	struct rq *rq = rq_of_dl_rq(dl_rq);
860 
861 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
862 	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
863 
864 		if (unlikely(!dl_is_implicit(dl_se) &&
865 			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
866 			     !dl_se->dl_boosted)){
867 			update_dl_revised_wakeup(dl_se, rq);
868 			return;
869 		}
870 
871 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
872 		dl_se->runtime = pi_se->dl_runtime;
873 	}
874 }
875 
876 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
877 {
878 	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
879 }
880 
881 /*
882  * If the entity depleted all its runtime, and if we want it to sleep
883  * while waiting for some new execution time to become available, we
884  * set the bandwidth replenishment timer to the replenishment instant
885  * and try to activate it.
886  *
887  * Notice that it is important for the caller to know if the timer
888  * actually started or not (i.e., the replenishment instant is in
889  * the future or in the past).
890  */
891 static int start_dl_timer(struct task_struct *p)
892 {
893 	struct sched_dl_entity *dl_se = &p->dl;
894 	struct hrtimer *timer = &dl_se->dl_timer;
895 	struct rq *rq = task_rq(p);
896 	ktime_t now, act;
897 	s64 delta;
898 
899 	lockdep_assert_held(&rq->lock);
900 
901 	/*
902 	 * We want the timer to fire at the deadline, but we must account
903 	 * for the fact that the deadline comes from rq->clock and not
904 	 * from the hrtimer's time base reading.
905 	 */
906 	act = ns_to_ktime(dl_next_period(dl_se));
907 	now = hrtimer_cb_get_time(timer);
908 	delta = ktime_to_ns(now) - rq_clock(rq);
909 	act = ktime_add_ns(act, delta);
910 
911 	/*
912 	 * If the expiry time already passed, e.g., because the value
913 	 * chosen as the deadline is too small, don't even try to
914 	 * start the timer in the past!
915 	 */
916 	if (ktime_us_delta(act, now) < 0)
917 		return 0;
918 
919 	/*
920 	 * !enqueued will guarantee another callback; even if one is already in
921 	 * progress. This ensures a balanced {get,put}_task_struct().
922 	 *
923 	 * The race against __run_timer() clearing the enqueued state is
924 	 * harmless because we're holding task_rq()->lock, therefore the timer
925 	 * expiring after we've done the check will wait on its task_rq_lock()
926 	 * and observe our state.
927 	 */
928 	if (!hrtimer_is_queued(timer)) {
929 		get_task_struct(p);
930 		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
931 	}
932 
933 	return 1;
934 }
935 
936 /*
937  * This is the bandwidth enforcement timer callback. If here, we know
938  * a task is not on its dl_rq, since the fact that the timer was running
939  * means the task is throttled and needs a runtime replenishment.
940  *
941  * However, what we actually do depends on whether the task is still active
942  * (i.e., it is on its rq) or has been removed from there by a call to
943  * dequeue_task_dl(). In the former case we must issue the runtime
944  * replenishment and add the task back to the dl_rq; in the latter, we just
945  * do nothing but clearing dl_throttled, so that runtime and deadline
946  * updating (and the queueing back to dl_rq) will be done by the
947  * next call to enqueue_task_dl().
948  */
949 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
950 {
951 	struct sched_dl_entity *dl_se = container_of(timer,
952 						     struct sched_dl_entity,
953 						     dl_timer);
954 	struct task_struct *p = dl_task_of(dl_se);
955 	struct rq_flags rf;
956 	struct rq *rq;
957 
958 	rq = task_rq_lock(p, &rf);
959 
960 	/*
961 	 * The task might have changed its scheduling policy to something
962 	 * different than SCHED_DEADLINE (through switched_from_dl()).
963 	 */
964 	if (!dl_task(p))
965 		goto unlock;
966 
967 	/*
968 	 * The task might have been boosted by someone else and might be in the
969  * boosting/deboosting path, in which case it's not throttled.
970 	 */
971 	if (dl_se->dl_boosted)
972 		goto unlock;
973 
974 	/*
975 	 * Spurious timer due to start_dl_timer() race; or we already received
976 	 * a replenishment from rt_mutex_setprio().
977 	 */
978 	if (!dl_se->dl_throttled)
979 		goto unlock;
980 
981 	sched_clock_tick();
982 	update_rq_clock(rq);
983 
984 	/*
985 	 * If the throttle happened during sched-out; like:
986 	 *
987 	 *   schedule()
988 	 *     deactivate_task()
989 	 *       dequeue_task_dl()
990 	 *         update_curr_dl()
991 	 *           start_dl_timer()
992 	 *         __dequeue_task_dl()
993 	 *     prev->on_rq = 0;
994 	 *
995 	 * We can be both throttled and !queued. Replenish the counter
996 	 * but do not enqueue -- wait for our wakeup to do that.
997 	 */
998 	if (!task_on_rq_queued(p)) {
999 		replenish_dl_entity(dl_se, dl_se);
1000 		goto unlock;
1001 	}
1002 
1003 #ifdef CONFIG_SMP
1004 	if (unlikely(!rq->online)) {
1005 		/*
1006 		 * If the runqueue is no longer available, migrate the
1007 		 * task elsewhere. This necessarily changes rq.
1008 		 */
1009 		lockdep_unpin_lock(&rq->lock, rf.cookie);
1010 		rq = dl_task_offline_migration(rq, p);
1011 		rf.cookie = lockdep_pin_lock(&rq->lock);
1012 		update_rq_clock(rq);
1013 
1014 		/*
1015 		 * Now that the task has been migrated to the new RQ and we
1016 		 * have that locked, proceed as normal and enqueue the task
1017 		 * there.
1018 		 */
1019 	}
1020 #endif
1021 
1022 	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1023 	if (dl_task(rq->curr))
1024 		check_preempt_curr_dl(rq, p, 0);
1025 	else
1026 		resched_curr(rq);
1027 
1028 #ifdef CONFIG_SMP
1029 	/*
1030 	 * Queueing this task back might have overloaded rq, check if we need
1031 	 * to kick someone away.
1032 	 */
1033 	if (has_pushable_dl_tasks(rq)) {
1034 		/*
1035 		 * Nothing relies on rq->lock after this, so it's safe to drop
1036 		 * rq->lock.
1037 		 */
1038 		rq_unpin_lock(rq, &rf);
1039 		push_dl_task(rq);
1040 		rq_repin_lock(rq, &rf);
1041 	}
1042 #endif
1043 
1044 unlock:
1045 	task_rq_unlock(rq, p, &rf);
1046 
1047 	/*
1048 	 * This can free the task_struct, including this hrtimer, do not touch
1049 	 * anything related to that after this.
1050 	 */
1051 	put_task_struct(p);
1052 
1053 	return HRTIMER_NORESTART;
1054 }
1055 
1056 void init_dl_task_timer(struct sched_dl_entity *dl_se)
1057 {
1058 	struct hrtimer *timer = &dl_se->dl_timer;
1059 
1060 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1061 	timer->function = dl_task_timer;
1062 }
1063 
1064 /*
1065  * During the activation, CBS checks if it can reuse the current task's
1066  * runtime and period. If the deadline of the task is in the past, CBS
1067  * cannot use the runtime, and so it replenishes the task. This rule
1068  * works fine for implicit deadline tasks (deadline == period), and the
1069  * CBS was designed for implicit deadline tasks. However, a task with
1070  * constrained deadline (deadline < period) might be awakened after the
1071  * deadline, but before the next period. In this case, replenishing the
1072  * task would allow it to run for runtime / deadline. As in this case
1073  * deadline < period, CBS enables a task to run for more than the
1074  * runtime / period. In a very loaded system, this can cause a domino
1075  * effect, making other tasks miss their deadlines.
1076  *
1077  * To avoid this problem, in the activation of a constrained deadline
1078  * task after the deadline but before the next period, throttle the
1079  * task and set the replenishing timer to the beginning of the next period,
1080  * unless it is boosted.
1081  */
1082 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1083 {
1084 	struct task_struct *p = dl_task_of(dl_se);
1085 	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1086 
1087 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1088 	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1089 		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1090 			return;
1091 		dl_se->dl_throttled = 1;
1092 		if (dl_se->runtime > 0)
1093 			dl_se->runtime = 0;
1094 	}
1095 }
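For illustration, the throttling condition above can be checked with invented numbers: a constrained task (relative deadline 60ms, period 100ms) that wakes up between its missed deadline and the next period is throttled until that period begins.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t dl_deadline = 60;	/* relative deadline (ms)       */
		uint64_t dl_period   = 100;	/* relative period (ms)         */
		uint64_t deadline    = 160;	/* current absolute deadline    */
		uint64_t now         = 180;	/* wakeup time, i.e. rq_clock() */

		/* dl_next_period(): deadline - dl_deadline + dl_period */
		uint64_t next_period = deadline - dl_deadline + dl_period;

		if (deadline < now && now < next_period)
			printf("throttle until t=%llu\n",
			       (unsigned long long)next_period);
		else
			printf("normal wakeup path\n");
		return 0;
	}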
1096 
1097 static
1098 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1099 {
1100 	return (dl_se->runtime <= 0);
1101 }
1102 
1103 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1104 
1105 /*
1106  * This function implements the GRUB accounting rule:
1107  * according to the GRUB reclaiming algorithm, the runtime is
1108  * not decreased as "dq = -dt", but as
1109  * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1110  * where u is the utilization of the task, Umax is the maximum reclaimable
1111  * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1112  * as the difference between the "total runqueue utilization" and the
1113  * runqueue active utilization, and Uextra is the (per runqueue) extra
1114  * reclaimable utilization.
1115  * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1116  * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1117  * BW_SHIFT.
1118  * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
1119  * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1120  * Since delta is a 64 bit variable, to have an overflow its value
1121  * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1122  * So, overflow is not an issue here.
1123  */
1124 u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1125 {
1126 	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1127 	u64 u_act;
1128 	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1129 
1130 	/*
1131 	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1132 	 * we compare u_inact + rq->dl.extra_bw with
1133 	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1134 	 * u_inact + rq->dl.extra_bw can be larger than
1135 	 * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative
1136 	 * leading to wrong results)
1137 	 */
1138 	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1139 		u_act = u_act_min;
1140 	else
1141 		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1142 
1143 	return (delta * u_act) >> BW_SHIFT;
1144 }
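A standalone sketch of the GRUB computation above, with invented utilizations; BW_SHIFT, BW_UNIT and RATIO_SHIFT mirror the kernel's values, and bw_ratio is set to 1/Umax = 1 for simplicity.

	#include <stdint.h>
	#include <stdio.h>

	#define BW_SHIFT	20
	#define BW_UNIT		(1 << BW_SHIFT)
	#define RATIO_SHIFT	8

	int main(void)
	{
		uint64_t delta      = 1000000;			     /* 1ms of execution */
		uint64_t this_bw    = (uint64_t)BW_UNIT * 60 / 100;  /* Utot   = 0.6 */
		uint64_t running_bw = (uint64_t)BW_UNIT * 40 / 100;  /* Uact   = 0.4 */
		uint64_t extra_bw   = (uint64_t)BW_UNIT * 10 / 100;  /* Uextra = 0.1 */
		uint64_t dl_bw      = (uint64_t)BW_UNIT * 20 / 100;  /* u      = 0.2 */
		uint64_t bw_ratio   = 1 << RATIO_SHIFT;		     /* 1/Umax = 1   */

		uint64_t u_inact   = this_bw - running_bw;	     /* Utot - Uact  */
		uint64_t u_act_min = (dl_bw * bw_ratio) >> RATIO_SHIFT;
		uint64_t u_act;

		if (u_inact + extra_bw > BW_UNIT - u_act_min)
			u_act = u_act_min;
		else
			u_act = BW_UNIT - u_inact - extra_bw;

		/* Roughly 0.7 of the elapsed time is charged: ~700000ns per 1ms. */
		printf("charged runtime = %llu ns\n",
		       (unsigned long long)((delta * u_act) >> BW_SHIFT));
		return 0;
	}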
1145 
1146 /*
1147  * Update the current task's runtime statistics (provided it is still
1148  * a -deadline task and has not been removed from the dl_rq).
1149  */
1150 static void update_curr_dl(struct rq *rq)
1151 {
1152 	struct task_struct *curr = rq->curr;
1153 	struct sched_dl_entity *dl_se = &curr->dl;
1154 	u64 delta_exec, scaled_delta_exec;
1155 	int cpu = cpu_of(rq);
1156 
1157 	if (!dl_task(curr) || !on_dl_rq(dl_se))
1158 		return;
1159 
1160 	/*
1161 	 * Consumed budget is computed considering the time as
1162 	 * observed by schedulable tasks (excluding time spent
1163 	 * in hardirq context, etc.). Deadlines are instead
1164 	 * computed using hard walltime. This seems to be the more
1165 	 * natural solution, but the full ramifications of this
1166 	 * approach need further study.
1167 	 */
1168 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
1169 	if (unlikely((s64)delta_exec <= 0)) {
1170 		if (unlikely(dl_se->dl_yielded))
1171 			goto throttle;
1172 		return;
1173 	}
1174 
1175 	schedstat_set(curr->se.statistics.exec_max,
1176 		      max(curr->se.statistics.exec_max, delta_exec));
1177 
1178 	curr->se.sum_exec_runtime += delta_exec;
1179 	account_group_exec_runtime(curr, delta_exec);
1180 
1181 	curr->se.exec_start = rq_clock_task(rq);
1182 	cgroup_account_cputime(curr, delta_exec);
1183 
1184 	sched_rt_avg_update(rq, delta_exec);
1185 
1186 	if (dl_entity_is_special(dl_se))
1187 		return;
1188 
1189 	/*
1190 	 * For tasks that participate in GRUB, we implement GRUB-PA: the
1191 	 * spare reclaimed bandwidth is used to clock down frequency.
1192 	 *
1193 	 * For the others, we still need to scale reservation parameters
1194 	 * according to current frequency and CPU maximum capacity.
1195 	 */
1196 	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1197 		scaled_delta_exec = grub_reclaim(delta_exec,
1198 						 rq,
1199 						 &curr->dl);
1200 	} else {
1201 		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1202 		unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
1203 
1204 		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1205 		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1206 	}
1207 
1208 	dl_se->runtime -= scaled_delta_exec;
1209 
1210 throttle:
1211 	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1212 		dl_se->dl_throttled = 1;
1213 
1214 		/* If requested, inform the user about runtime overruns. */
1215 		if (dl_runtime_exceeded(dl_se) &&
1216 		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1217 			dl_se->dl_overrun = 1;
1218 
1219 		__dequeue_task_dl(rq, curr, 0);
1220 		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1221 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1222 
1223 		if (!is_leftmost(curr, &rq->dl))
1224 			resched_curr(rq);
1225 	}
1226 
1227 	/*
1228 	 * Because -- for now -- we share the rt bandwidth, we need to
1229 	 * account our runtime there too, otherwise actual rt tasks
1230 	 * would be able to exceed the shared quota.
1231 	 *
1232 	 * Account to the root rt group for now.
1233 	 *
1234 	 * The solution we're working towards is having the RT groups scheduled
1235 	 * using deadline servers -- however there's a few nasties to figure
1236 	 * out before that can happen.
1237 	 */
1238 	if (rt_bandwidth_enabled()) {
1239 		struct rt_rq *rt_rq = &rq->rt;
1240 
1241 		raw_spin_lock(&rt_rq->rt_runtime_lock);
1242 		/*
1243 		 * We'll let actual RT tasks worry about the overflow here, we
1244 		 * have our own CBS to keep us in line; only account when RT
1245 		 * bandwidth is relevant.
1246 		 */
1247 		if (sched_rt_bandwidth_account(rt_rq))
1248 			rt_rq->rt_time += delta_exec;
1249 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
1250 	}
1251 }
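For the non-GRUB branch of update_curr_dl() above, the frequency/capacity scaling can be illustrated standalone; cap_scale() is reproduced locally as v * s >> SCHED_CAPACITY_SHIFT (10 in this kernel), and the capacity values are invented.

	#include <stdint.h>
	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10
	#define cap_scale(v, s)		(((v) * (s)) >> SCHED_CAPACITY_SHIFT)

	int main(void)
	{
		uint64_t delta_exec = 1000000;	/* 1ms of wall-clock execution     */
		uint64_t scale_freq = 512;	/* CPU currently at half frequency */
		uint64_t scale_cpu  = 1024;	/* a full-capacity CPU             */

		uint64_t scaled = cap_scale(delta_exec, scale_freq);

		scaled = cap_scale(scaled, scale_cpu);

		/* Only half of the budget is charged when running at half speed. */
		printf("scaled_delta_exec = %llu ns\n", (unsigned long long)scaled);
		return 0;
	}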
1252 
1253 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1254 {
1255 	struct sched_dl_entity *dl_se = container_of(timer,
1256 						     struct sched_dl_entity,
1257 						     inactive_timer);
1258 	struct task_struct *p = dl_task_of(dl_se);
1259 	struct rq_flags rf;
1260 	struct rq *rq;
1261 
1262 	rq = task_rq_lock(p, &rf);
1263 
1264 	if (!dl_task(p) || p->state == TASK_DEAD) {
1265 		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1266 
1267 		if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
1268 			sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1269 			sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1270 			dl_se->dl_non_contending = 0;
1271 		}
1272 
1273 		raw_spin_lock(&dl_b->lock);
1274 		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1275 		raw_spin_unlock(&dl_b->lock);
1276 		__dl_clear_params(p);
1277 
1278 		goto unlock;
1279 	}
1280 	if (dl_se->dl_non_contending == 0)
1281 		goto unlock;
1282 
1283 	sched_clock_tick();
1284 	update_rq_clock(rq);
1285 
1286 	sub_running_bw(dl_se, &rq->dl);
1287 	dl_se->dl_non_contending = 0;
1288 unlock:
1289 	task_rq_unlock(rq, p, &rf);
1290 	put_task_struct(p);
1291 
1292 	return HRTIMER_NORESTART;
1293 }
1294 
1295 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1296 {
1297 	struct hrtimer *timer = &dl_se->inactive_timer;
1298 
1299 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1300 	timer->function = inactive_task_timer;
1301 }
1302 
1303 #ifdef CONFIG_SMP
1304 
1305 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1306 {
1307 	struct rq *rq = rq_of_dl_rq(dl_rq);
1308 
1309 	if (dl_rq->earliest_dl.curr == 0 ||
1310 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1311 		dl_rq->earliest_dl.curr = deadline;
1312 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1313 	}
1314 }
1315 
1316 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1317 {
1318 	struct rq *rq = rq_of_dl_rq(dl_rq);
1319 
1320 	/*
1321 	 * Since we may have removed our earliest (and/or next earliest)
1322 	 * task we must recompute them.
1323 	 */
1324 	if (!dl_rq->dl_nr_running) {
1325 		dl_rq->earliest_dl.curr = 0;
1326 		dl_rq->earliest_dl.next = 0;
1327 		cpudl_clear(&rq->rd->cpudl, rq->cpu);
1328 	} else {
1329 		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1330 		struct sched_dl_entity *entry;
1331 
1332 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1333 		dl_rq->earliest_dl.curr = entry->deadline;
1334 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1335 	}
1336 }
1337 
1338 #else
1339 
1340 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1341 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1342 
1343 #endif /* CONFIG_SMP */
1344 
1345 static inline
1346 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1347 {
1348 	int prio = dl_task_of(dl_se)->prio;
1349 	u64 deadline = dl_se->deadline;
1350 
1351 	WARN_ON(!dl_prio(prio));
1352 	dl_rq->dl_nr_running++;
1353 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
1354 
1355 	inc_dl_deadline(dl_rq, deadline);
1356 	inc_dl_migration(dl_se, dl_rq);
1357 }
1358 
1359 static inline
1360 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1361 {
1362 	int prio = dl_task_of(dl_se)->prio;
1363 
1364 	WARN_ON(!dl_prio(prio));
1365 	WARN_ON(!dl_rq->dl_nr_running);
1366 	dl_rq->dl_nr_running--;
1367 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1368 
1369 	dec_dl_deadline(dl_rq, dl_se->deadline);
1370 	dec_dl_migration(dl_se, dl_rq);
1371 }
1372 
1373 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1374 {
1375 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1376 	struct rb_node **link = &dl_rq->root.rb_root.rb_node;
1377 	struct rb_node *parent = NULL;
1378 	struct sched_dl_entity *entry;
1379 	int leftmost = 1;
1380 
1381 	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1382 
1383 	while (*link) {
1384 		parent = *link;
1385 		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
1386 		if (dl_time_before(dl_se->deadline, entry->deadline))
1387 			link = &parent->rb_left;
1388 		else {
1389 			link = &parent->rb_right;
1390 			leftmost = 0;
1391 		}
1392 	}
1393 
1394 	rb_link_node(&dl_se->rb_node, parent, link);
1395 	rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
1396 
1397 	inc_dl_tasks(dl_se, dl_rq);
1398 }
1399 
1400 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1401 {
1402 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1403 
1404 	if (RB_EMPTY_NODE(&dl_se->rb_node))
1405 		return;
1406 
1407 	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1408 	RB_CLEAR_NODE(&dl_se->rb_node);
1409 
1410 	dec_dl_tasks(dl_se, dl_rq);
1411 }
1412 
1413 static void
1414 enqueue_dl_entity(struct sched_dl_entity *dl_se,
1415 		  struct sched_dl_entity *pi_se, int flags)
1416 {
1417 	BUG_ON(on_dl_rq(dl_se));
1418 
1419 	/*
1420 	 * If this is a wakeup or a new instance, the scheduling
1421 	 * parameters of the task might need updating. Otherwise,
1422 	 * we want a replenishment of its runtime.
1423 	 */
1424 	if (flags & ENQUEUE_WAKEUP) {
1425 		task_contending(dl_se, flags);
1426 		update_dl_entity(dl_se, pi_se);
1427 	} else if (flags & ENQUEUE_REPLENISH) {
1428 		replenish_dl_entity(dl_se, pi_se);
1429 	} else if ((flags & ENQUEUE_RESTORE) &&
1430 		  dl_time_before(dl_se->deadline,
1431 				 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1432 		setup_new_dl_entity(dl_se);
1433 	}
1434 
1435 	__enqueue_dl_entity(dl_se);
1436 }
1437 
1438 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1439 {
1440 	__dequeue_dl_entity(dl_se);
1441 }
1442 
1443 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1444 {
1445 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
1446 	struct sched_dl_entity *pi_se = &p->dl;
1447 
1448 	/*
1449 	 * Use the scheduling parameters of the top pi-waiter task if:
1450 	 * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
1451 	 * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
1452 	 *   smaller than our deadline OR we are a !SCHED_DEADLINE task getting
1453 	 *   boosted due to a SCHED_DEADLINE pi-waiter).
1454 	 * Otherwise we keep our runtime and deadline.
1455 	 */
1456 	if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
1457 		pi_se = &pi_task->dl;
1458 	} else if (!dl_prio(p->normal_prio)) {
1459 		/*
1460 		 * Special case in which we have a !SCHED_DEADLINE task
1461 		 * that is going to be deboosted, but exceeds its
1462 		 * runtime while doing so. No point in replenishing
1463 		 * it, as it's going to return back to its original
1464 		 * scheduling class after this.
1465 		 */
1466 		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1467 		return;
1468 	}
1469 
1470 	/*
1471 	 * Check if a constrained deadline task was activated
1472 	 * after the deadline but before the next period.
1473 	 * If that is the case, the task will be throttled and
1474 	 * the replenishment timer will be set to the next period.
1475 	 */
1476 	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1477 		dl_check_constrained_dl(&p->dl);
1478 
1479 	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1480 		add_rq_bw(&p->dl, &rq->dl);
1481 		add_running_bw(&p->dl, &rq->dl);
1482 	}
1483 
1484 	/*
1485 	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
1486 	 * its budget it needs a replenishment and, since it now is on
1487 	 * its rq, the bandwidth timer callback (which clearly has not
1488 	 * run yet) will take care of this.
1489 	 * However, the active utilization does not depend on the fact
1490 	 * that the task is on the runqueue or not (but depends on the
1491 	 * task's state - in GRUB parlance, "inactive" vs "active contending").
1492 	 * In other words, even if a task is throttled its utilization must
1493 	 * be counted in the active utilization; hence, we need to call
1494 	 * add_running_bw().
1495 	 */
1496 	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1497 		if (flags & ENQUEUE_WAKEUP)
1498 			task_contending(&p->dl, flags);
1499 
1500 		return;
1501 	}
1502 
1503 	enqueue_dl_entity(&p->dl, pi_se, flags);
1504 
1505 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1506 		enqueue_pushable_dl_task(rq, p);
1507 }
1508 
1509 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1510 {
1511 	dequeue_dl_entity(&p->dl);
1512 	dequeue_pushable_dl_task(rq, p);
1513 }
1514 
1515 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1516 {
1517 	update_curr_dl(rq);
1518 	__dequeue_task_dl(rq, p, flags);
1519 
1520 	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1521 		sub_running_bw(&p->dl, &rq->dl);
1522 		sub_rq_bw(&p->dl, &rq->dl);
1523 	}
1524 
1525 	/*
1526 	 * This check allows us to start the inactive timer (or to immediately
1527 	 * decrease the active utilization, if needed) in two cases:
1528 	 * when the task blocks and when it is terminating
1529 	 * (p->state == TASK_DEAD). We can handle the two cases in the same
1530 	 * way, because from GRUB's point of view the same thing is happening
1531 	 * (the task moves from "active contending" to "active non contending"
1532 	 * or "inactive")
1533 	 */
1534 	if (flags & DEQUEUE_SLEEP)
1535 		task_non_contending(p);
1536 }
1537 
1538 /*
1539  * Yield task semantic for -deadline tasks is:
1540  *
1541  *   get off the CPU until our next instance, with
1542  *   a new runtime. This is of little use now, since we
1543  *   don't have a bandwidth reclaiming mechanism. Anyway,
1544  *   bandwidth reclaiming is planned for the future, and
1545  *   yield_task_dl will indicate that some spare budget
1546  *   is available for other task instances to use.
1547  */
1548 static void yield_task_dl(struct rq *rq)
1549 {
1550 	/*
1551 	 * We make the task go to sleep until its current deadline by
1552 	 * forcing its runtime to zero. This way, update_curr_dl() stops
1553 	 * it and the bandwidth timer will wake it up and will give it
1554 	 * new scheduling parameters (thanks to dl_yielded=1).
1555 	 */
1556 	rq->curr->dl.dl_yielded = 1;
1557 
1558 	update_rq_clock(rq);
1559 	update_curr_dl(rq);
1560 	/*
1561 	 * Tell update_rq_clock() that we've just updated,
1562 	 * so we don't do microscopic update in schedule()
1563 	 * and double the fastpath cost.
1564 	 */
1565 	rq_clock_skip_update(rq, true);
1566 }
1567 
1568 #ifdef CONFIG_SMP
1569 
1570 static int find_later_rq(struct task_struct *task);
1571 
1572 static int
1573 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1574 {
1575 	struct task_struct *curr;
1576 	struct rq *rq;
1577 
1578 	if (sd_flag != SD_BALANCE_WAKE)
1579 		goto out;
1580 
1581 	rq = cpu_rq(cpu);
1582 
1583 	rcu_read_lock();
1584 	curr = READ_ONCE(rq->curr); /* unlocked access */
1585 
1586 	/*
1587 	 * If we are dealing with a -deadline task, we must
1588 	 * decide where to wake it up.
1589 	 * If it has a later deadline and the current task
1590 	 * on this rq can't move (provided the waking task
1591 	 * can!) we prefer to send it somewhere else. On the
1592 	 * other hand, if it has a shorter deadline, we
1593  * try to make it stay here; it might be important.
1594 	 */
1595 	if (unlikely(dl_task(curr)) &&
1596 	    (curr->nr_cpus_allowed < 2 ||
1597 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1598 	    (p->nr_cpus_allowed > 1)) {
1599 		int target = find_later_rq(p);
1600 
1601 		if (target != -1 &&
1602 				(dl_time_before(p->dl.deadline,
1603 					cpu_rq(target)->dl.earliest_dl.curr) ||
1604 				(cpu_rq(target)->dl.dl_nr_running == 0)))
1605 			cpu = target;
1606 	}
1607 	rcu_read_unlock();
1608 
1609 out:
1610 	return cpu;
1611 }
1612 
1613 static void migrate_task_rq_dl(struct task_struct *p)
1614 {
1615 	struct rq *rq;
1616 
1617 	if (p->state != TASK_WAKING)
1618 		return;
1619 
1620 	rq = task_rq(p);
1621 	/*
1622 	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1623 	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1624 	 * rq->lock is not... So, lock it
1625 	 */
1626 	raw_spin_lock(&rq->lock);
1627 	if (p->dl.dl_non_contending) {
1628 		sub_running_bw(&p->dl, &rq->dl);
1629 		p->dl.dl_non_contending = 0;
1630 		/*
1631 		 * If the timer handler is currently running and the
1632 		 * timer cannot be cancelled, inactive_task_timer()
1633 		 * will see that dl_non_contending is not set, and
1634 		 * will not touch the rq's active utilization,
1635 		 * so we are still safe.
1636 		 */
1637 		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1638 			put_task_struct(p);
1639 	}
1640 	sub_rq_bw(&p->dl, &rq->dl);
1641 	raw_spin_unlock(&rq->lock);
1642 }
1643 
1644 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1645 {
1646 	/*
1647 	 * Current can't be migrated, useless to reschedule,
1648 	 * let's hope p can move out.
1649 	 */
1650 	if (rq->curr->nr_cpus_allowed == 1 ||
1651 	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1652 		return;
1653 
1654 	/*
1655 	 * p is migratable, so let's not schedule it and
1656 	 * see if it is pushed or pulled somewhere else.
1657 	 */
1658 	if (p->nr_cpus_allowed != 1 &&
1659 	    cpudl_find(&rq->rd->cpudl, p, NULL))
1660 		return;
1661 
1662 	resched_curr(rq);
1663 }
1664 
1665 #endif /* CONFIG_SMP */
1666 
1667 /*
1668  * Only called when both the current and waking task are -deadline
1669  * tasks.
1670  */
1671 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1672 				  int flags)
1673 {
1674 	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1675 		resched_curr(rq);
1676 		return;
1677 	}
1678 
1679 #ifdef CONFIG_SMP
1680 	/*
1681 	 * In the unlikely case current and p have the same deadline
1682 	 * let us try to decide what's the best thing to do...
1683 	 */
1684 	if ((p->dl.deadline == rq->curr->dl.deadline) &&
1685 	    !test_tsk_need_resched(rq->curr))
1686 		check_preempt_equal_dl(rq, p);
1687 #endif /* CONFIG_SMP */
1688 }
1689 
1690 #ifdef CONFIG_SCHED_HRTICK
1691 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1692 {
1693 	hrtick_start(rq, p->dl.runtime);
1694 }
1695 #else /* !CONFIG_SCHED_HRTICK */
1696 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1697 {
1698 }
1699 #endif
1700 
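/*
 * Return the leftmost (i.e. earliest deadline) entity of the dl_rq
 * rb-tree, or NULL if the tree is empty.
 */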
1701 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1702 						   struct dl_rq *dl_rq)
1703 {
1704 	struct rb_node *left = rb_first_cached(&dl_rq->root);
1705 
1706 	if (!left)
1707 		return NULL;
1708 
1709 	return rb_entry(left, struct sched_dl_entity, rb_node);
1710 }
1711 
1712 static struct task_struct *
1713 pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1714 {
1715 	struct sched_dl_entity *dl_se;
1716 	struct task_struct *p;
1717 	struct dl_rq *dl_rq;
1718 
1719 	dl_rq = &rq->dl;
1720 
1721 	if (need_pull_dl_task(rq, prev)) {
1722 		/*
1723 		 * This is OK, because current is on_cpu, which avoids it being
1724 		 * picked for load-balance and preemption/IRQs are still
1725 		 * disabled avoiding further scheduler activity on it and we're
1726 		 * being very careful to re-start the picking loop.
1727 		 */
1728 		rq_unpin_lock(rq, rf);
1729 		pull_dl_task(rq);
1730 		rq_repin_lock(rq, rf);
1731 		/*
1732 		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1733 		 * means a stop task can slip in, in which case we need to
1734 		 * re-start task selection.
1735 		 */
1736 		if (rq->stop && task_on_rq_queued(rq->stop))
1737 			return RETRY_TASK;
1738 	}
1739 
1740 	/*
1741 	 * When prev is DL, we may throttle it in put_prev_task().
1742 	 * So, we update time before we check for dl_nr_running.
1743 	 */
1744 	if (prev->sched_class == &dl_sched_class)
1745 		update_curr_dl(rq);
1746 
1747 	if (unlikely(!dl_rq->dl_nr_running))
1748 		return NULL;
1749 
1750 	put_prev_task(rq, prev);
1751 
1752 	dl_se = pick_next_dl_entity(rq, dl_rq);
1753 	BUG_ON(!dl_se);
1754 
1755 	p = dl_task_of(dl_se);
1756 	p->se.exec_start = rq_clock_task(rq);
1757 
1758 	/* Running task will never be pushed. */
1759 	dequeue_pushable_dl_task(rq, p);
1760 
1761 	if (hrtick_enabled(rq))
1762 		start_hrtick_dl(rq, p);
1763 
1764 	queue_push_tasks(rq);
1765 
1766 	return p;
1767 }
1768 
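/*
 * A previously running -deadline task is being switched out: update its
 * runtime accounting and, if it is still queued and can migrate, make it
 * pushable again.
 */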
1769 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1770 {
1771 	update_curr_dl(rq);
1772 
1773 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1774 		enqueue_pushable_dl_task(rq, p);
1775 }
1776 
1777 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1778 {
1779 	update_curr_dl(rq);
1780 
1781 	/*
1782 	 * Even when we have runtime, update_curr_dl() might have resulted in us
1783 	 * not being the leftmost task anymore. In that case NEED_RESCHED will
1784 	 * be set and schedule() will start a new hrtick for the next task.
1785 	 */
1786 	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1787 	    is_leftmost(p, &rq->dl))
1788 		start_hrtick_dl(rq, p);
1789 }
1790 
1791 static void task_fork_dl(struct task_struct *p)
1792 {
1793 	/*
1794 	 * SCHED_DEADLINE tasks cannot fork; this is achieved through
1795 	 * sched_fork().
1796 	 */
1797 }
1798 
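/*
 * p just became rq->curr: restart its exec_start accounting and remove
 * it from the pushable tree, since a running task cannot be pushed.
 */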
1799 static void set_curr_task_dl(struct rq *rq)
1800 {
1801 	struct task_struct *p = rq->curr;
1802 
1803 	p->se.exec_start = rq_clock_task(rq);
1804 
1805 	/* You can't push away the running task */
1806 	dequeue_pushable_dl_task(rq, p);
1807 }
1808 
1809 #ifdef CONFIG_SMP
1810 
1811 /* Only try algorithms three times */
1812 #define DL_MAX_TRIES 3
1813 
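/*
 * Return 1 if @p can be migrated to @cpu: it is not currently running
 * on @rq and @cpu is in its affinity mask.
 */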
1814 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1815 {
1816 	if (!task_running(rq, p) &&
1817 	    cpumask_test_cpu(cpu, &p->cpus_allowed))
1818 		return 1;
1819 	return 0;
1820 }
1821 
1822 /*
1823  * Return the earliest pushable task on this rq that is suitable to be
1824  * executed on the given CPU, or NULL if there is none:
1825  */
1826 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1827 {
1828 	struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1829 	struct task_struct *p = NULL;
1830 
1831 	if (!has_pushable_dl_tasks(rq))
1832 		return NULL;
1833 
1834 next_node:
1835 	if (next_node) {
1836 		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1837 
1838 		if (pick_dl_task(rq, p, cpu))
1839 			return p;
1840 
1841 		next_node = rb_next(next_node);
1842 		goto next_node;
1843 	}
1844 
1845 	return NULL;
1846 }
1847 
1848 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1849 
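/*
 * Find a CPU whose runqueue is a good candidate for pushing @task to:
 * among the CPUs in later_mask (filled by cpudl_find()), prefer the
 * task's previous CPU, then CPUs topologically close to it, then
 * this_cpu, then any remaining one. Return -1 if nothing suitable.
 */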
1850 static int find_later_rq(struct task_struct *task)
1851 {
1852 	struct sched_domain *sd;
1853 	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1854 	int this_cpu = smp_processor_id();
1855 	int cpu = task_cpu(task);
1856 
1857 	/* Make sure the mask is initialized first */
1858 	if (unlikely(!later_mask))
1859 		return -1;
1860 
1861 	if (task->nr_cpus_allowed == 1)
1862 		return -1;
1863 
1864 	/*
1865 	 * We have to consider system topology and task affinity
1866 	 * first, then we can look for a suitable cpu.
1867 	 */
1868 	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1869 		return -1;
1870 
1871 	/*
1872 	 * If we are here, some targets have been found: the most
1873 	 * suitable of them is, among the runqueues whose current tasks
1874 	 * have a later deadline than the task's, the rq with the
1875 	 * latest possible one.
1876 	 *
1877 	 * Now we check how well this matches with the task's
1878 	 * affinity and system topology.
1879 	 *
1880 	 * The last cpu where the task ran is our first
1881 	 * guess, since it is most likely cache-hot there.
1882 	 */
1883 	if (cpumask_test_cpu(cpu, later_mask))
1884 		return cpu;
1885 	/*
1886 	 * Check if this_cpu is to be skipped (i.e., it is
1887 	 * not in the mask) or not.
1888 	 */
1889 	if (!cpumask_test_cpu(this_cpu, later_mask))
1890 		this_cpu = -1;
1891 
1892 	rcu_read_lock();
1893 	for_each_domain(cpu, sd) {
1894 		if (sd->flags & SD_WAKE_AFFINE) {
1895 			int best_cpu;
1896 
1897 			/*
1898 			 * If possible, preempting this_cpu is
1899 			 * cheaper than migrating.
1900 			 */
1901 			if (this_cpu != -1 &&
1902 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1903 				rcu_read_unlock();
1904 				return this_cpu;
1905 			}
1906 
1907 			best_cpu = cpumask_first_and(later_mask,
1908 							sched_domain_span(sd));
1909 			/*
1910 			 * Last chance: if a cpu that is in both later_mask
1911 			 * and the current sd span is valid, that becomes our
1912 			 * choice. Of course, the latest possible cpu is
1913 			 * already under consideration through later_mask.
1914 			 */
1915 			if (best_cpu < nr_cpu_ids) {
1916 				rcu_read_unlock();
1917 				return best_cpu;
1918 			}
1919 		}
1920 	}
1921 	rcu_read_unlock();
1922 
1923 	/*
1924 	 * At this point, all our guesses failed; we just return
1925 	 * 'something' and let the caller sort things out.
1926 	 */
1927 	if (this_cpu != -1)
1928 		return this_cpu;
1929 
1930 	cpu = cpumask_any(later_mask);
1931 	if (cpu < nr_cpu_ids)
1932 		return cpu;
1933 
1934 	return -1;
1935 }
1936 
1937 /* Locks the rq it finds */
1938 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1939 {
1940 	struct rq *later_rq = NULL;
1941 	int tries;
1942 	int cpu;
1943 
1944 	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1945 		cpu = find_later_rq(task);
1946 
1947 		if ((cpu == -1) || (cpu == rq->cpu))
1948 			break;
1949 
1950 		later_rq = cpu_rq(cpu);
1951 
1952 		if (later_rq->dl.dl_nr_running &&
1953 		    !dl_time_before(task->dl.deadline,
1954 					later_rq->dl.earliest_dl.curr)) {
1955 			/*
1956 			 * Target rq has tasks of equal or earlier deadline,
1957 			 * retrying does not release any lock and is unlikely
1958 			 * to yield a different result.
1959 			 */
1960 			later_rq = NULL;
1961 			break;
1962 		}
1963 
1964 		/* Retry if something changed. */
1965 		if (double_lock_balance(rq, later_rq)) {
1966 			if (unlikely(task_rq(task) != rq ||
1967 				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
1968 				     task_running(rq, task) ||
1969 				     !dl_task(task) ||
1970 				     !task_on_rq_queued(task))) {
1971 				double_unlock_balance(rq, later_rq);
1972 				later_rq = NULL;
1973 				break;
1974 			}
1975 		}
1976 
1977 		/*
1978 		 * If the rq we found has no -deadline task, or
1979 		 * its earliest one has a later deadline than our
1980 		 * task, the rq is a good one.
1981 		 */
1982 		if (!later_rq->dl.dl_nr_running ||
1983 		    dl_time_before(task->dl.deadline,
1984 				   later_rq->dl.earliest_dl.curr))
1985 			break;
1986 
1987 		/* Otherwise we try again. */
1988 		double_unlock_balance(rq, later_rq);
1989 		later_rq = NULL;
1990 	}
1991 
1992 	return later_rq;
1993 }
1994 
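/*
 * Return the earliest-deadline task on this rq's pushable tree (with a
 * few sanity checks), or NULL if there is none.
 */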
1995 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1996 {
1997 	struct task_struct *p;
1998 
1999 	if (!has_pushable_dl_tasks(rq))
2000 		return NULL;
2001 
2002 	p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2003 		     struct task_struct, pushable_dl_tasks);
2004 
2005 	BUG_ON(rq->cpu != task_cpu(p));
2006 	BUG_ON(task_current(rq, p));
2007 	BUG_ON(p->nr_cpus_allowed <= 1);
2008 
2009 	BUG_ON(!task_on_rq_queued(p));
2010 	BUG_ON(!dl_task(p));
2011 
2012 	return p;
2013 }
2014 
2015 /*
2016  * See if the non-running -deadline tasks on this rq
2017  * can be sent to some other CPU where they can preempt
2018  * and start executing.
2019  */
2020 static int push_dl_task(struct rq *rq)
2021 {
2022 	struct task_struct *next_task;
2023 	struct rq *later_rq;
2024 	int ret = 0;
2025 
2026 	if (!rq->dl.overloaded)
2027 		return 0;
2028 
2029 	next_task = pick_next_pushable_dl_task(rq);
2030 	if (!next_task)
2031 		return 0;
2032 
2033 retry:
2034 	if (unlikely(next_task == rq->curr)) {
2035 		WARN_ON(1);
2036 		return 0;
2037 	}
2038 
2039 	/*
2040 	 * If next_task preempts rq->curr, and rq->curr
2041 	 * can move away, it makes sense to just reschedule
2042 	 * without going further in pushing next_task.
2043 	 */
2044 	if (dl_task(rq->curr) &&
2045 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2046 	    rq->curr->nr_cpus_allowed > 1) {
2047 		resched_curr(rq);
2048 		return 0;
2049 	}
2050 
2051 	/* We might release rq lock */
2052 	get_task_struct(next_task);
2053 
2054 	/* Will lock the rq it'll find */
2055 	later_rq = find_lock_later_rq(next_task, rq);
2056 	if (!later_rq) {
2057 		struct task_struct *task;
2058 
2059 		/*
2060 		 * We must check all this again, since
2061 		 * find_lock_later_rq releases rq->lock and it is
2062 		 * then possible that next_task has migrated.
2063 		 */
2064 		task = pick_next_pushable_dl_task(rq);
2065 		if (task == next_task) {
2066 			/*
2067 			 * The task is still there. We don't try
2068 			 * again; some other cpu will pull it when ready.
2069 			 */
2070 			goto out;
2071 		}
2072 
2073 		if (!task)
2074 			/* No more tasks */
2075 			goto out;
2076 
2077 		put_task_struct(next_task);
2078 		next_task = task;
2079 		goto retry;
2080 	}
2081 
2082 	deactivate_task(rq, next_task, 0);
2083 	sub_running_bw(&next_task->dl, &rq->dl);
2084 	sub_rq_bw(&next_task->dl, &rq->dl);
2085 	set_task_cpu(next_task, later_rq->cpu);
2086 	add_rq_bw(&next_task->dl, &later_rq->dl);
2087 	add_running_bw(&next_task->dl, &later_rq->dl);
2088 	activate_task(later_rq, next_task, 0);
2089 	ret = 1;
2090 
2091 	resched_curr(later_rq);
2092 
2093 	double_unlock_balance(rq, later_rq);
2094 
2095 out:
2096 	put_task_struct(next_task);
2097 
2098 	return ret;
2099 }
2100 
2101 static void push_dl_tasks(struct rq *rq)
2102 {
2103 	/* push_dl_task() will return true if it moved a -deadline task */
2104 	while (push_dl_task(rq))
2105 		;
2106 }
2107 
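/*
 * Look at the other runqueues in this root_domain that are flagged as
 * overloaded and pull over any -deadline task that would preempt what
 * is currently running (or already pulled) here, rescheduling if one
 * is found.
 */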
2108 static void pull_dl_task(struct rq *this_rq)
2109 {
2110 	int this_cpu = this_rq->cpu, cpu;
2111 	struct task_struct *p;
2112 	bool resched = false;
2113 	struct rq *src_rq;
2114 	u64 dmin = LONG_MAX;
2115 
2116 	if (likely(!dl_overloaded(this_rq)))
2117 		return;
2118 
2119 	/*
2120 	 * Match the barrier from dl_set_overload(); this guarantees that if we
2121 	 * see overloaded we must also see the dlo_mask bit.
2122 	 */
2123 	smp_rmb();
2124 
2125 	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2126 		if (this_cpu == cpu)
2127 			continue;
2128 
2129 		src_rq = cpu_rq(cpu);
2130 
2131 		/*
2132 		 * It looks racy, and it is! However, as in sched_rt.c,
2133 		 * we are fine with this.
2134 		 */
2135 		if (this_rq->dl.dl_nr_running &&
2136 		    dl_time_before(this_rq->dl.earliest_dl.curr,
2137 				   src_rq->dl.earliest_dl.next))
2138 			continue;
2139 
2140 		/* Might drop this_rq->lock */
2141 		double_lock_balance(this_rq, src_rq);
2142 
2143 		/*
2144 		 * If there are no more pullable tasks on the
2145 		 * rq, we're done with it.
2146 		 */
2147 		if (src_rq->dl.dl_nr_running <= 1)
2148 			goto skip;
2149 
2150 		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2151 
2152 		/*
2153 		 * We found a task to be pulled if:
2154 		 *  - it preempts our current (if there's one),
2155 		 *  - it will preempt the last one we pulled (if any).
2156 		 */
2157 		if (p && dl_time_before(p->dl.deadline, dmin) &&
2158 		    (!this_rq->dl.dl_nr_running ||
2159 		     dl_time_before(p->dl.deadline,
2160 				    this_rq->dl.earliest_dl.curr))) {
2161 			WARN_ON(p == src_rq->curr);
2162 			WARN_ON(!task_on_rq_queued(p));
2163 
2164 			/*
2165 			 * We don't pull p if its deadline is earlier than that of
2166 			 * src_rq's current task: p is about to run there anyway.
2167 			 */
2168 			if (dl_time_before(p->dl.deadline,
2169 					   src_rq->curr->dl.deadline))
2170 				goto skip;
2171 
2172 			resched = true;
2173 
2174 			deactivate_task(src_rq, p, 0);
2175 			sub_running_bw(&p->dl, &src_rq->dl);
2176 			sub_rq_bw(&p->dl, &src_rq->dl);
2177 			set_task_cpu(p, this_cpu);
2178 			add_rq_bw(&p->dl, &this_rq->dl);
2179 			add_running_bw(&p->dl, &this_rq->dl);
2180 			activate_task(this_rq, p, 0);
2181 			dmin = p->dl.deadline;
2182 
2183 			/* Is there any other task even earlier? */
2184 		}
2185 skip:
2186 		double_unlock_balance(this_rq, src_rq);
2187 	}
2188 
2189 	if (resched)
2190 		resched_curr(this_rq);
2191 }
2192 
2193 /*
2194  * Since the task is not running and a reschedule is not going to happen
2195  * anytime soon on its runqueue, we try pushing it away now.
2196  */
2197 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2198 {
2199 	if (!task_running(rq, p) &&
2200 	    !test_tsk_need_resched(rq->curr) &&
2201 	    p->nr_cpus_allowed > 1 &&
2202 	    dl_task(rq->curr) &&
2203 	    (rq->curr->nr_cpus_allowed < 2 ||
2204 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2205 		push_dl_tasks(rq);
2206 	}
2207 }
2208 
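/*
 * Affinity change for a -deadline task: if the new mask moves it to a
 * different root_domain, release its bandwidth from the current one
 * (room in the destination was already reserved, see the comment below)
 * before applying the common affinity update.
 */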
2209 static void set_cpus_allowed_dl(struct task_struct *p,
2210 				const struct cpumask *new_mask)
2211 {
2212 	struct root_domain *src_rd;
2213 	struct rq *rq;
2214 
2215 	BUG_ON(!dl_task(p));
2216 
2217 	rq = task_rq(p);
2218 	src_rd = rq->rd;
2219 	/*
2220 	 * Migrating a SCHED_DEADLINE task between exclusive
2221 	 * cpusets (different root_domains) entails a bandwidth
2222 	 * update. We already made space for us in the destination
2223 	 * domain (see cpuset_can_attach()).
2224 	 */
2225 	if (!cpumask_intersects(src_rd->span, new_mask)) {
2226 		struct dl_bw *src_dl_b;
2227 
2228 		src_dl_b = dl_bw_of(cpu_of(rq));
2229 		/*
2230 		 * We now free resources of the root_domain we are migrating
2231 		 * off. In the worst case, sched_setattr() may temporarily fail
2232 		 * until we complete the update.
2233 		 */
2234 		raw_spin_lock(&src_dl_b->lock);
2235 		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2236 		raw_spin_unlock(&src_dl_b->lock);
2237 	}
2238 
2239 	set_cpus_allowed_common(p, new_mask);
2240 }
2241 
2242 /* Assumes rq->lock is held */
2243 static void rq_online_dl(struct rq *rq)
2244 {
2245 	if (rq->dl.overloaded)
2246 		dl_set_overload(rq);
2247 
2248 	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2249 	if (rq->dl.dl_nr_running > 0)
2250 		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2251 }
2252 
2253 /* Assumes rq->lock is held */
2254 static void rq_offline_dl(struct rq *rq)
2255 {
2256 	if (rq->dl.overloaded)
2257 		dl_clear_overload(rq);
2258 
2259 	cpudl_clear(&rq->rd->cpudl, rq->cpu);
2260 	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2261 }
2262 
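/* Allocate the per-CPU cpumasks used by find_later_rq(). */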
2263 void __init init_sched_dl_class(void)
2264 {
2265 	unsigned int i;
2266 
2267 	for_each_possible_cpu(i)
2268 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2269 					GFP_KERNEL, cpu_to_node(i));
2270 }
2271 
2272 #endif /* CONFIG_SMP */
2273 
2274 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2275 {
2276 	/*
2277 	 * task_non_contending() can start the "inactive timer" (if the 0-lag
2278 	 * time is in the future). If the task switches back to dl before
2279 	 * the "inactive timer" fires, it can continue to consume its current
2280 	 * runtime using its current deadline. If it stays outside of
2281 	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2282 	 * will reset the task parameters.
2283 	 */
2284 	if (task_on_rq_queued(p) && p->dl.dl_runtime)
2285 		task_non_contending(p);
2286 
2287 	if (!task_on_rq_queued(p))
2288 		sub_rq_bw(&p->dl, &rq->dl);
2289 
2290 	/*
2291 	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2292 	 * at the 0-lag time, because the task could have been migrated
2293 	 * while it was SCHED_OTHER in the meantime.
2294 	 */
2295 	if (p->dl.dl_non_contending)
2296 		p->dl.dl_non_contending = 0;
2297 
2298 	/*
2299 	 * Since this might be the only -deadline task on the rq,
2300 	 * this is the right place to try to pull some other one
2301 	 * from an overloaded cpu, if any.
2302 	 */
2303 	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2304 		return;
2305 
2306 	queue_pull_task(rq);
2307 }
2308 
2309 /*
2310  * When switching to -deadline, we may overload the rq; then
2311  * we try to push someone off, if possible.
2312  */
2313 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2314 {
2315 	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2316 		put_task_struct(p);
2317 
2318 	/* If p is not queued we will update its parameters at next wakeup. */
2319 	if (!task_on_rq_queued(p)) {
2320 		add_rq_bw(&p->dl, &rq->dl);
2321 
2322 		return;
2323 	}
2324 
2325 	if (rq->curr != p) {
2326 #ifdef CONFIG_SMP
2327 		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2328 			queue_push_tasks(rq);
2329 #endif
2330 		if (dl_task(rq->curr))
2331 			check_preempt_curr_dl(rq, p, 0);
2332 		else
2333 			resched_curr(rq);
2334 	}
2335 }
2336 
2337 /*
2338  * If the scheduling parameters of a -deadline task changed,
2339  * a push or pull operation might be needed.
2340  */
2341 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2342 			    int oldprio)
2343 {
2344 	if (task_on_rq_queued(p) || rq->curr == p) {
2345 #ifdef CONFIG_SMP
2346 		/*
2347 		 * This might be too much, but unfortunately
2348 		 * we don't have the old deadline value, and
2349 		 * we can't tell whether the task is raising
2350 		 * or lowering its prio, so...
2351 		 */
2352 		if (!rq->dl.overloaded)
2353 			queue_pull_task(rq);
2354 
2355 		/*
2356 		 * If we now have an earlier deadline task than p,
2357 		 * then reschedule, provided p is still on this
2358 		 * runqueue.
2359 		 */
2360 		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2361 			resched_curr(rq);
2362 #else
2363 		/*
2364 		 * Again, we don't know if p has an earlier
2365 		 * or later deadline, so let's blindly set a
2366 		 * (maybe not needed) rescheduling point.
2367 		 */
2368 		resched_curr(rq);
2369 #endif /* CONFIG_SMP */
2370 	}
2371 }
2372 
2373 const struct sched_class dl_sched_class = {
2374 	.next			= &rt_sched_class,
2375 	.enqueue_task		= enqueue_task_dl,
2376 	.dequeue_task		= dequeue_task_dl,
2377 	.yield_task		= yield_task_dl,
2378 
2379 	.check_preempt_curr	= check_preempt_curr_dl,
2380 
2381 	.pick_next_task		= pick_next_task_dl,
2382 	.put_prev_task		= put_prev_task_dl,
2383 
2384 #ifdef CONFIG_SMP
2385 	.select_task_rq		= select_task_rq_dl,
2386 	.migrate_task_rq	= migrate_task_rq_dl,
2387 	.set_cpus_allowed       = set_cpus_allowed_dl,
2388 	.rq_online              = rq_online_dl,
2389 	.rq_offline             = rq_offline_dl,
2390 	.task_woken		= task_woken_dl,
2391 #endif
2392 
2393 	.set_curr_task		= set_curr_task_dl,
2394 	.task_tick		= task_tick_dl,
2395 	.task_fork              = task_fork_dl,
2396 
2397 	.prio_changed           = prio_changed_dl,
2398 	.switched_from		= switched_from_dl,
2399 	.switched_to		= switched_to_dl,
2400 
2401 	.update_curr		= update_curr_dl,
2402 };
2403 
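/*
 * Validate a new global RT bandwidth setting: refuse it (-EBUSY) if the
 * resulting -deadline bandwidth would be smaller than what is already
 * allocated on any root_domain.
 */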
2404 int sched_dl_global_validate(void)
2405 {
2406 	u64 runtime = global_rt_runtime();
2407 	u64 period = global_rt_period();
2408 	u64 new_bw = to_ratio(period, runtime);
2409 	struct dl_bw *dl_b;
2410 	int cpu, ret = 0;
2411 	unsigned long flags;
2412 
2413 	/*
2414 	 * Here we want to check that the bandwidth is not being set to a
2415 	 * value smaller than the currently allocated bandwidth in
2416 	 * any of the root_domains.
2417 	 *
2418 	 * FIXME: Cycling on all the CPUs is overdoing it, but simpler than
2419 	 * cycling on root_domains... Discussion on different/better
2420 	 * solutions is welcome!
2421 	 */
2422 	for_each_possible_cpu(cpu) {
2423 		rcu_read_lock_sched();
2424 		dl_b = dl_bw_of(cpu);
2425 
2426 		raw_spin_lock_irqsave(&dl_b->lock, flags);
2427 		if (new_bw < dl_b->total_bw)
2428 			ret = -EBUSY;
2429 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2430 
2431 		rcu_read_unlock_sched();
2432 
2433 		if (ret)
2434 			break;
2435 	}
2436 
2437 	return ret;
2438 }
2439 
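/*
 * Recompute this rq's bw_ratio and extra_bw from the global RT
 * bandwidth; these ratios are used when reclaiming unused bandwidth
 * (GRUB).
 */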
2440 void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2441 {
2442 	if (global_rt_runtime() == RUNTIME_INF) {
2443 		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2444 		dl_rq->extra_bw = 1 << BW_SHIFT;
2445 	} else {
2446 		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2447 			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2448 		dl_rq->extra_bw = to_ratio(global_rt_period(),
2449 						    global_rt_runtime());
2450 	}
2451 }
2452 
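/*
 * Propagate a new global RT bandwidth setting to the default DL
 * bandwidth, to every root_domain's dl_bw and to each rq's bandwidth
 * ratios.
 */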
2453 void sched_dl_do_global(void)
2454 {
2455 	u64 new_bw = -1;
2456 	struct dl_bw *dl_b;
2457 	int cpu;
2458 	unsigned long flags;
2459 
2460 	def_dl_bandwidth.dl_period = global_rt_period();
2461 	def_dl_bandwidth.dl_runtime = global_rt_runtime();
2462 
2463 	if (global_rt_runtime() != RUNTIME_INF)
2464 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2465 
2466 	/*
2467 	 * FIXME: As above...
2468 	 */
2469 	for_each_possible_cpu(cpu) {
2470 		rcu_read_lock_sched();
2471 		dl_b = dl_bw_of(cpu);
2472 
2473 		raw_spin_lock_irqsave(&dl_b->lock, flags);
2474 		dl_b->bw = new_bw;
2475 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2476 
2477 		rcu_read_unlock_sched();
2478 		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2479 	}
2480 }
2481 
2482 /*
2483  * We must be sure that accepting a new task (or allowing changing the
2484  * parameters of an existing one) is consistent with the bandwidth
2485  * constraints. If yes, this function also accordingly updates the currently
2486  * allocated bandwidth to reflect the new situation.
2487  *
2488  * This function is called while holding p's rq->lock.
2489  */
2490 int sched_dl_overflow(struct task_struct *p, int policy,
2491 		      const struct sched_attr *attr)
2492 {
2493 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2494 	u64 period = attr->sched_period ?: attr->sched_deadline;
2495 	u64 runtime = attr->sched_runtime;
2496 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2497 	int cpus, err = -1;
2498 
2499 	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2500 		return 0;
2501 
2502 	/* !deadline task may carry old deadline bandwidth */
2503 	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2504 		return 0;
2505 
2506 	/*
2507 	 * Whether a task enters, leaves, or stays -deadline but changes
2508 	 * its parameters, we may need to update the total allocated
2509 	 * bandwidth of the container accordingly.
2510 	 */
2511 	raw_spin_lock(&dl_b->lock);
2512 	cpus = dl_bw_cpus(task_cpu(p));
2513 	if (dl_policy(policy) && !task_has_dl_policy(p) &&
2514 	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2515 		if (hrtimer_active(&p->dl.inactive_timer))
2516 			__dl_sub(dl_b, p->dl.dl_bw, cpus);
2517 		__dl_add(dl_b, new_bw, cpus);
2518 		err = 0;
2519 	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
2520 		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2521 		/*
2522 		 * XXX this is slightly incorrect: when the task
2523 		 * utilization decreases, we should delay the total
2524 		 * utilization change until the task's 0-lag point.
2525 		 * But this would require to set the task's "inactive
2526 		 * timer" when the task is not inactive.
2527 		 */
2528 		__dl_sub(dl_b, p->dl.dl_bw, cpus);
2529 		__dl_add(dl_b, new_bw, cpus);
2530 		dl_change_utilization(p, new_bw);
2531 		err = 0;
2532 	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2533 		/*
2534 		 * Do not decrease the total deadline utilization here,
2535 		 * switched_from_dl() will take care to do it at the correct
2536 		 * (0-lag) time.
2537 		 */
2538 		err = 0;
2539 	}
2540 	raw_spin_unlock(&dl_b->lock);
2541 
2542 	return err;
2543 }
2544 
2545 /*
2546  * This function initializes the sched_dl_entity of a task that is
2547  * becoming a SCHED_DEADLINE task.
2548  *
2549  * Only the static values are considered here, the actual runtime and the
2550  * absolute deadline will be properly calculated when the task is enqueued
2551  * for the first time with its new policy.
2552  */
2553 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2554 {
2555 	struct sched_dl_entity *dl_se = &p->dl;
2556 
2557 	dl_se->dl_runtime = attr->sched_runtime;
2558 	dl_se->dl_deadline = attr->sched_deadline;
2559 	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2560 	dl_se->flags = attr->sched_flags;
2561 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2562 	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2563 }
2564 
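/*
 * Copy the static -deadline parameters of @p back into @attr; this is
 * the inverse of __setparam_dl() above.
 */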
2565 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2566 {
2567 	struct sched_dl_entity *dl_se = &p->dl;
2568 
2569 	attr->sched_priority = p->rt_priority;
2570 	attr->sched_runtime = dl_se->dl_runtime;
2571 	attr->sched_deadline = dl_se->dl_deadline;
2572 	attr->sched_period = dl_se->dl_period;
2573 	attr->sched_flags = dl_se->flags;
2574 }
2575 
2576 /*
2577  * This function validates the new parameters of a -deadline task.
2578  * We require the deadline to be non-zero and greater than or equal
2579  * to the runtime, and the period to be zero or greater than or equal
2580  * to the deadline. Furthermore, we have to be sure that
2581  * user parameters are above the internal resolution of 1us (we
2582  * check sched_runtime only since it is always the smaller one) and
2583  * below 2^63 ns (we have to check both sched_deadline and
2584  * sched_period, as the latter can be zero).
2585  */
2586 bool __checkparam_dl(const struct sched_attr *attr)
2587 {
2588 	/* special dl tasks don't actually use any parameter */
2589 	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2590 		return true;
2591 
2592 	/* deadline != 0 */
2593 	if (attr->sched_deadline == 0)
2594 		return false;
2595 
2596 	/*
2597 	 * Since we truncate DL_SCALE bits, make sure we're at least
2598 	 * that big.
2599 	 */
2600 	if (attr->sched_runtime < (1ULL << DL_SCALE))
2601 		return false;
2602 
2603 	/*
2604 	 * Since we use the MSB for wrap-around and sign issues, make
2605 	 * sure it's not set (mind that period can be equal to zero).
2606 	 */
2607 	if (attr->sched_deadline & (1ULL << 63) ||
2608 	    attr->sched_period & (1ULL << 63))
2609 		return false;
2610 
2611 	/* runtime <= deadline <= period (if period != 0) */
2612 	if ((attr->sched_period != 0 &&
2613 	     attr->sched_period < attr->sched_deadline) ||
2614 	    attr->sched_deadline < attr->sched_runtime)
2615 		return false;
2616 
2617 	return true;
2618 }
2619 
2620 /*
2621  * This function clears the sched_dl_entity static params.
2622  */
2623 void __dl_clear_params(struct task_struct *p)
2624 {
2625 	struct sched_dl_entity *dl_se = &p->dl;
2626 
2627 	dl_se->dl_runtime = 0;
2628 	dl_se->dl_deadline = 0;
2629 	dl_se->dl_period = 0;
2630 	dl_se->flags = 0;
2631 	dl_se->dl_bw = 0;
2632 	dl_se->dl_density = 0;
2633 
2634 	dl_se->dl_throttled = 0;
2635 	dl_se->dl_yielded = 0;
2636 	dl_se->dl_non_contending = 0;
2637 	dl_se->dl_overrun = 0;
2638 }
2639 
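/* Return true if @attr differs from @p's current -deadline parameters. */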
2640 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2641 {
2642 	struct sched_dl_entity *dl_se = &p->dl;
2643 
2644 	if (dl_se->dl_runtime != attr->sched_runtime ||
2645 	    dl_se->dl_deadline != attr->sched_deadline ||
2646 	    dl_se->dl_period != attr->sched_period ||
2647 	    dl_se->flags != attr->sched_flags)
2648 		return true;
2649 
2650 	return false;
2651 }
2652 
2653 #ifdef CONFIG_SMP
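/*
 * Check whether the destination cpuset's root_domain has room for @p's
 * bandwidth and, if so, reserve it; called before moving a -deadline
 * task to a different (exclusive) cpuset.
 */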
2654 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2655 {
2656 	unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
2657 							cs_cpus_allowed);
2658 	struct dl_bw *dl_b;
2659 	bool overflow;
2660 	int cpus, ret;
2661 	unsigned long flags;
2662 
2663 	rcu_read_lock_sched();
2664 	dl_b = dl_bw_of(dest_cpu);
2665 	raw_spin_lock_irqsave(&dl_b->lock, flags);
2666 	cpus = dl_bw_cpus(dest_cpu);
2667 	overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
2668 	if (overflow)
2669 		ret = -EBUSY;
2670 	else {
2671 		/*
2672 		 * We reserve space for this task in the destination
2673 		 * root_domain, as we can't fail after this point.
2674 		 * We will free resources in the source root_domain
2675 		 * later on (see set_cpus_allowed_dl()).
2676 		 */
2677 		__dl_add(dl_b, p->dl.dl_bw, cpus);
2678 		ret = 0;
2679 	}
2680 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2681 	rcu_read_unlock_sched();
2682 	return ret;
2683 }
2684 
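/*
 * Return 1 if shrinking the set of CPUs described by @cur down to
 * @trial still leaves enough CPUs for the allocated -deadline
 * bandwidth, 0 otherwise.
 */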
2685 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2686 				 const struct cpumask *trial)
2687 {
2688 	int ret = 1, trial_cpus;
2689 	struct dl_bw *cur_dl_b;
2690 	unsigned long flags;
2691 
2692 	rcu_read_lock_sched();
2693 	cur_dl_b = dl_bw_of(cpumask_any(cur));
2694 	trial_cpus = cpumask_weight(trial);
2695 
2696 	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2697 	if (cur_dl_b->bw != -1 &&
2698 	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2699 		ret = 0;
2700 	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2701 	rcu_read_unlock_sched();
2702 	return ret;
2703 }
2704 
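/*
 * Return true if the -deadline bandwidth allocated in @cpu's root_domain
 * exceeds what its currently active CPUs can provide (used on CPU
 * hotplug).
 */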
2705 bool dl_cpu_busy(unsigned int cpu)
2706 {
2707 	unsigned long flags;
2708 	struct dl_bw *dl_b;
2709 	bool overflow;
2710 	int cpus;
2711 
2712 	rcu_read_lock_sched();
2713 	dl_b = dl_bw_of(cpu);
2714 	raw_spin_lock_irqsave(&dl_b->lock, flags);
2715 	cpus = dl_bw_cpus(cpu);
2716 	overflow = __dl_overflow(dl_b, cpus, 0, 0);
2717 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2718 	rcu_read_unlock_sched();
2719 	return overflow;
2720 }
2721 #endif
2722 
2723 #ifdef CONFIG_SCHED_DEBUG
2724 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2725 
2726 void print_dl_stats(struct seq_file *m, int cpu)
2727 {
2728 	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2729 }
2730 #endif /* CONFIG_SCHED_DEBUG */
2731