// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic, or that try to execute more
 * than their reserved bandwidth, will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
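
/*
 * Usage sketch (illustrative only, not part of this file): a userspace
 * task would typically request a SCHED_DEADLINE reservation through the
 * sched_setattr() system call (glibc ships no wrapper, so a real program
 * goes through syscall(2)). Assuming the usual struct sched_attr layout
 * from include/uapi/linux/sched/types.h:
 *
 *        struct sched_attr attr = {
 *                .size           = sizeof(attr),
 *                .sched_policy   = SCHED_DEADLINE,
 *                .sched_runtime  =  10 * 1000 * 1000,    10 ms of budget ...
 *                .sched_deadline =  30 * 1000 * 1000,    ... within 30 ms ...
 *                .sched_period   = 100 * 1000 * 1000,    ... every 100 ms
 *        };
 *        sched_setattr(0, &attr, 0);
 *
 * The bandwidth (utilization) of such a task is runtime/period = 0.1.
 */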

#include <linux/cpuset.h>

/*
 * Default limits for DL period; on the top end we guard against small util
 * tasks still getting ridiculously long effective runtimes, on the bottom end
 * we guard against timer DoS.
 */
static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
static unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_dl_sysctls[] = {
        {
                .procname       = "sched_deadline_period_max_us",
                .data           = &sysctl_sched_dl_period_max,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_douintvec_minmax,
                .extra1         = (void *)&sysctl_sched_dl_period_min,
        },
        {
                .procname       = "sched_deadline_period_min_us",
                .data           = &sysctl_sched_dl_period_min,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_douintvec_minmax,
                .extra2         = (void *)&sysctl_sched_dl_period_max,
        },
        {}
};

static int __init sched_dl_sysctl_init(void)
{
        register_sysctl_init("kernel", sched_dl_sysctls);
        return 0;
}
late_initcall(sched_dl_sysctl_init);
#endif
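
/*
 * Example (illustrative, not part of this file): both limits are expressed
 * in microseconds and show up under /proc/sys/kernel, so an administrator
 * could e.g. lower the maximum admissible period to one second with:
 *
 *        echo 1000000 > /proc/sys/kernel/sched_deadline_period_max_us
 *
 * The default maximum, 1 << 22 us, is roughly 4.19 seconds.
 */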

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
        return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
        return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = task_rq(p);

        return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
        return !RB_EMPTY_NODE(&dl_se->rb_node);
}

#ifdef CONFIG_RT_MUTEXES
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
        return dl_se->pi_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
        return pi_of(dl_se) != dl_se;
}
#else
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
        return dl_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
        return false;
}
#endif

#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");
        return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
        struct root_domain *rd = cpu_rq(i)->rd;
        int cpus;

        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");

        if (cpumask_subset(rd->span, cpu_active_mask))
                return cpumask_weight(rd->span);

        cpus = 0;

        for_each_cpu_and(i, rd->span, cpu_active_mask)
                cpus++;

        return cpus;
}

static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
{
        unsigned long cap = 0;
        int i;

        for_each_cpu_and(i, mask, cpu_active_mask)
                cap += capacity_orig_of(i);

        return cap;
}
/*
 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 * of the CPU the task is running on rather than the rd's \Sum CPU capacity.
 */
static inline unsigned long dl_bw_capacity(int i)
{
        if (!sched_asym_cpucap_active() &&
            capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
                return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
        } else {
                RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                                 "sched RCU must be held");

                return __dl_bw_capacity(cpu_rq(i)->rd->span);
        }
}
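
/*
 * Worked example (illustrative): on a symmetric 4-CPU root domain where
 * every CPU has capacity_orig_of() == SCHED_CAPACITY_SCALE (1024), the
 * fast path above returns 4 << SCHED_CAPACITY_SHIFT == 4096, i.e. "four
 * full CPUs". On an asymmetric (big.LITTLE style) system the slow path
 * instead sums the individual capacities of the active CPUs in the root
 * domain, e.g. 2 * 1024 + 2 * 512 = 3072.
 */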

static inline bool dl_bw_visited(int cpu, u64 gen)
{
        struct root_domain *rd = cpu_rq(cpu)->rd;

        if (rd->visit_gen == gen)
                return true;

        rd->visit_gen = gen;
        return false;
}

static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
        struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
        int i;

        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");
        for_each_cpu_and(i, rd->span, cpu_active_mask) {
                struct rq *rq = cpu_rq(i);

                rq->dl.extra_bw += bw;
        }
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
        return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
        return 1;
}

static inline unsigned long dl_bw_capacity(int i)
{
        return SCHED_CAPACITY_SCALE;
}

static inline bool dl_bw_visited(int cpu, u64 gen)
{
        return false;
}

static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
        struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

        dl->extra_bw += bw;
}
#endif

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
        dl_b->total_bw -= tsk_bw;
        __dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
        dl_b->total_bw += tsk_bw;
        __dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline bool
__dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
{
        return dl_b->bw != -1 &&
               cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}
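
/*
 * Worked example (illustrative): bandwidths are stored as fractions of
 * 1 << BW_SHIFT (2^20). With the default 95% global limit, dl_b->bw is
 * about 0.95 << 20. For a single CPU of capacity 1024, cap_scale() leaves
 * that value unchanged, so admitting a new task with runtime/period = 0.5
 * (new_bw ~ 0.5 << 20) on top of an existing total_bw of 0.6 << 20 makes
 * the right-hand side ~1.1 << 20: __dl_overflow() returns true and the
 * task must be rejected.
 */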

static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->running_bw;

        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
        dl_rq->running_bw += dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->running_bw;

        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
        dl_rq->running_bw -= dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
        if (dl_rq->running_bw > old)
                dl_rq->running_bw = 0;
        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->this_bw;

        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
        dl_rq->this_bw += dl_bw;
        SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->this_bw;

        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
        dl_rq->this_bw -= dl_bw;
        SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
        if (dl_rq->this_bw > old)
                dl_rq->this_bw = 0;
        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __add_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __sub_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __add_running_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __sub_running_bw(dl_se->dl_bw, dl_rq);
}

static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
        struct rq *rq;

        WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);

        if (task_on_rq_queued(p))
                return;

        rq = task_rq(p);
        if (p->dl.dl_non_contending) {
                sub_running_bw(&p->dl, &rq->dl);
                p->dl.dl_non_contending = 0;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be canceled, inactive_task_timer()
                 * will see that dl_non_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
                        put_task_struct(p);
        }
        __sub_rq_bw(p->dl.dl_bw, &rq->dl);
        __add_rq_bw(new_bw, &rq->dl);
}

static void __dl_clear_params(struct sched_dl_entity *dl_se);

/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is canceled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, which does not contribute to running_bw anymore.
 *                              +------------------+
 *            wakeup            |    ACTIVE        |
 *          +------------------>+   contending     |
 *          | add_running_bw    |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |               dequeue  |      |
 * +--------+-------+                |      |
 * |                |   t >= 0-lag   |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                | sub_running_bw |      |
 * +--------+-------+                |      |
 *          ^                        |      |
 *          |             t < 0-lag  |      |
 *          |                        |      |
 *          |                        V      |
 *          |                   +----+------+------+
 *          | sub_running_bw    |    ACTIVE        |
 *          +-------------------+                  |
 *            inactive timer    |  non contending  |
 *            fired             +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
static void task_non_contending(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->inactive_timer;
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
        struct task_struct *p = dl_task_of(dl_se);
        s64 zerolag_time;

        /*
         * If this is a non-deadline task that has been boosted,
         * do nothing
         */
        if (dl_se->dl_runtime == 0)
                return;

        if (dl_entity_is_special(dl_se))
                return;

        WARN_ON(dl_se->dl_non_contending);

        zerolag_time = dl_se->deadline -
                div64_long((dl_se->runtime * dl_se->dl_period),
                           dl_se->dl_runtime);

        /*
         * Using relative times instead of the absolute "0-lag time"
         * allows us to simplify the code
         */
        zerolag_time -= rq_clock(rq);

        /*
         * If the "0-lag time" already passed, decrease the active
         * utilization now, instead of starting a timer
         */
        if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
                if (dl_task(p))
                        sub_running_bw(dl_se, dl_rq);

                if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
                        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

                        if (READ_ONCE(p->__state) == TASK_DEAD)
                                sub_rq_bw(dl_se, &rq->dl);
                        raw_spin_lock(&dl_b->lock);
                        __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
                        raw_spin_unlock(&dl_b->lock);
                        __dl_clear_params(dl_se);
                }

                return;
        }

        dl_se->dl_non_contending = 1;
        get_task_struct(p);
        hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}
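
/*
 * Worked example (illustrative): consider a task with dl_runtime = 10 ms
 * and dl_period = 100 ms that blocks with runtime = 5 ms left. Its 0-lag
 * time is deadline - (5 ms * 100 ms) / 10 ms, i.e. 50 ms before the
 * absolute deadline: until that instant the remaining budget could still
 * be consumed at the reserved rate, so the task's utilization must keep
 * counting in running_bw and the inactive timer is armed for then.
 */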

static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        /*
         * If this is a non-deadline task that has been boosted,
         * do nothing
         */
        if (dl_se->dl_runtime == 0)
                return;

        if (flags & ENQUEUE_MIGRATED)
                add_rq_bw(dl_se, dl_rq);

        if (dl_se->dl_non_contending) {
                dl_se->dl_non_contending = 0;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be canceled, inactive_task_timer()
                 * will see that dl_non_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
                        put_task_struct(dl_task_of(dl_se));
        } else {
                /*
                 * Since "dl_non_contending" is not set, the
                 * task's utilization has already been removed from
                 * active utilization (either when the task blocked,
                 * or when the "inactive timer" fired).
                 * So, add it back.
                 */
                add_running_bw(dl_se, dl_rq);
        }
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
        struct sched_dl_entity *dl_se = &p->dl;

        return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
}

static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

void init_dl_bw(struct dl_bw *dl_b)
{
        raw_spin_lock_init(&dl_b->lock);
        if (global_rt_runtime() == RUNTIME_INF)
                dl_b->bw = -1;
        else
                dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
        dl_b->total_bw = 0;
}
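
/*
 * Worked example (illustrative): with the default global limits
 * (sched_rt_period_us = 1000000, sched_rt_runtime_us = 950000),
 * to_ratio() yields dl_b->bw ~ 0.95 * (1 << BW_SHIFT), i.e. at most 95%
 * of each CPU may be promised to -deadline tasks at admission time;
 * RUNTIME_INF (bw == -1) disables admission control entirely.
 */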

void init_dl_rq(struct dl_rq *dl_rq)
{
        dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
        /* zero means no -deadline tasks */
        dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

        dl_rq->dl_nr_migratory = 0;
        dl_rq->overloaded = 0;
        dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
        init_dl_bw(&dl_rq->dl_bw);
#endif

        dl_rq->running_bw = 0;
        dl_rq->this_bw = 0;
        init_dl_rq_bw_ratio(dl_rq);
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
        /*
         * Must be visible before the overload count is
         * set (as in sched_rt.c).
         *
         * Matched by the barrier in pull_dl_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        atomic_dec(&rq->rd->dlo_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
        if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
                if (!dl_rq->overloaded) {
                        dl_set_overload(rq_of_dl_rq(dl_rq));
                        dl_rq->overloaded = 1;
                }
        } else if (dl_rq->overloaded) {
                dl_clear_overload(rq_of_dl_rq(dl_rq));
                dl_rq->overloaded = 0;
        }
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory++;

        update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory--;

        update_dl_migration(dl_rq);
}

#define __node_2_pdl(node) \
        rb_entry((node), struct task_struct, pushable_dl_tasks)

static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
{
        return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct rb_node *leftmost;

        WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

        leftmost = rb_add_cached(&p->pushable_dl_tasks,
                                 &rq->dl.pushable_dl_tasks_root,
                                 __pushable_less);
        if (leftmost)
                rq->dl.earliest_dl.next = p->dl.deadline;
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;
        struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
        struct rb_node *leftmost;

        if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
                return;

        leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
        if (leftmost)
                dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;

        RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
        return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return rq->online && dl_task(prev);
}

static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void deadline_queue_push_tasks(struct rq *rq)
{
        if (!has_pushable_dl_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
        queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
        struct rq *later_rq = NULL;
        struct dl_bw *dl_b;

        later_rq = find_lock_later_rq(p, rq);
        if (!later_rq) {
                int cpu;

                /*
                 * If we cannot preempt any rq, fall back to pick any
                 * online CPU:
                 */
                cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
                if (cpu >= nr_cpu_ids) {
                        /*
                         * Failed to find any suitable CPU.
                         * The task will never come back!
                         */
                        WARN_ON_ONCE(dl_bandwidth_enabled());

                        /*
                         * If admission control is disabled we
                         * try a little harder to let the task
                         * run.
                         */
                        cpu = cpumask_any(cpu_active_mask);
                }
                later_rq = cpu_rq(cpu);
                double_lock_balance(rq, later_rq);
        }

        if (p->dl.dl_non_contending || p->dl.dl_throttled) {
                /*
                 * Inactive timer is armed (or callback is running, but
                 * waiting for us to release rq locks). In any case, when it
                 * will fire (or continue), it will see running_bw of this
                 * task migrated to later_rq (and correctly handle it).
                 */
                sub_running_bw(&p->dl, &rq->dl);
                sub_rq_bw(&p->dl, &rq->dl);

                add_rq_bw(&p->dl, &later_rq->dl);
                add_running_bw(&p->dl, &later_rq->dl);
        } else {
                sub_rq_bw(&p->dl, &rq->dl);
                add_rq_bw(&p->dl, &later_rq->dl);
        }

        /*
         * And we finally need to fixup root_domain(s) bandwidth accounting,
         * since p is still hanging out in the old (now moved to default) root
         * domain.
         */
        dl_b = &rq->rd->dl_bw;
        raw_spin_lock(&dl_b->lock);
        __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
        raw_spin_unlock(&dl_b->lock);

        dl_b = &later_rq->rd->dl_bw;
        raw_spin_lock(&dl_b->lock);
        __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
        raw_spin_unlock(&dl_b->lock);

        set_task_cpu(p, later_rq->cpu);
        double_unlock_balance(later_rq, rq);

        return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline void deadline_queue_push_tasks(struct rq *rq)
{
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);

static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
                                           struct rq *rq)
{
        /* for a non-boosted task, pi_of(dl_se) == dl_se */
        dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
        dl_se->runtime = pi_of(dl_se)->dl_runtime;
}

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        WARN_ON(is_dl_boosted(dl_se));
        WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

        /*
         * We are racing with the deadline timer. So, do nothing because
         * the deadline timer handler will take care of properly recharging
         * the runtime and postponing the deadline
         */
        if (dl_se->dl_throttled)
                return;

        /*
         * We use the regular wall clock time to set deadlines in the
         * future; in fact, we must consider execution overheads (time
         * spent on hardirq context, etc.).
         */
        replenish_dl_new_period(dl_se, rq);
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or it just underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);

        /*
         * This could be the case for a !-dl task that is boosted.
         * Just go with full inherited parameters.
         */
        if (dl_se->dl_deadline == 0)
                replenish_dl_new_period(dl_se, rq);

        if (dl_se->dl_yielded && dl_se->runtime > 0)
                dl_se->runtime = 0;

        /*
         * We keep moving the deadline away until we get some
         * available runtime for the entity. This ensures correct
         * handling of situations where the runtime overrun is
         * arbitrarily large.
         */
        while (dl_se->runtime <= 0) {
                dl_se->deadline += pi_of(dl_se)->dl_period;
                dl_se->runtime += pi_of(dl_se)->dl_runtime;
        }

        /*
         * At this point, the deadline really should be "in
         * the future" with respect to rq->clock. If it's
         * not, we are, for some reason, lagging too much!
         * Anyway, after having warned userspace about that,
         * we still try to keep things running by
         * resetting the deadline and the budget of the
         * entity.
         */
        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
                printk_deferred_once("sched: DL replenish lagged too much\n");
                replenish_dl_new_period(dl_se, rq);
        }

        if (dl_se->dl_yielded)
                dl_se->dl_yielded = 0;
        if (dl_se->dl_throttled)
                dl_se->dl_throttled = 0;
}
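
/*
 * Worked example (illustrative): a task with dl_runtime = 10 ms and
 * dl_period = 100 ms is found with runtime = -4 ms after an overrun. One
 * iteration of the loop above leaves runtime = 6 ms and pushes the
 * absolute deadline 100 ms further into the future; had the overrun been
 * 25 ms deep, three iterations would have been needed. Postponing the
 * deadline together with the refill is what keeps the entity within its
 * declared 10% bandwidth as seen by EDF.
 */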

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.rst for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * tasks with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
{
        u64 left, right;

        /*
         * left and right are the two sides of the equation above,
         * after a bit of shuffling to use multiplications instead
         * of divisions.
         *
         * Note that none of the time values involved in the two
         * multiplications are absolute: dl_deadline and dl_runtime
         * are the relative deadline and the maximum runtime of each
         * instance, runtime is the runtime left for the last instance
         * and (deadline - t), since t is rq->clock, is the time left
         * to the (absolute) deadline. Even if overflowing the u64 type
         * is very unlikely to occur in both cases, here we scale down
         * as we want to avoid that risk at all. Scaling down by 10
         * means that we reduce granularity to 1us. We are fine with it,
         * since this is only a true/false check and, anyway, thinking
         * of anything below microseconds resolution is actually fiction
         * (but still we want to give the user that illusion >;).
         */
        left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
        right = ((dl_se->deadline - t) >> DL_SCALE) *
                (pi_of(dl_se)->dl_runtime >> DL_SCALE);

        return dl_time_before(right, left);
}
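
/*
 * Worked example (illustrative): dl_runtime = 10 ms, dl_deadline = 100 ms,
 * and the task wakes with runtime = 6 ms left and 30 ms to its absolute
 * deadline. Cross-multiplying the rule above gives left = 100 * 6 = 600
 * and right = 30 * 10 = 300 (in ms^2, ignoring the DL_SCALE shifts, which
 * scale both sides equally): left > right, so the residual bandwidth
 * 6/30 = 0.2 exceeds the declared 0.1 and the parameters must be
 * refreshed rather than recycled.
 */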

/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to avoid the task overrunning its
 * density.
 *
 * Reasoning: a task may overrun the density if:
 *   runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *   runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * In this way, runtime will be equal to the maximum density
 * the task can use without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
        u64 laxity = dl_se->deadline - rq_clock(rq);

        /*
         * If the task has deadline < period, and the deadline is in the past,
         * it should already be throttled before this check.
         *
         * See update_dl_entity() comments for further details.
         */
        WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

        dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
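
/*
 * Worked example (illustrative): continuing the numbers above
 * (dl_runtime = 10 ms, dl_deadline = 100 ms, so dl_density ~ 0.1 in
 * BW_SHIFT fixed point), a wakeup 30 ms before the absolute deadline
 * gives laxity = 30 ms and the runtime is clamped to 0.1 * 30 ms = 3 ms:
 * exactly what can be consumed until the deadline without exceeding the
 * declared density.
 */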

/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only for tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
        return dl_se->dl_deadline == dl_se->dl_period;
}

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied: the runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revised CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revised CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments in the update_dl_revised_wakeup() function to
 * find more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
            dl_entity_overflow(dl_se, rq_clock(rq))) {

                if (unlikely(!dl_is_implicit(dl_se) &&
                             !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
                             !is_dl_boosted(dl_se))) {
                        update_dl_revised_wakeup(dl_se, rq);
                        return;
                }

                replenish_dl_new_period(dl_se, rq);
        }
}

static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
        return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}
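
/*
 * Example (illustrative): for a constrained task with dl_deadline = 30 ms
 * and dl_period = 100 ms, the current period started at deadline - 30 ms,
 * so the next one starts at deadline + 70 ms; that is the instant the
 * replenishment timer below is aimed at.
 */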

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
        struct sched_dl_entity *dl_se = &p->dl;
        struct hrtimer *timer = &dl_se->dl_timer;
        struct rq *rq = task_rq(p);
        ktime_t now, act;
        s64 delta;

        lockdep_assert_rq_held(rq);

        /*
         * We want the timer to fire at the deadline, but considering
         * that it is actually coming from rq->clock and not from
         * hrtimer's time base reading.
         */
        act = ns_to_ktime(dl_next_period(dl_se));
        now = hrtimer_cb_get_time(timer);
        delta = ktime_to_ns(now) - rq_clock(rq);
        act = ktime_add_ns(act, delta);

        /*
         * If the expiry time already passed, e.g., because the value
         * chosen as the deadline is too small, don't even try to
         * start the timer in the past!
         */
        if (ktime_us_delta(act, now) < 0)
                return 0;

        /*
         * !enqueued will guarantee another callback; even if one is already in
         * progress. This ensures a balanced {get,put}_task_struct().
         *
         * The race against __run_timer() clearing the enqueued state is
         * harmless because we're holding task_rq()->lock, therefore the timer
         * expiring after we've done the check will wait on its task_rq_lock()
         * and observe our state.
         */
        if (!hrtimer_is_queued(timer)) {
                get_task_struct(p);
                hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
        }

        return 1;
}
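
/*
 * Worked example (illustrative) of the clock-domain fixup above: if the
 * replenishment instant is 5000 in rq_clock() time, the hrtimer base
 * currently reads 5020 and rq_clock() reads 5010, then delta = 10 and the
 * timer is armed at 5020 - 10 + ... i.e. act = 5000 + 10 = 5010 on the
 * hrtimer base, which corresponds to 5000 as seen through rq_clock():
 * the same instant, expressed in the timer's own time domain.
 */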

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p = dl_task_of(dl_se);
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);

        /*
         * The task might have changed its scheduling policy to something
         * different than SCHED_DEADLINE (through switched_from_dl()).
         */
        if (!dl_task(p))
                goto unlock;

        /*
         * The task might have been boosted by someone else and might be in the
         * boosting/deboosting path, it's not throttled.
         */
        if (is_dl_boosted(dl_se))
                goto unlock;

        /*
         * Spurious timer due to start_dl_timer() race; or we already received
         * a replenishment from rt_mutex_setprio().
         */
        if (!dl_se->dl_throttled)
                goto unlock;

        sched_clock_tick();
        update_rq_clock(rq);

        /*
         * If the throttle happened during sched-out; like:
         *
         *   schedule()
         *     deactivate_task()
         *       dequeue_task_dl()
         *         update_curr_dl()
         *           start_dl_timer()
         *         __dequeue_task_dl()
         *     prev->on_rq = 0;
         *
         * We can be both throttled and !queued. Replenish the counter
         * but do not enqueue -- wait for our wakeup to do that.
         */
        if (!task_on_rq_queued(p)) {
                replenish_dl_entity(dl_se);
                goto unlock;
        }

#ifdef CONFIG_SMP
        if (unlikely(!rq->online)) {
                /*
                 * If the runqueue is no longer available, migrate the
                 * task elsewhere. This necessarily changes rq.
                 */
                lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
                rq = dl_task_offline_migration(rq, p);
                rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
                update_rq_clock(rq);

                /*
                 * Now that the task has been migrated to the new RQ and we
                 * have that locked, proceed as normal and enqueue the task
                 * there.
                 */
        }
#endif

        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
        if (dl_task(rq->curr))
                wakeup_preempt_dl(rq, p, 0);
        else
                resched_curr(rq);

#ifdef CONFIG_SMP
        /*
         * Queueing this task back might have overloaded rq, check if we need
         * to kick someone away.
         */
        if (has_pushable_dl_tasks(rq)) {
                /*
                 * Nothing relies on rq->lock after this, so it's safe to drop
                 * rq->lock.
                 */
                rq_unpin_lock(rq, &rf);
                push_dl_task(rq);
                rq_repin_lock(rq, &rf);
        }
#endif

unlock:
        task_rq_unlock(rq, p, &rf);

        /*
         * This can free the task_struct, including this hrtimer, do not touch
         * anything related to that after this.
         */
        put_task_struct(p);

        return HRTIMER_NORESTART;
}

static void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->dl_timer;

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        timer->function = dl_task_timer;
}

/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
 * unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
            dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
                if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
                        return;
                dl_se->dl_throttled = 1;
                if (dl_se->runtime > 0)
                        dl_se->runtime = 0;
        }
}

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
        return (dl_se->runtime <= 0);
}

/*
 * This function implements the GRUB accounting rule. According to the
 * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
 * but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt",
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * "runqueue active utilization", and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
 * by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value should be
 * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
 * not an issue here.
 */
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
        u64 u_act;
        u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */

        /*
         * Instead of computing max{u, (u_max - u_inact - u_extra)}, we
         * compare u_inact + u_extra with u_max - u, because u_inact + u_extra
         * can be larger than u_max. So, u_max - u_inact - u_extra would be
         * negative leading to wrong results.
         */
        if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
                u_act = dl_se->dl_bw;
        else
                u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;

        u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
        return (delta * u_act) >> BW_SHIFT;
}
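
/*
 * Worked example (illustrative): with Umax = 0.95, on a runqueue where
 * u_inact = 0.2 and u_extra = 0.05, a task with u = 0.1 takes the else
 * branch (0.25 < 0.85) and gets u_act = 0.95 - 0.2 - 0.05 = 0.7 before
 * the 1/Umax correction, ~0.74 after it: for every 1 ms it actually runs,
 * only ~0.74 ms is charged against its budget, letting it reclaim
 * bandwidth left unused by the other reservations. (All these values are
 * stored shifted by BW_SHIFT or RATIO_SHIFT in the code above.)
 */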

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_dl_entity *dl_se = &curr->dl;
        s64 delta_exec, scaled_delta_exec;
        int cpu = cpu_of(rq);

        if (!dl_task(curr) || !on_dl_rq(dl_se))
                return;

        /*
         * Consumed budget is computed considering the time as
         * observed by schedulable tasks (excluding time spent
         * in hardirq context, etc.). Deadlines are instead
         * computed using hard walltime. This seems to be the more
         * natural solution, but the full ramifications of this
         * approach need further study.
         */
        delta_exec = update_curr_common(rq);
        if (unlikely(delta_exec <= 0)) {
                if (unlikely(dl_se->dl_yielded))
                        goto throttle;
                return;
        }

        if (dl_entity_is_special(dl_se))
                return;

        /*
         * For tasks that participate in GRUB, we implement GRUB-PA: the
         * spare reclaimed bandwidth is used to clock down frequency.
         *
         * For the others, we still need to scale reservation parameters
         * according to current frequency and CPU maximum capacity.
         */
        if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
                scaled_delta_exec = grub_reclaim(delta_exec,
                                                 rq,
                                                 &curr->dl);
        } else {
                unsigned long scale_freq = arch_scale_freq_capacity(cpu);
                unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);

                scaled_delta_exec = cap_scale(delta_exec, scale_freq);
                scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
        }

        dl_se->runtime -= scaled_delta_exec;

throttle:
        if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
                dl_se->dl_throttled = 1;

                /* If requested, inform the user about runtime overruns. */
                if (dl_runtime_exceeded(dl_se) &&
                    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
                        dl_se->dl_overrun = 1;

                __dequeue_task_dl(rq, curr, 0);
                if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

                if (!is_leftmost(curr, &rq->dl))
                        resched_curr(rq);
        }

        /*
         * Because -- for now -- we share the rt bandwidth, we need to
         * account our runtime there too, otherwise actual rt tasks
         * would be able to exceed the shared quota.
         *
         * Account to the root rt group for now.
         *
         * The solution we're working towards is having the RT groups scheduled
         * using deadline servers -- however there's a few nasties to figure
         * out before that can happen.
         */
        if (rt_bandwidth_enabled()) {
                struct rt_rq *rt_rq = &rq->rt;

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We'll let actual RT tasks worry about the overflow here, we
                 * have our own CBS to keep us inline; only account when RT
                 * bandwidth is relevant.
                 */
                if (sched_rt_bandwidth_account(rt_rq))
                        rt_rq->rt_time += delta_exec;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
}

static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     inactive_timer);
        struct task_struct *p = dl_task_of(dl_se);
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);

        sched_clock_tick();
        update_rq_clock(rq);

        if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
                struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

                if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
                        sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
                        sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
                        dl_se->dl_non_contending = 0;
                }

                raw_spin_lock(&dl_b->lock);
                __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
                raw_spin_unlock(&dl_b->lock);
                __dl_clear_params(dl_se);

                goto unlock;
        }
        if (dl_se->dl_non_contending == 0)
                goto unlock;

        sub_running_bw(dl_se, &rq->dl);
        dl_se->dl_non_contending = 0;
unlock:
        task_rq_unlock(rq, p, &rf);
        put_task_struct(p);

        return HRTIMER_NORESTART;
}

static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->inactive_timer;

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        timer->function = inactive_task_timer;
}

#define __node_2_dle(node) \
        rb_entry((node), struct sched_dl_entity, rb_node)

#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_rq->earliest_dl.curr == 0 ||
            dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
                if (dl_rq->earliest_dl.curr == 0)
                        cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
                dl_rq->earliest_dl.curr = deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
        }
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        /*
         * Since we may have removed our earliest (and/or next earliest)
         * task we must recompute them.
         */
        if (!dl_rq->dl_nr_running) {
                dl_rq->earliest_dl.curr = 0;
                dl_rq->earliest_dl.next = 0;
                cpudl_clear(&rq->rd->cpudl, rq->cpu);
                cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
        } else {
                struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
                struct sched_dl_entity *entry = __node_2_dle(leftmost);

                dl_rq->earliest_dl.curr = entry->deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
        }
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        int prio = dl_task_of(dl_se)->prio;
        u64 deadline = dl_se->deadline;

        WARN_ON(!dl_prio(prio));
        dl_rq->dl_nr_running++;
        add_nr_running(rq_of_dl_rq(dl_rq), 1);

        inc_dl_deadline(dl_rq, deadline);
        inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        int prio = dl_task_of(dl_se)->prio;

        WARN_ON(!dl_prio(prio));
        WARN_ON(!dl_rq->dl_nr_running);
        dl_rq->dl_nr_running--;
        sub_nr_running(rq_of_dl_rq(dl_rq), 1);

        dec_dl_deadline(dl_rq, dl_se->deadline);
        dec_dl_migration(dl_se, dl_rq);
}

static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
{
        return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
}

static inline struct sched_statistics *
__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
{
        return &dl_task_of(dl_se)->stats;
}

static inline void
update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
        struct sched_statistics *stats;

        if (!schedstat_enabled())
                return;

        stats = __schedstats_from_dl_se(dl_se);
        __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}

static inline void
update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
        struct sched_statistics *stats;

        if (!schedstat_enabled())
                return;

        stats = __schedstats_from_dl_se(dl_se);
        __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}

static inline void
update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
        struct sched_statistics *stats;

        if (!schedstat_enabled())
                return;

        stats = __schedstats_from_dl_se(dl_se);
        __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}

static inline void
update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
                        int flags)
{
        if (!schedstat_enabled())
                return;

        if (flags & ENQUEUE_WAKEUP)
                update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
}

static inline void
update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
                        int flags)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (!schedstat_enabled())
                return;

        if ((flags & DEQUEUE_SLEEP)) {
                unsigned int state;

                state = READ_ONCE(p->__state);
                if (state & TASK_INTERRUPTIBLE)
                        __schedstat_set(p->stats.sleep_start,
                                        rq_clock(rq_of_dl_rq(dl_rq)));

                if (state & TASK_UNINTERRUPTIBLE)
                        __schedstat_set(p->stats.block_start,
                                        rq_clock(rq_of_dl_rq(dl_rq)));
        }
}
1598
__enqueue_dl_entity(struct sched_dl_entity * dl_se)1599 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1600 {
1601 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1602
1603 WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));
1604
1605 rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
1606
1607 inc_dl_tasks(dl_se, dl_rq);
1608 }
1609
__dequeue_dl_entity(struct sched_dl_entity * dl_se)1610 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1611 {
1612 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1613
1614 if (RB_EMPTY_NODE(&dl_se->rb_node))
1615 return;
1616
1617 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1618
1619 RB_CLEAR_NODE(&dl_se->rb_node);
1620
1621 dec_dl_tasks(dl_se, dl_rq);
1622 }
1623
1624 static void
enqueue_dl_entity(struct sched_dl_entity * dl_se,int flags)1625 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1626 {
1627 WARN_ON_ONCE(on_dl_rq(dl_se));
1628
1629 update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
1630
1631 /*
1632 * Check if a constrained deadline task was activated
1633 * after the deadline but before the next period.
1634 * If that is the case, the task will be throttled and
1635 * the replenishment timer will be set to the next period.
1636 */
1637 if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
1638 dl_check_constrained_dl(dl_se);
1639
1640 if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) {
1641 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1642
1643 add_rq_bw(dl_se, dl_rq);
1644 add_running_bw(dl_se, dl_rq);
1645 }
1646
1647 /*
1648 * If p is throttled, we do not enqueue it. In fact, if it exhausted
1649 * its budget it needs a replenishment and, since it now is on
1650 * its rq, the bandwidth timer callback (which clearly has not
1651 * run yet) will take care of this.
1652 * However, the active utilization does not depend on the fact
1653 * that the task is on the runqueue or not (but depends on the
1654 * task's state - in GRUB parlance, "inactive" vs "active contending").
1655 * In other words, even if a task is throttled its utilization must
1656 * be counted in the active utilization; hence, we need to call
1657 * add_running_bw().
1658 */
1659 if (dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1660 if (flags & ENQUEUE_WAKEUP)
1661 task_contending(dl_se, flags);
1662
1663 return;
1664 }
1665
1666 /*
1667 * If this is a wakeup or a new instance, the scheduling
1668 * parameters of the task might need updating. Otherwise,
1669 * we want a replenishment of its runtime.
1670 */
1671 if (flags & ENQUEUE_WAKEUP) {
1672 task_contending(dl_se, flags);
1673 update_dl_entity(dl_se);
1674 } else if (flags & ENQUEUE_REPLENISH) {
1675 replenish_dl_entity(dl_se);
1676 } else if ((flags & ENQUEUE_RESTORE) &&
1677 !is_dl_boosted(dl_se) &&
1678 dl_time_before(dl_se->deadline,
1679 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1680 setup_new_dl_entity(dl_se);
1681 }
1682
1683 __enqueue_dl_entity(dl_se);
1684 }
1685
1686 static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1687 {
1688 __dequeue_dl_entity(dl_se);
1689
1690 if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) {
1691 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1692
1693 sub_running_bw(dl_se, dl_rq);
1694 sub_rq_bw(dl_se, dl_rq);
1695 }
1696
1697 /*
1698 * This check allows us to start the inactive timer (or to immediately
1699 * decrease the active utilization, if needed) in two cases:
1700 * when the task blocks and when it is terminating
1701 * (p->state == TASK_DEAD). We can handle the two cases in the same
1702 * way, because from GRUB's point of view the same thing is happening
1703 * (the task moves from "active contending" to "active non contending"
1704 * or "inactive").
1705 */
1706 if (flags & DEQUEUE_SLEEP)
1707 task_non_contending(dl_se);
1708 }
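/*
 * Illustrative sketch only (not used by the scheduler): the GRUB
 * states referenced in the comment above, written out as an enum.
 * The real state is encoded by on_dl_rq(), dl_se->dl_non_contending
 * and the inactive timer, not by an explicit enum like this.
 */
enum dl_grub_state_sketch {
	DL_ACTIVE_CONTENDING,		/* runnable: counted in running_bw */
	DL_ACTIVE_NON_CONTENDING,	/* blocked, 0-lag time not yet reached */
	DL_INACTIVE,			/* blocked past 0-lag: bandwidth released */
};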
1709
1710 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1711 {
1712 if (is_dl_boosted(&p->dl)) {
1713 /*
1714 * Because of delays in the detection of the overrun of a
1715 * thread's runtime, it might be the case that a thread
1716 * goes to sleep in a rt mutex with negative runtime. As
1717 * a consequence, the thread will be throttled.
1718 *
1719 * While waiting for the mutex, this thread can also be
1720 * boosted via PI, resulting in a thread that is throttled
1721 * and boosted at the same time.
1722 *
1723 * In this case, the boost overrides the throttle.
1724 */
1725 if (p->dl.dl_throttled) {
1726 /*
1727 * The replenish timer needs to be canceled. No
1728 * problem if it fires concurrently: boosted threads
1729 * are ignored in dl_task_timer().
1730 */
1731 hrtimer_try_to_cancel(&p->dl.dl_timer);
1732 p->dl.dl_throttled = 0;
1733 }
1734 } else if (!dl_prio(p->normal_prio)) {
1735 /*
1736 * Special case in which we have a !SCHED_DEADLINE task that is going
1737 * to be deboosted, but exceeds its runtime while doing so. No point in
1738 * replenishing it, as it's going to return to its original
1739 * scheduling class after this. If it has been throttled, we need to
1740 * clear the flag, otherwise the task may wake up as throttled after
1741 * being boosted again with no means to replenish the runtime and clear
1742 * the throttle.
1743 */
1744 p->dl.dl_throttled = 0;
1745 if (!(flags & ENQUEUE_REPLENISH))
1746 printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
1747 task_pid_nr(p));
1748
1749 return;
1750 }
1751
1752 check_schedstat_required();
1753 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
1754
1755 if (p->on_rq == TASK_ON_RQ_MIGRATING)
1756 flags |= ENQUEUE_MIGRATING;
1757
1758 enqueue_dl_entity(&p->dl, flags);
1759
1760 if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
1761 enqueue_pushable_dl_task(rq, p);
1762 }
1763
1764 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1765 {
1766 update_stats_dequeue_dl(&rq->dl, &p->dl, flags);
1767 dequeue_dl_entity(&p->dl, flags);
1768
1769 if (!p->dl.dl_throttled)
1770 dequeue_pushable_dl_task(rq, p);
1771 }
1772
1773 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1774 {
1775 update_curr_dl(rq);
1776
1777 if (p->on_rq == TASK_ON_RQ_MIGRATING)
1778 flags |= DEQUEUE_MIGRATING;
1779
1780 __dequeue_task_dl(rq, p, flags);
1781 }
1782
1783 /*
1784 * Yield semantics for -deadline tasks:
1785 *
1786 * get off the CPU until our next instance, with
1787 * a new runtime. This is of little use now, since we
1788 * don't have a bandwidth reclaiming mechanism. Once
1789 * bandwidth reclaiming is in place, yield_task_dl will
1790 * indicate that some spare budget is available for
1791 * other task instances to use.
1792 */
1793 static void yield_task_dl(struct rq *rq)
1794 {
1795 /*
1796 * We make the task go to sleep until its current deadline by
1797 * forcing its runtime to zero. This way, update_curr_dl() stops
1798 * it and the bandwidth timer will wake it up and will give it
1799 * new scheduling parameters (thanks to dl_yielded=1).
1800 */
1801 rq->curr->dl.dl_yielded = 1;
1802
1803 update_rq_clock(rq);
1804 update_curr_dl(rq);
1805 /*
1806 * Tell update_rq_clock() that we've just updated,
1807 * so we don't do a microscopic update in schedule()
1808 * and double the fastpath cost.
1809 */
1810 rq_clock_skip_update(rq);
1811 }
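/*
 * Hedged userspace sketch (not part of this file): a periodic
 * SCHED_DEADLINE job that yields the rest of its budget whenever an
 * instance finishes early. SCHED_DEADLINE is policy 6 and the struct
 * layout mirrors the first eight fields of struct sched_attr in
 * include/uapi/linux/sched/types.h (SCHED_ATTR_SIZE_VER0); the
 * program needs CAP_SYS_NICE/root and error handling is trimmed.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

struct dl_attr_example {			/* mirrors struct sched_attr */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct dl_attr_example attr = {
		.size		= sizeof(attr),
		.sched_policy	= 6,			/* SCHED_DEADLINE */
		.sched_runtime	= 2 * 1000 * 1000,	/* 2ms budget */
		.sched_deadline	= 10 * 1000 * 1000,	/* 10ms deadline */
		.sched_period	= 10 * 1000 * 1000,	/* 10ms period */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		return 1;

	for (;;) {
		/* ... do this instance's work ... */
		sched_yield();	/* dl_yielded=1: sleep until the next period */
	}
}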
1812
1813 #ifdef CONFIG_SMP
1814
1815 static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
1816 struct rq *rq)
1817 {
1818 return (!rq->dl.dl_nr_running ||
1819 dl_time_before(p->dl.deadline,
1820 rq->dl.earliest_dl.curr));
1821 }
1822
1823 static int find_later_rq(struct task_struct *task);
1824
1825 static int
1826 select_task_rq_dl(struct task_struct *p, int cpu, int flags)
1827 {
1828 struct task_struct *curr;
1829 bool select_rq;
1830 struct rq *rq;
1831
1832 if (!(flags & WF_TTWU))
1833 goto out;
1834
1835 rq = cpu_rq(cpu);
1836
1837 rcu_read_lock();
1838 curr = READ_ONCE(rq->curr); /* unlocked access */
1839
1840 /*
1841 * If we are dealing with a -deadline task, we must
1842 * decide where to wake it up.
1843 * If it has a later deadline and the current task
1844 * on this rq can't move (provided the waking task
1845 * can!) we prefer to send it somewhere else. On the
1846 * other hand, if it has a shorter deadline, we
1847 * try to make it stay here, it might be important.
1848 */
1849 select_rq = unlikely(dl_task(curr)) &&
1850 (curr->nr_cpus_allowed < 2 ||
1851 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1852 p->nr_cpus_allowed > 1;
1853
1854 /*
1855 * Take the capacity of the CPU into account to
1856 * ensure it fits the requirement of the task.
1857 */
1858 if (sched_asym_cpucap_active())
1859 select_rq |= !dl_task_fits_capacity(p, cpu);
1860
1861 if (select_rq) {
1862 int target = find_later_rq(p);
1863
1864 if (target != -1 &&
1865 dl_task_is_earliest_deadline(p, cpu_rq(target)))
1866 cpu = target;
1867 }
1868 rcu_read_unlock();
1869
1870 out:
1871 return cpu;
1872 }
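/*
 * Worked example (illustrative): p wakes with an absolute deadline of
 * 50ms while this rq's current task is a -deadline task with a 30ms
 * deadline. p does not preempt curr, so (provided p may migrate)
 * select_rq is true and find_later_rq() is asked for a CPU whose
 * earliest deadline is later than 50ms; if none qualifies, p simply
 * stays on the previously chosen CPU.
 */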
1873
1874 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1875 {
1876 struct rq_flags rf;
1877 struct rq *rq;
1878
1879 if (READ_ONCE(p->__state) != TASK_WAKING)
1880 return;
1881
1882 rq = task_rq(p);
1883 /*
1884 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1885 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1886 * rq->lock is not... So, lock it
1887 */
1888 rq_lock(rq, &rf);
1889 if (p->dl.dl_non_contending) {
1890 update_rq_clock(rq);
1891 sub_running_bw(&p->dl, &rq->dl);
1892 p->dl.dl_non_contending = 0;
1893 /*
1894 * If the timer handler is currently running and the
1895 * timer cannot be canceled, inactive_task_timer()
1896 * will see that dl_non_contending is not set, and
1897 * will not touch the rq's active utilization,
1898 * so we are still safe.
1899 */
1900 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1901 put_task_struct(p);
1902 }
1903 sub_rq_bw(&p->dl, &rq->dl);
1904 rq_unlock(rq, &rf);
1905 }
1906
1907 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1908 {
1909 /*
1910 * Current can't be migrated, useless to reschedule,
1911 * let's hope p can move out.
1912 */
1913 if (rq->curr->nr_cpus_allowed == 1 ||
1914 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1915 return;
1916
1917 /*
1918 * p is migratable, so let's not schedule it and
1919 * see if it is pushed or pulled somewhere else.
1920 */
1921 if (p->nr_cpus_allowed != 1 &&
1922 cpudl_find(&rq->rd->cpudl, p, NULL))
1923 return;
1924
1925 resched_curr(rq);
1926 }
1927
1928 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1929 {
1930 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1931 /*
1932 * This is OK, because current is on_cpu, which avoids it being
1933 * picked for load-balance and preemption/IRQs are still
1934 * disabled avoiding further scheduler activity on it and we've
1935 * not yet started the picking loop.
1936 */
1937 rq_unpin_lock(rq, rf);
1938 pull_dl_task(rq);
1939 rq_repin_lock(rq, rf);
1940 }
1941
1942 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1943 }
1944 #endif /* CONFIG_SMP */
1945
1946 /*
1947 * Only called when both the current and waking task are -deadline
1948 * tasks.
1949 */
1950 static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
1951 int flags)
1952 {
1953 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1954 resched_curr(rq);
1955 return;
1956 }
1957
1958 #ifdef CONFIG_SMP
1959 /*
1960 * In the unlikely case current and p have the same deadline
1961 * let us try to decide what's the best thing to do...
1962 */
1963 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1964 !test_tsk_need_resched(rq->curr))
1965 check_preempt_equal_dl(rq, p);
1966 #endif /* CONFIG_SMP */
1967 }
1968
1969 #ifdef CONFIG_SCHED_HRTICK
1970 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1971 {
1972 hrtick_start(rq, p->dl.runtime);
1973 }
1974 #else /* !CONFIG_SCHED_HRTICK */
1975 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1976 {
1977 }
1978 #endif
1979
1980 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
1981 {
1982 struct sched_dl_entity *dl_se = &p->dl;
1983 struct dl_rq *dl_rq = &rq->dl;
1984
1985 p->se.exec_start = rq_clock_task(rq);
1986 if (on_dl_rq(&p->dl))
1987 update_stats_wait_end_dl(dl_rq, dl_se);
1988
1989 /* You can't push away the running task */
1990 dequeue_pushable_dl_task(rq, p);
1991
1992 if (!first)
1993 return;
1994
1995 if (hrtick_enabled_dl(rq))
1996 start_hrtick_dl(rq, p);
1997
1998 if (rq->curr->sched_class != &dl_sched_class)
1999 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2000
2001 deadline_queue_push_tasks(rq);
2002 }
2003
2004 static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
2005 {
2006 struct rb_node *left = rb_first_cached(&dl_rq->root);
2007
2008 if (!left)
2009 return NULL;
2010
2011 return __node_2_dle(left);
2012 }
2013
2014 static struct task_struct *pick_task_dl(struct rq *rq)
2015 {
2016 struct sched_dl_entity *dl_se;
2017 struct dl_rq *dl_rq = &rq->dl;
2018 struct task_struct *p;
2019
2020 if (!sched_dl_runnable(rq))
2021 return NULL;
2022
2023 dl_se = pick_next_dl_entity(dl_rq);
2024 WARN_ON_ONCE(!dl_se);
2025 p = dl_task_of(dl_se);
2026
2027 return p;
2028 }
2029
2030 static struct task_struct *pick_next_task_dl(struct rq *rq)
2031 {
2032 struct task_struct *p;
2033
2034 p = pick_task_dl(rq);
2035 if (p)
2036 set_next_task_dl(rq, p, true);
2037
2038 return p;
2039 }
2040
2041 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
2042 {
2043 struct sched_dl_entity *dl_se = &p->dl;
2044 struct dl_rq *dl_rq = &rq->dl;
2045
2046 if (on_dl_rq(&p->dl))
2047 update_stats_wait_start_dl(dl_rq, dl_se);
2048
2049 update_curr_dl(rq);
2050
2051 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2052 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
2053 enqueue_pushable_dl_task(rq, p);
2054 }
2055
2056 /*
2057 * scheduler tick hitting a task of our scheduling class.
2058 *
2059 * NOTE: This function can be called remotely by the tick offload that
2060 * goes along full dynticks. Therefore no local assumption can be made
2061 * and everything must be accessed through the @rq and @curr passed in
2062 * parameters.
2063 */
2064 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2065 {
2066 update_curr_dl(rq);
2067
2068 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2069 /*
2070 * Even when we have runtime, update_curr_dl() might have resulted in us
2071 * not being the leftmost task anymore. In that case NEED_RESCHED will
2072 * be set and schedule() will start a new hrtick for the next task.
2073 */
2074 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
2075 is_leftmost(p, &rq->dl))
2076 start_hrtick_dl(rq, p);
2077 }
2078
2079 static void task_fork_dl(struct task_struct *p)
2080 {
2081 /*
2082 * SCHED_DEADLINE tasks cannot fork and this is achieved through
2083 * sched_fork().
2084 */
2085 }
2086
2087 #ifdef CONFIG_SMP
2088
2089 /* Only try algorithms three times */
2090 #define DL_MAX_TRIES 3
2091
2092 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2093 {
2094 if (!task_on_cpu(rq, p) &&
2095 cpumask_test_cpu(cpu, &p->cpus_mask))
2096 return 1;
2097 return 0;
2098 }
2099
2100 /*
2101 * Return the earliest pushable task on this rq that is suitable to
2102 * be executed on the given CPU, or NULL otherwise:
2103 */
2104 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2105 {
2106 struct task_struct *p = NULL;
2107 struct rb_node *next_node;
2108
2109 if (!has_pushable_dl_tasks(rq))
2110 return NULL;
2111
2112 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
2113
2114 next_node:
2115 if (next_node) {
2116 p = __node_2_pdl(next_node);
2117
2118 if (pick_dl_task(rq, p, cpu))
2119 return p;
2120
2121 next_node = rb_next(next_node);
2122 goto next_node;
2123 }
2124
2125 return NULL;
2126 }
2127
2128 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2129
2130 static int find_later_rq(struct task_struct *task)
2131 {
2132 struct sched_domain *sd;
2133 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
2134 int this_cpu = smp_processor_id();
2135 int cpu = task_cpu(task);
2136
2137 /* Make sure the mask is initialized first */
2138 if (unlikely(!later_mask))
2139 return -1;
2140
2141 if (task->nr_cpus_allowed == 1)
2142 return -1;
2143
2144 /*
2145 * We have to consider system topology and task affinity
2146 * first, then we can look for a suitable CPU.
2147 */
2148 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
2149 return -1;
2150
2151 /*
2152 * If we are here, some targets have been found, including
2153 * the most suitable one: among the runqueues whose current
2154 * tasks have later deadlines than this task's, the rq
2155 * with the latest possible one.
2156 *
2157 * Now we check how well this matches with task's
2158 * affinity and system topology.
2159 *
2160 * The last CPU where the task ran is our first
2161 * guess, since it is most likely cache-hot there.
2162 */
2163 if (cpumask_test_cpu(cpu, later_mask))
2164 return cpu;
2165 /*
2166 * Check if this_cpu is to be skipped (i.e., it is
2167 * not in the mask) or not.
2168 */
2169 if (!cpumask_test_cpu(this_cpu, later_mask))
2170 this_cpu = -1;
2171
2172 rcu_read_lock();
2173 for_each_domain(cpu, sd) {
2174 if (sd->flags & SD_WAKE_AFFINE) {
2175 int best_cpu;
2176
2177 /*
2178 * If possible, preempting this_cpu is
2179 * cheaper than migrating.
2180 */
2181 if (this_cpu != -1 &&
2182 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2183 rcu_read_unlock();
2184 return this_cpu;
2185 }
2186
2187 best_cpu = cpumask_any_and_distribute(later_mask,
2188 sched_domain_span(sd));
2189 /*
2190 * Last chance: if a CPU that is in both later_mask
2191 * and the current sd span is valid, that becomes our
2192 * choice. Of course, the latest possible CPU is
2193 * already under consideration through later_mask.
2194 */
2195 if (best_cpu < nr_cpu_ids) {
2196 rcu_read_unlock();
2197 return best_cpu;
2198 }
2199 }
2200 }
2201 rcu_read_unlock();
2202
2203 /*
2204 * At this point, all our guesses failed, we just return
2205 * 'something', and let the caller sort things out.
2206 */
2207 if (this_cpu != -1)
2208 return this_cpu;
2209
2210 cpu = cpumask_any_distribute(later_mask);
2211 if (cpu < nr_cpu_ids)
2212 return cpu;
2213
2214 return -1;
2215 }
2216
2217 /* Locks the rq it finds */
2218 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2219 {
2220 struct rq *later_rq = NULL;
2221 int tries;
2222 int cpu;
2223
2224 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2225 cpu = find_later_rq(task);
2226
2227 if ((cpu == -1) || (cpu == rq->cpu))
2228 break;
2229
2230 later_rq = cpu_rq(cpu);
2231
2232 if (!dl_task_is_earliest_deadline(task, later_rq)) {
2233 /*
2234 * Target rq has tasks of equal or earlier deadline,
2235 * retrying does not release any lock and is unlikely
2236 * to yield a different result.
2237 */
2238 later_rq = NULL;
2239 break;
2240 }
2241
2242 /* Retry if something changed. */
2243 if (double_lock_balance(rq, later_rq)) {
2244 if (unlikely(task_rq(task) != rq ||
2245 !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
2246 task_on_cpu(rq, task) ||
2247 !dl_task(task) ||
2248 is_migration_disabled(task) ||
2249 !task_on_rq_queued(task))) {
2250 double_unlock_balance(rq, later_rq);
2251 later_rq = NULL;
2252 break;
2253 }
2254 }
2255
2256 /*
2257 * If the rq we found has no -deadline task, or
2258 * its earliest one has a later deadline than our
2259 * task, the rq is a good one.
2260 */
2261 if (dl_task_is_earliest_deadline(task, later_rq))
2262 break;
2263
2264 /* Otherwise we try again. */
2265 double_unlock_balance(rq, later_rq);
2266 later_rq = NULL;
2267 }
2268
2269 return later_rq;
2270 }
2271
2272 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2273 {
2274 struct task_struct *p;
2275
2276 if (!has_pushable_dl_tasks(rq))
2277 return NULL;
2278
2279 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
2280
2281 WARN_ON_ONCE(rq->cpu != task_cpu(p));
2282 WARN_ON_ONCE(task_current(rq, p));
2283 WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
2284
2285 WARN_ON_ONCE(!task_on_rq_queued(p));
2286 WARN_ON_ONCE(!dl_task(p));
2287
2288 return p;
2289 }
2290
2291 /*
2292 * See if the non-running -deadline tasks on this rq
2293 * can be sent to some other CPU where they can preempt
2294 * and start executing.
2295 */
2296 static int push_dl_task(struct rq *rq)
2297 {
2298 struct task_struct *next_task;
2299 struct rq *later_rq;
2300 int ret = 0;
2301
2302 if (!rq->dl.overloaded)
2303 return 0;
2304
2305 next_task = pick_next_pushable_dl_task(rq);
2306 if (!next_task)
2307 return 0;
2308
2309 retry:
2310 /*
2311 * If next_task preempts rq->curr, and rq->curr
2312 * can move away, it makes sense to just reschedule
2313 * without going further in pushing next_task.
2314 */
2315 if (dl_task(rq->curr) &&
2316 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2317 rq->curr->nr_cpus_allowed > 1) {
2318 resched_curr(rq);
2319 return 0;
2320 }
2321
2322 if (is_migration_disabled(next_task))
2323 return 0;
2324
2325 if (WARN_ON(next_task == rq->curr))
2326 return 0;
2327
2328 /* We might release rq lock */
2329 get_task_struct(next_task);
2330
2331 /* Will lock the rq it'll find */
2332 later_rq = find_lock_later_rq(next_task, rq);
2333 if (!later_rq) {
2334 struct task_struct *task;
2335
2336 /*
2337 * We must check all this again, since
2338 * find_lock_later_rq releases rq->lock and it is
2339 * then possible that next_task has migrated.
2340 */
2341 task = pick_next_pushable_dl_task(rq);
2342 if (task == next_task) {
2343 /*
2344 * The task is still there. We don't try
2345 * again, some other CPU will pull it when ready.
2346 */
2347 goto out;
2348 }
2349
2350 if (!task)
2351 /* No more tasks */
2352 goto out;
2353
2354 put_task_struct(next_task);
2355 next_task = task;
2356 goto retry;
2357 }
2358
2359 deactivate_task(rq, next_task, 0);
2360 set_task_cpu(next_task, later_rq->cpu);
2361 activate_task(later_rq, next_task, 0);
2362 ret = 1;
2363
2364 resched_curr(later_rq);
2365
2366 double_unlock_balance(rq, later_rq);
2367
2368 out:
2369 put_task_struct(next_task);
2370
2371 return ret;
2372 }
2373
2374 static void push_dl_tasks(struct rq *rq)
2375 {
2376 /* push_dl_task() will return true if it moved a -deadline task */
2377 while (push_dl_task(rq))
2378 ;
2379 }
2380
2381 static void pull_dl_task(struct rq *this_rq)
2382 {
2383 int this_cpu = this_rq->cpu, cpu;
2384 struct task_struct *p, *push_task;
2385 bool resched = false;
2386 struct rq *src_rq;
2387 u64 dmin = LONG_MAX;
2388
2389 if (likely(!dl_overloaded(this_rq)))
2390 return;
2391
2392 /*
2393 * Match the barrier from dl_set_overloaded; this guarantees that if we
2394 * see overloaded we must also see the dlo_mask bit.
2395 */
2396 smp_rmb();
2397
2398 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2399 if (this_cpu == cpu)
2400 continue;
2401
2402 src_rq = cpu_rq(cpu);
2403
2404 /*
2405 * It looks racy, and it is! However, as in sched_rt.c,
2406 * we are fine with this.
2407 */
2408 if (this_rq->dl.dl_nr_running &&
2409 dl_time_before(this_rq->dl.earliest_dl.curr,
2410 src_rq->dl.earliest_dl.next))
2411 continue;
2412
2413 /* Might drop this_rq->lock */
2414 push_task = NULL;
2415 double_lock_balance(this_rq, src_rq);
2416
2417 /*
2418 * If there are no more pullable tasks on the
2419 * rq, we're done with it.
2420 */
2421 if (src_rq->dl.dl_nr_running <= 1)
2422 goto skip;
2423
2424 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2425
2426 /*
2427 * We found a task to be pulled if:
2428 * - it preempts our current (if there's one),
2429 * - it will preempt the last one we pulled (if any).
2430 */
2431 if (p && dl_time_before(p->dl.deadline, dmin) &&
2432 dl_task_is_earliest_deadline(p, this_rq)) {
2433 WARN_ON(p == src_rq->curr);
2434 WARN_ON(!task_on_rq_queued(p));
2435
2436 /*
2437 * Only pull p if its deadline is no earlier than that of
2438 * src_rq's current task; otherwise p is about to run there anyway.
2439 */
2440 if (dl_time_before(p->dl.deadline,
2441 src_rq->curr->dl.deadline))
2442 goto skip;
2443
2444 if (is_migration_disabled(p)) {
2445 push_task = get_push_task(src_rq);
2446 } else {
2447 deactivate_task(src_rq, p, 0);
2448 set_task_cpu(p, this_cpu);
2449 activate_task(this_rq, p, 0);
2450 dmin = p->dl.deadline;
2451 resched = true;
2452 }
2453
2454 /* Is there any other task even earlier? */
2455 }
2456 skip:
2457 double_unlock_balance(this_rq, src_rq);
2458
2459 if (push_task) {
2460 preempt_disable();
2461 raw_spin_rq_unlock(this_rq);
2462 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2463 push_task, &src_rq->push_work);
2464 preempt_enable();
2465 raw_spin_rq_lock(this_rq);
2466 }
2467 }
2468
2469 if (resched)
2470 resched_curr(this_rq);
2471 }
2472
2473 /*
2474 * Since the task is not running and a reschedule is not going to happen
2475 * anytime soon on its runqueue, we try pushing it away now.
2476 */
2477 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2478 {
2479 if (!task_on_cpu(rq, p) &&
2480 !test_tsk_need_resched(rq->curr) &&
2481 p->nr_cpus_allowed > 1 &&
2482 dl_task(rq->curr) &&
2483 (rq->curr->nr_cpus_allowed < 2 ||
2484 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2485 push_dl_tasks(rq);
2486 }
2487 }
2488
2489 static void set_cpus_allowed_dl(struct task_struct *p,
2490 struct affinity_context *ctx)
2491 {
2492 struct root_domain *src_rd;
2493 struct rq *rq;
2494
2495 WARN_ON_ONCE(!dl_task(p));
2496
2497 rq = task_rq(p);
2498 src_rd = rq->rd;
2499 /*
2500 * Migrating a SCHED_DEADLINE task between exclusive
2501 * cpusets (different root_domains) entails a bandwidth
2502 * update. We already made space for us in the destination
2503 * domain (see cpuset_can_attach()).
2504 */
2505 if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
2506 struct dl_bw *src_dl_b;
2507
2508 src_dl_b = dl_bw_of(cpu_of(rq));
2509 /*
2510 * We now free resources of the root_domain we are migrating
2511 * off. In the worst case, sched_setattr() may temporarily fail
2512 * until we complete the update.
2513 */
2514 raw_spin_lock(&src_dl_b->lock);
2515 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2516 raw_spin_unlock(&src_dl_b->lock);
2517 }
2518
2519 set_cpus_allowed_common(p, ctx);
2520 }
2521
2522 /* Assumes rq->lock is held */
2523 static void rq_online_dl(struct rq *rq)
2524 {
2525 if (rq->dl.overloaded)
2526 dl_set_overload(rq);
2527
2528 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2529 if (rq->dl.dl_nr_running > 0)
2530 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2531 }
2532
2533 /* Assumes rq->lock is held */
2534 static void rq_offline_dl(struct rq *rq)
2535 {
2536 if (rq->dl.overloaded)
2537 dl_clear_overload(rq);
2538
2539 cpudl_clear(&rq->rd->cpudl, rq->cpu);
2540 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2541 }
2542
2543 void __init init_sched_dl_class(void)
2544 {
2545 unsigned int i;
2546
2547 for_each_possible_cpu(i)
2548 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2549 GFP_KERNEL, cpu_to_node(i));
2550 }
2551
2552 void dl_add_task_root_domain(struct task_struct *p)
2553 {
2554 struct rq_flags rf;
2555 struct rq *rq;
2556 struct dl_bw *dl_b;
2557
2558 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2559 if (!dl_task(p)) {
2560 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2561 return;
2562 }
2563
2564 rq = __task_rq_lock(p, &rf);
2565
2566 dl_b = &rq->rd->dl_bw;
2567 raw_spin_lock(&dl_b->lock);
2568
2569 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2570
2571 raw_spin_unlock(&dl_b->lock);
2572
2573 task_rq_unlock(rq, p, &rf);
2574 }
2575
2576 void dl_clear_root_domain(struct root_domain *rd)
2577 {
2578 unsigned long flags;
2579
2580 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2581 rd->dl_bw.total_bw = 0;
2582 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2583 }
2584
2585 #endif /* CONFIG_SMP */
2586
2587 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2588 {
2589 /*
2590 * task_non_contending() can start the "inactive timer" (if the 0-lag
2591 * time is in the future). If the task switches back to dl before
2592 * the "inactive timer" fires, it can continue to consume its current
2593 * runtime using its current deadline. If it stays outside of
2594 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2595 * will reset the task parameters.
2596 */
2597 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2598 task_non_contending(&p->dl);
2599
2600 /*
2601 * In case a task is setscheduled out from SCHED_DEADLINE we need to
2602 * keep track of that on its cpuset (for correct bandwidth tracking).
2603 */
2604 dec_dl_tasks_cs(p);
2605
2606 if (!task_on_rq_queued(p)) {
2607 /*
2608 * Inactive timer is armed. However, p is leaving DEADLINE and
2609 * might migrate away from this rq while continuing to run in
2610 * some other class. We need to remove its contribution from
2611 * this rq running_bw now, or sub_rq_bw (below) will complain.
2612 */
2613 if (p->dl.dl_non_contending)
2614 sub_running_bw(&p->dl, &rq->dl);
2615 sub_rq_bw(&p->dl, &rq->dl);
2616 }
2617
2618 /*
2619 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2620 * at the 0-lag time, because the task could have been migrated
2621 * elsewhere while running as SCHED_OTHER in the meantime.
2622 */
2623 if (p->dl.dl_non_contending)
2624 p->dl.dl_non_contending = 0;
2625
2626 /*
2627 * Since this might be the only -deadline task on the rq,
2628 * this is the right place to try to pull some other one
2629 * from an overloaded CPU, if any.
2630 */
2631 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2632 return;
2633
2634 deadline_queue_pull_task(rq);
2635 }
2636
2637 /*
2638 * When switching to -deadline, we may overload the rq, then
2639 * we try to push someone off, if possible.
2640 */
2641 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2642 {
2643 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2644 put_task_struct(p);
2645
2646 /*
2647 * In case a task is setscheduled to SCHED_DEADLINE we need to keep
2648 * track of that on its cpuset (for correct bandwidth tracking).
2649 */
2650 inc_dl_tasks_cs(p);
2651
2652 /* If p is not queued we will update its parameters at next wakeup. */
2653 if (!task_on_rq_queued(p)) {
2654 add_rq_bw(&p->dl, &rq->dl);
2655
2656 return;
2657 }
2658
2659 if (rq->curr != p) {
2660 #ifdef CONFIG_SMP
2661 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2662 deadline_queue_push_tasks(rq);
2663 #endif
2664 if (dl_task(rq->curr))
2665 wakeup_preempt_dl(rq, p, 0);
2666 else
2667 resched_curr(rq);
2668 } else {
2669 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2670 }
2671 }
2672
2673 /*
2674 * If the scheduling parameters of a -deadline task changed,
2675 * a push or pull operation might be needed.
2676 */
2677 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2678 int oldprio)
2679 {
2680 if (!task_on_rq_queued(p))
2681 return;
2682
2683 #ifdef CONFIG_SMP
2684 /*
2685 * This might be too much, but unfortunately
2686 * we don't have the old deadline value, and
2687 * we can't tell whether the task is raising
2688 * or lowering its prio, so...
2689 */
2690 if (!rq->dl.overloaded)
2691 deadline_queue_pull_task(rq);
2692
2693 if (task_current(rq, p)) {
2694 /*
2695 * If we now have an earlier deadline task than p,
2696 * then reschedule, provided p is still on this
2697 * runqueue.
2698 */
2699 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2700 resched_curr(rq);
2701 } else {
2702 /*
2703 * Current may not be deadline in case p was throttled but we
2704 * have just replenished it (e.g. rt_mutex_setprio()).
2705 *
2706 * Otherwise, if p was given an earlier deadline, reschedule.
2707 */
2708 if (!dl_task(rq->curr) ||
2709 dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
2710 resched_curr(rq);
2711 }
2712 #else
2713 /*
2714 * We don't know if p has an earlier or later deadline, so let's blindly
2715 * set a (maybe not needed) rescheduling point.
2716 */
2717 resched_curr(rq);
2718 #endif
2719 }
2720
2721 #ifdef CONFIG_SCHED_CORE
2722 static int task_is_throttled_dl(struct task_struct *p, int cpu)
2723 {
2724 return p->dl.dl_throttled;
2725 }
2726 #endif
2727
2728 DEFINE_SCHED_CLASS(dl) = {
2729
2730 .enqueue_task = enqueue_task_dl,
2731 .dequeue_task = dequeue_task_dl,
2732 .yield_task = yield_task_dl,
2733
2734 .wakeup_preempt = wakeup_preempt_dl,
2735
2736 .pick_next_task = pick_next_task_dl,
2737 .put_prev_task = put_prev_task_dl,
2738 .set_next_task = set_next_task_dl,
2739
2740 #ifdef CONFIG_SMP
2741 .balance = balance_dl,
2742 .pick_task = pick_task_dl,
2743 .select_task_rq = select_task_rq_dl,
2744 .migrate_task_rq = migrate_task_rq_dl,
2745 .set_cpus_allowed = set_cpus_allowed_dl,
2746 .rq_online = rq_online_dl,
2747 .rq_offline = rq_offline_dl,
2748 .task_woken = task_woken_dl,
2749 .find_lock_rq = find_lock_later_rq,
2750 #endif
2751
2752 .task_tick = task_tick_dl,
2753 .task_fork = task_fork_dl,
2754
2755 .prio_changed = prio_changed_dl,
2756 .switched_from = switched_from_dl,
2757 .switched_to = switched_to_dl,
2758
2759 .update_curr = update_curr_dl,
2760 #ifdef CONFIG_SCHED_CORE
2761 .task_is_throttled = task_is_throttled_dl,
2762 #endif
2763 };
2764
2765 /* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
2766 static u64 dl_generation;
2767
2768 int sched_dl_global_validate(void)
2769 {
2770 u64 runtime = global_rt_runtime();
2771 u64 period = global_rt_period();
2772 u64 new_bw = to_ratio(period, runtime);
2773 u64 gen = ++dl_generation;
2774 struct dl_bw *dl_b;
2775 int cpu, cpus, ret = 0;
2776 unsigned long flags;
2777
2778 /*
2779 * Here we want to check that the bandwidth is not being set to a
2780 * value smaller than the currently allocated bandwidth in
2781 * any of the root_domains.
2782 */
2783 for_each_possible_cpu(cpu) {
2784 rcu_read_lock_sched();
2785
2786 if (dl_bw_visited(cpu, gen))
2787 goto next;
2788
2789 dl_b = dl_bw_of(cpu);
2790 cpus = dl_bw_cpus(cpu);
2791
2792 raw_spin_lock_irqsave(&dl_b->lock, flags);
2793 if (new_bw * cpus < dl_b->total_bw)
2794 ret = -EBUSY;
2795 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2796
2797 next:
2798 rcu_read_unlock_sched();
2799
2800 if (ret)
2801 break;
2802 }
2803
2804 return ret;
2805 }
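/*
 * Worked example (illustrative, assuming BW_SHIFT = 20 as in
 * kernel/sched/sched.h): with the defaults sched_rt_runtime_us = 950000
 * and sched_rt_period_us = 1000000, new_bw = to_ratio(1s, 950ms) =
 * 996147, i.e. ~0.95 of one CPU in Q20 fixed point. A 4-CPU root
 * domain is then rejected (-EBUSY) only if its already allocated
 * total_bw exceeds 4 * 996147.
 */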
2806
2807 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2808 {
2809 if (global_rt_runtime() == RUNTIME_INF) {
2810 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2811 dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;
2812 } else {
2813 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2814 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2815 dl_rq->max_bw = dl_rq->extra_bw =
2816 to_ratio(global_rt_period(), global_rt_runtime());
2817 }
2818 }
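/*
 * Worked example (illustrative, assuming BW_SHIFT = 20 and
 * RATIO_SHIFT = 8 as in kernel/sched/sched.h): with the default 95%
 * limit,
 *   max_bw   = to_ratio(1s, 950ms)        = 996147 (~0.95 in Q20)
 *   bw_ratio = to_ratio(950ms, 1s) >> 12  = 269    (~1.05 in Q8)
 * i.e. bw_ratio is roughly the reciprocal of max_bw, used by GRUB to
 * scale the runtime it reclaims.
 */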
2819
2820 void sched_dl_do_global(void)
2821 {
2822 u64 new_bw = -1;
2823 u64 gen = ++dl_generation;
2824 struct dl_bw *dl_b;
2825 int cpu;
2826 unsigned long flags;
2827
2828 if (global_rt_runtime() != RUNTIME_INF)
2829 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2830
2831 for_each_possible_cpu(cpu) {
2832 rcu_read_lock_sched();
2833
2834 if (dl_bw_visited(cpu, gen)) {
2835 rcu_read_unlock_sched();
2836 continue;
2837 }
2838
2839 dl_b = dl_bw_of(cpu);
2840
2841 raw_spin_lock_irqsave(&dl_b->lock, flags);
2842 dl_b->bw = new_bw;
2843 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2844
2845 rcu_read_unlock_sched();
2846 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2847 }
2848 }
2849
2850 /*
2851 * We must be sure that accepting a new task (or allowing changing the
2852 * parameters of an existing one) is consistent with the bandwidth
2853 * constraints. If yes, this function also updates the currently
2854 * allocated bandwidth to reflect the new situation.
2855 *
2856 * This function is called while holding p's rq->lock.
2857 */
2858 int sched_dl_overflow(struct task_struct *p, int policy,
2859 const struct sched_attr *attr)
2860 {
2861 u64 period = attr->sched_period ?: attr->sched_deadline;
2862 u64 runtime = attr->sched_runtime;
2863 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2864 int cpus, err = -1, cpu = task_cpu(p);
2865 struct dl_bw *dl_b = dl_bw_of(cpu);
2866 unsigned long cap;
2867
2868 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2869 return 0;
2870
2871 /* !deadline task may carry old deadline bandwidth */
2872 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2873 return 0;
2874
2875 /*
2876 * Whether a task enters, leaves, or stays -deadline but changes
2877 * its parameters, we may need to update the total allocated
2878 * bandwidth of the container accordingly.
2879 */
2880 raw_spin_lock(&dl_b->lock);
2881 cpus = dl_bw_cpus(cpu);
2882 cap = dl_bw_capacity(cpu);
2883
2884 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2885 !__dl_overflow(dl_b, cap, 0, new_bw)) {
2886 if (hrtimer_active(&p->dl.inactive_timer))
2887 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2888 __dl_add(dl_b, new_bw, cpus);
2889 err = 0;
2890 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2891 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2892 /*
2893 * XXX this is slightly incorrect: when the task
2894 * utilization decreases, we should delay the total
2895 * utilization change until the task's 0-lag point.
2896 * But this would require to set the task's "inactive
2897 * timer" when the task is not inactive.
2898 */
2899 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2900 __dl_add(dl_b, new_bw, cpus);
2901 dl_change_utilization(p, new_bw);
2902 err = 0;
2903 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2904 /*
2905 * Do not decrease the total deadline utilization here,
2906 * switched_from_dl() will take care to do it at the correct
2907 * (0-lag) time.
2908 */
2909 err = 0;
2910 }
2911 raw_spin_unlock(&dl_b->lock);
2912
2913 return err;
2914 }
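/*
 * Worked example (illustrative): a task with sched_runtime = 10ms and
 * sched_period = 100ms requests new_bw = to_ratio(100ms, 10ms) =
 * 104857, ~10% of one CPU in Q20 fixed point. Ignoring capacity
 * scaling on asymmetric systems, a 4-CPU root domain with the default
 * 95% limit admits it as long as total_bw + new_bw stays within
 * 4 * 996147.
 */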
2915
2916 /*
2917 * This function initializes the sched_dl_entity of a task that is
2918 * becoming a SCHED_DEADLINE task.
2919 *
2920 * Only the static values are considered here, the actual runtime and the
2921 * absolute deadline will be properly calculated when the task is enqueued
2922 * for the first time with its new policy.
2923 */
2924 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2925 {
2926 struct sched_dl_entity *dl_se = &p->dl;
2927
2928 dl_se->dl_runtime = attr->sched_runtime;
2929 dl_se->dl_deadline = attr->sched_deadline;
2930 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2931 dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
2932 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2933 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2934 }
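/*
 * Worked example (illustrative, assuming BW_SHIFT = 20): sched_runtime
 * = 2ms, sched_deadline = 5ms and sched_period = 10ms give
 *   dl_bw      = to_ratio(10ms, 2ms) = 209715 (~0.2 in Q20)
 *   dl_density = to_ratio(5ms, 2ms)  = 419430 (~0.4 in Q20)
 */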
2935
2936 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2937 {
2938 struct sched_dl_entity *dl_se = &p->dl;
2939
2940 attr->sched_priority = p->rt_priority;
2941 attr->sched_runtime = dl_se->dl_runtime;
2942 attr->sched_deadline = dl_se->dl_deadline;
2943 attr->sched_period = dl_se->dl_period;
2944 attr->sched_flags &= ~SCHED_DL_FLAGS;
2945 attr->sched_flags |= dl_se->flags;
2946 }
2947
2948 /*
2949 * This function validates the new parameters of a -deadline task.
2950 * We require the deadline to be non-zero and greater than or
2951 * equal to the runtime, and the period to be either zero or
2952 * greater than or equal to the deadline. Furthermore, we have to be sure that
2953 * user parameters are above the internal resolution of 1us (we
2954 * check sched_runtime only since it is always the smaller one) and
2955 * below 2^63 ns (we have to check both sched_deadline and
2956 * sched_period, as the latter can be zero).
2957 */
2958 bool __checkparam_dl(const struct sched_attr *attr)
2959 {
2960 u64 period, max, min;
2961
2962 /* special dl tasks don't actually use any parameter */
2963 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2964 return true;
2965
2966 /* deadline != 0 */
2967 if (attr->sched_deadline == 0)
2968 return false;
2969
2970 /*
2971 * Since we truncate DL_SCALE bits, make sure we're at least
2972 * that big.
2973 */
2974 if (attr->sched_runtime < (1ULL << DL_SCALE))
2975 return false;
2976
2977 /*
2978 * Since we use the MSB for wrap-around and sign issues, make
2979 * sure it's not set (mind that period can be equal to zero).
2980 */
2981 if (attr->sched_deadline & (1ULL << 63) ||
2982 attr->sched_period & (1ULL << 63))
2983 return false;
2984
2985 period = attr->sched_period;
2986 if (!period)
2987 period = attr->sched_deadline;
2988
2989 /* runtime <= deadline <= period (if period != 0) */
2990 if (period < attr->sched_deadline ||
2991 attr->sched_deadline < attr->sched_runtime)
2992 return false;
2993
2994 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
2995 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
2996
2997 if (period < min || period > max)
2998 return false;
2999
3000 return true;
3001 }
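/*
 * Examples (illustrative): runtime = 2ms, deadline = 5ms, period =
 * 10ms passes every check above (runtime >= 2^DL_SCALE ns, runtime <=
 * deadline <= period, period within the sysctl bounds). By contrast,
 * runtime = 5ms with deadline = 2ms fails the ordering check, and a
 * 50us period fails the default 100us sched_deadline_period_min_us.
 */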
3002
3003 /*
3004 * This function clears the sched_dl_entity static params.
3005 */
3006 static void __dl_clear_params(struct sched_dl_entity *dl_se)
3007 {
3008 dl_se->dl_runtime = 0;
3009 dl_se->dl_deadline = 0;
3010 dl_se->dl_period = 0;
3011 dl_se->flags = 0;
3012 dl_se->dl_bw = 0;
3013 dl_se->dl_density = 0;
3014
3015 dl_se->dl_throttled = 0;
3016 dl_se->dl_yielded = 0;
3017 dl_se->dl_non_contending = 0;
3018 dl_se->dl_overrun = 0;
3019
3020 #ifdef CONFIG_RT_MUTEXES
3021 dl_se->pi_se = dl_se;
3022 #endif
3023 }
3024
3025 void init_dl_entity(struct sched_dl_entity *dl_se)
3026 {
3027 RB_CLEAR_NODE(&dl_se->rb_node);
3028 init_dl_task_timer(dl_se);
3029 init_dl_inactive_task_timer(dl_se);
3030 __dl_clear_params(dl_se);
3031 }
3032
3033 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
3034 {
3035 struct sched_dl_entity *dl_se = &p->dl;
3036
3037 if (dl_se->dl_runtime != attr->sched_runtime ||
3038 dl_se->dl_deadline != attr->sched_deadline ||
3039 dl_se->dl_period != attr->sched_period ||
3040 dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
3041 return true;
3042
3043 return false;
3044 }
3045
3046 #ifdef CONFIG_SMP
3047 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
3048 const struct cpumask *trial)
3049 {
3050 unsigned long flags, cap;
3051 struct dl_bw *cur_dl_b;
3052 int ret = 1;
3053
3054 rcu_read_lock_sched();
3055 cur_dl_b = dl_bw_of(cpumask_any(cur));
3056 cap = __dl_bw_capacity(trial);
3057 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
3058 if (__dl_overflow(cur_dl_b, cap, 0, 0))
3059 ret = 0;
3060 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
3061 rcu_read_unlock_sched();
3062
3063 return ret;
3064 }
3065
3066 enum dl_bw_request {
3067 dl_bw_req_check_overflow = 0,
3068 dl_bw_req_alloc,
3069 dl_bw_req_free
3070 };
3071
3072 static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
3073 {
3074 unsigned long flags;
3075 struct dl_bw *dl_b;
3076 bool overflow = 0;
3077
3078 rcu_read_lock_sched();
3079 dl_b = dl_bw_of(cpu);
3080 raw_spin_lock_irqsave(&dl_b->lock, flags);
3081
3082 if (req == dl_bw_req_free) {
3083 __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
3084 } else {
3085 unsigned long cap = dl_bw_capacity(cpu);
3086
3087 overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
3088
3089 if (req == dl_bw_req_alloc && !overflow) {
3090 /*
3091 * We reserve space in the destination
3092 * root_domain, as we can't fail after this point.
3093 * We will free resources in the source root_domain
3094 * later on (see set_cpus_allowed_dl()).
3095 */
3096 __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
3097 }
3098 }
3099
3100 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3101 rcu_read_unlock_sched();
3102
3103 return overflow ? -EBUSY : 0;
3104 }
3105
3106 int dl_bw_check_overflow(int cpu)
3107 {
3108 return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
3109 }
3110
3111 int dl_bw_alloc(int cpu, u64 dl_bw)
3112 {
3113 return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
3114 }
3115
3116 void dl_bw_free(int cpu, u64 dl_bw)
3117 {
3118 dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
3119 }
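/*
 * Hedged usage sketch: a caller admitting a -deadline task into a new
 * root domain (e.g. the cpuset attach path) reserves the bandwidth
 * first and gives it back if the move is aborted:
 *
 *	if (dl_bw_alloc(dest_cpu, p->dl.dl_bw))
 *		return -EBUSY;		// no room in the destination
 *	// ... move p; on failure:
 *	dl_bw_free(dest_cpu, p->dl.dl_bw);
 */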
3120 #endif
3121
3122 #ifdef CONFIG_SCHED_DEBUG
3123 void print_dl_stats(struct seq_file *m, int cpu)
3124 {
3125 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
3126 }
3127 #endif /* CONFIG_SCHED_DEBUG */
3128