// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"
#include "pelt.h"

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

#ifdef CONFIG_RT_MUTEXES
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
	return dl_se->pi_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
	return pi_of(dl_se) != dl_se;
}
#else
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
	return dl_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
	return false;
}
#endif

#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");

	if (cpumask_subset(rd->span, cpu_active_mask))
		return cpumask_weight(rd->span);

	cpus = 0;

	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}

static inline unsigned long __dl_bw_capacity(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	unsigned long cap = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");

	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cap += capacity_orig_of(i);

	return cap;
}

/*
 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 * of the CPU the task is running on rather than rd's \Sum CPU capacity.
 */
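/*
 * For example, on a symmetric 4-CPU root domain where every CPU runs at
 * SCHED_CAPACITY_SCALE (1024), the fast path below yields
 * dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT == 4 * 1024, which matches the
 * sum __dl_bw_capacity() would compute by walking the active CPUs.
 */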
static inline unsigned long dl_bw_capacity(int i)
{
	if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
	    capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
	} else {
		return __dl_bw_capacity(i);
	}
}

static inline bool dl_bw_visited(int cpu, u64 gen)
{
	struct root_domain *rd = cpu_rq(cpu)->rd;

	if (rd->visit_gen == gen)
		return true;

	rd->visit_gen = gen;
	return false;
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	return 1;
}

static inline unsigned long dl_bw_capacity(int i)
{
	return SCHED_CAPACITY_SCALE;
}

static inline bool dl_bw_visited(int cpu, u64 gen)
{
	return false;
}
#endif

static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
	if (dl_rq->running_bw > old)
		dl_rq->running_bw = 0;
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
	if (dl_rq->this_bw > old)
		dl_rq->this_bw = 0;
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_running_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_running_bw(dl_se->dl_bw, dl_rq);
}

static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	struct rq *rq;

	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);

	if (task_on_rq_queued(p))
		return;

	rq = task_rq(p);
	if (p->dl.dl_non_contending) {
		sub_running_bw(&p->dl, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
	__add_rq_bw(new_bw, &rq->dl);
}
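/*
 * Note that dl_bw, running_bw and this_bw are utilizations in fixed-point
 * representation, multiplied by 2^BW_SHIFT (see the comment above
 * grub_reclaim() further down). E.g., a task with dl_runtime = 10ms and
 * dl_period = 100ms contributes dl_bw = (10 << BW_SHIFT) / 100, i.e.
 * roughly 0.1 * 2^20 = 104857.
 */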
/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is cancelled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, which does not contribute to running_bw anymore.
 *                              +------------------+
 *             wakeup           |      ACTIVE      |
 *          +------------------>+    contending    |
 *          | add_running_bw    |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |                dequeue |      |
 * +--------+-------+                |      |
 * |                |   t >= 0-lag   |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                | sub_running_bw |      |
 * +--------+-------+                |      |
 *          ^                        |      |
 *          |              t < 0-lag |      |
 *          |                        |      |
 *          |                        V      |
 *          |                   +----+------+------+
 *          |  sub_running_bw   |      ACTIVE      |
 *          +-------------------+                  |
 *            inactive timer    |  non contending  |
 *               fired          +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
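/*
 * Worked example for the 0-lag computation done in task_non_contending()
 * below, which evaluates
 *
 *   deadline - runtime * dl_period / dl_runtime
 *
 * A task with dl_runtime = 10ms and dl_period = 100ms that blocks with
 * 4ms of runtime left and its absolute deadline 50ms away reaches its
 * "0-lag time" at 50ms - (4ms * 100 / 10) = 10ms from now; the inactive
 * timer is armed for that instant, unless the task wakes up earlier.
 */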
static void task_non_contending(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->inactive_timer;
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	s64 zerolag_time;

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (dl_entity_is_special(dl_se))
		return;

	WARN_ON(dl_se->dl_non_contending);

	zerolag_time = dl_se->deadline -
		 div64_long((dl_se->runtime * dl_se->dl_period),
			dl_se->dl_runtime);

	/*
	 * Using relative times instead of the absolute "0-lag time"
	 * allows us to simplify the code
	 */
	zerolag_time -= rq_clock(rq);

	/*
	 * If the "0-lag time" already passed, decrease the active
	 * utilization now, instead of starting a timer
	 */
	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
		if (dl_task(p))
			sub_running_bw(dl_se, dl_rq);
		if (!dl_task(p) || p->state == TASK_DEAD) {
			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

			if (p->state == TASK_DEAD)
				sub_rq_bw(&p->dl, &rq->dl);
			raw_spin_lock(&dl_b->lock);
			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
			__dl_clear_params(p);
			raw_spin_unlock(&dl_b->lock);
		}

		return;
	}

	dl_se->dl_non_contending = 1;
	get_task_struct(p);
	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}

static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (flags & ENQUEUE_MIGRATED)
		add_rq_bw(dl_se, dl_rq);

	if (dl_se->dl_non_contending) {
		dl_se->dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
			put_task_struct(dl_task_of(dl_se));
	} else {
		/*
		 * Since "dl_non_contending" is not set, the
		 * task's utilization has already been removed from
		 * active utilization (either when the task blocked,
		 * or when the "inactive timer" fired).
		 * So, add it back.
		 */
		add_running_bw(dl_se, dl_rq);
	}
}
static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
}

static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq)
{
	dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif

	dl_rq->running_bw = 0;
	dl_rq->this_bw = 0;
	init_dl_rq_bw_ratio(dl_rq);
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 */
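/*
 * Keeping this tree ordered by deadline also allows dl_rq->earliest_dl.next
 * to cache the deadline of the leftmost (i.e. earliest-deadline) pushable
 * task, as maintained by the two functions below, so the push/pull logic
 * can read it without walking the tree.
 */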
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	bool leftmost = true;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	if (leftmost)
		dl_rq->earliest_dl.next = p->dl.deadline;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color_cached(&p->pushable_dl_tasks,
			       &dl_rq->pushable_dl_tasks_root, leftmost);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		if (next_node) {
			dl_rq->earliest_dl.next = rb_entry(next_node,
				struct task_struct, pushable_dl_tasks)->dl.deadline;
		}
	}

	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return rq->online && dl_task(prev);
}

static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void deadline_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_dl_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;
	struct dl_bw *dl_b;

	later_rq = find_lock_later_rq(p, rq);
	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online CPU:
		 */
		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
		if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable CPU.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
		/*
		 * Inactive timer is armed (or callback is running, but
		 * waiting for us to release rq locks). In any case, when it
		 * fires (or continues), it will see the running_bw of this
		 * task migrated to later_rq (and correctly handle it).
		 */
		sub_running_bw(&p->dl, &rq->dl);
		sub_rq_bw(&p->dl, &rq->dl);

		add_rq_bw(&p->dl, &later_rq->dl);
		add_running_bw(&p->dl, &later_rq->dl);
	} else {
		sub_rq_bw(&p->dl, &rq->dl);
		add_rq_bw(&p->dl, &later_rq->dl);
	}

	/*
	 * And we finally need to fixup root_domain(s) bandwidth accounting,
	 * since p is still hanging out in the old (now moved to default) root
	 * domain.
	 */
	dl_b = &rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	dl_b = &later_rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	set_task_cpu(p, later_rq->cpu);
	double_unlock_balance(later_rq, rq);

	return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void deadline_queue_push_tasks(struct rq *rq)
{
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(is_dl_boosted(dl_se));
	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

	/*
	 * We are racing with the deadline timer. So, do nothing because
	 * the deadline timer handler will take care of properly recharging
	 * the runtime and postponing the deadline
	 */
	if (dl_se->dl_throttled)
		return;

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
	dl_se->runtime = dl_se->dl_runtime;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
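 *
 * (As an illustration: an entity that declared 10ms of runtime every
 * 100ms but keeps executing for 15ms per period would, under pure EDF,
 * consume bandwidth that was never accounted for at admission time.)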
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overruns
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or it just underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_of(dl_se)->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
		dl_se->runtime = pi_of(dl_se)->dl_runtime;
	}

	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_of(dl_se)->dl_period;
		dl_se->runtime += pi_of(dl_se)->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after warning userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
		dl_se->runtime = pi_of(dl_se)->dl_runtime;
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.rst for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For a
 * task with deadline equal to period, this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_of(dl_se)->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
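/*
 * Worked example of the check above: a task with dl_runtime = 10ms and
 * dl_deadline = 100ms (bandwidth 0.1) wakes up with 5ms of residual
 * runtime and 30ms left to its absolute deadline. Since 5 / 30 ~= 0.17
 * exceeds 0.1, keeping the current parameters would break the bandwidth
 * guarantee, so the function reports an overflow and the entity gets a
 * fresh runtime and deadline instead.
 */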
/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to avoid the task overrunning its
 * density.
 *
 * Reasoning: a task may overrun the density if:
 *    runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * In this way, runtime will be equal to the maximum density
 * the task can use without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 laxity = dl_se->deadline - rq_clock(rq);

	/*
	 * If the task has deadline < period, and the deadline is in the past,
	 * it should already be throttled before this check.
	 *
	 * See update_dl_entity() comments for further details.
	 */
	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
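/*
 * For instance, with dl_runtime = 10ms and dl_deadline = 100ms
 * (dl_density = 0.1 in BW_SHIFT fixed point) and 30ms of laxity left
 * to the absolute deadline, the runtime is clamped to 0.1 * 30ms = 3ms,
 * which is exactly the budget that keeps the task within its density.
 */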
/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only for tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline == dl_se->dl_period;
}

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied: the runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revised CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revised CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of the update_dl_revised_wakeup() function
 * to find more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, rq_clock(rq))) {

		if (unlikely(!dl_is_implicit(dl_se) &&
			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
			     !is_dl_boosted(dl_se))) {
			update_dl_revised_wakeup(dl_se, rq);
			return;
		}

		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
		dl_se->runtime = pi_of(dl_se)->dl_runtime;
	}
}

static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}
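/*
 * E.g., for a constrained task with dl_deadline = 50ms and
 * dl_period = 100ms whose current absolute deadline sits at t = 70ms,
 * the current period started at 70 - 50 = 20ms, so dl_next_period()
 * returns 70 - 50 + 100 = 120ms.
 */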
/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->dl_timer;
	struct rq *rq = task_rq(p);
	ktime_t now, act;
	s64 delta;

	lockdep_assert_held(&rq->lock);

	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_next_period(dl_se));
	now = hrtimer_cb_get_time(timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	/*
	 * !enqueued will guarantee another callback; even if one is already in
	 * progress. This ensures a balanced {get,put}_task_struct().
	 *
	 * The race against __run_timer() clearing the enqueued state is
	 * harmless because we're holding task_rq()->lock, therefore the timer
	 * expiring after we've done the check will wait on its task_rq_lock()
	 * and observe our state.
	 */
	if (!hrtimer_is_queued(timer)) {
		get_task_struct(p);
		hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
	}

	return 1;
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clear dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE (through switched_from_dl()).
	 */
	if (!dl_task(p))
		goto unlock;

	/*
	 * The task might have been boosted by someone else and might be in the
	 * boosting/deboosting path; it's not throttled.
	 */
	if (is_dl_boosted(dl_se))
		goto unlock;

	/*
	 * Spurious timer due to start_dl_timer() race; or we already received
	 * a replenishment from rt_mutex_setprio().
	 */
	if (!dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	/*
	 * If the throttle happened during sched-out; like:
	 *
	 *   schedule()
	 *     deactivate_task()
	 *       dequeue_task_dl()
	 *         update_curr_dl()
	 *           start_dl_timer()
	 *         __dequeue_task_dl()
	 *     prev->on_rq = 0;
	 *
	 * We can be both throttled and !queued. Replenish the counter
	 * but do not enqueue -- wait for our wakeup to do that.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se);
		goto unlock;
	}

#ifdef CONFIG_SMP
	if (unlikely(!rq->online)) {
		/*
		 * If the runqueue is no longer available, migrate the
		 * task elsewhere. This necessarily changes rq.
		 */
		lockdep_unpin_lock(&rq->lock, rf.cookie);
		rq = dl_task_offline_migration(rq, p);
		rf.cookie = lockdep_pin_lock(&rq->lock);
		update_rq_clock(rq);

		/*
		 * Now that the task has been migrated to the new RQ and we
		 * have that locked, proceed as normal and enqueue the task
		 * there.
		 */
	}
#endif

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->curr))
		check_preempt_curr_dl(rq, p, 0);
	else
		resched_curr(rq);

#ifdef CONFIG_SMP
	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's safe to drop
		 * rq->lock.
		 */
		rq_unpin_lock(rq, &rf);
		push_dl_task(rq);
		rq_repin_lock(rq, &rf);
	}
#endif

unlock:
	task_rq_unlock(rq, p, &rf);

	/*
	 * This can free the task_struct, including this hrtimer, do not touch
	 * anything related to that after this.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	timer->function = dl_task_timer;
}

/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next
 * period, unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
			return;
		dl_se->dl_throttled = 1;
		if (dl_se->runtime > 0)
			dl_se->runtime = 0;
	}
}

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
	return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * This function implements the GRUB accounting rule:
 * according to the GRUB reclaiming algorithm, the runtime is
 * not decreased as "dq = -dt", but as
 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * runqueue active utilization, and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
 * BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value
 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
 * So, overflow is not an issue here.
 */
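/*
 * Numerical example: a task with u = 0.25 on a runqueue with Umax = 0.95
 * (so u / Umax ~= 0.263), Uinact = 0.5 and Uextra = 0.3. Here
 * 1 - Uinact - Uextra = 0.2 is below u / Umax, so grub_reclaim() uses
 * u_act = 0.263 and the task's runtime is consumed at roughly a quarter
 * of the wall-clock rate, reclaiming bandwidth left unused by others.
 */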
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
	u64 u_act;
	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;

	/*
	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
	 * we compare u_inact + rq->dl.extra_bw with
	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
	 * u_inact + rq->dl.extra_bw can be larger than
	 * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative
	 * leading to wrong results)
	 */
	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
		u_act = u_act_min;
	else
		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;

	return (delta * u_act) >> BW_SHIFT;
}
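/*
 * Runtime accounting in update_curr_dl() below is also scaled by CPU
 * frequency and capacity for the !SCHED_FLAG_RECLAIM case: since
 * cap_scale() divides by SCHED_CAPACITY_SCALE (1024), a CPU running at
 * half of its maximum frequency (scale_freq = 512) with half of the
 * maximum capacity (scale_cpu = 512) charges only 10ms * 0.5 * 0.5 =
 * 2.5ms of runtime for 10ms of wall-clock execution.
 */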
/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec, scaled_delta_exec;
	int cpu = cpu_of(rq);
	u64 now;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	now = rq_clock_task(rq);
	delta_exec = now - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0)) {
		if (unlikely(dl_se->dl_yielded))
			goto throttle;
		return;
	}

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = now;
	cgroup_account_cputime(curr, delta_exec);

	if (dl_entity_is_special(dl_se))
		return;

	/*
	 * For tasks that participate in GRUB, we implement GRUB-PA: the
	 * spare reclaimed bandwidth is used to clock down frequency.
	 *
	 * For the others, we still need to scale reservation parameters
	 * according to current frequency and CPU maximum capacity.
	 */
	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
		scaled_delta_exec = grub_reclaim(delta_exec,
						 rq,
						 &curr->dl);
	} else {
		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
		unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);

		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
	}

	dl_se->runtime -= scaled_delta_exec;

throttle:
	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
		dl_se->dl_throttled = 1;

		/* If requested, inform the user about runtime overruns. */
		if (dl_runtime_exceeded(dl_se) &&
		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
			dl_se->dl_overrun = 1;

		__dequeue_task_dl(rq, curr, 0);
		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     inactive_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	sched_clock_tick();
	update_rq_clock(rq);

	if (!dl_task(p) || p->state == TASK_DEAD) {
		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

		if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
			sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
			sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
			dl_se->dl_non_contending = 0;
		}

		raw_spin_lock(&dl_b->lock);
		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&dl_b->lock);
		__dl_clear_params(p);

		goto unlock;
	}
	if (dl_se->dl_non_contending == 0)
		goto unlock;

	sub_running_bw(dl_se, &rq->dl);
	dl_se->dl_non_contending = 0;
unlock:
	task_rq_unlock(rq, p, &rf);
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->inactive_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	timer->function = inactive_task_timer;
}

#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		if (dl_rq->earliest_dl.curr == 0)
			cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_clear(&rq->rd->cpudl, rq->cpu);
		cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
	} else {
		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (flags & ENQUEUE_WAKEUP) {
		task_contending(dl_se, flags);
		update_dl_entity(dl_se);
	} else if (flags & ENQUEUE_REPLENISH) {
		replenish_dl_entity(dl_se);
	} else if ((flags & ENQUEUE_RESTORE) &&
		  dl_time_before(dl_se->deadline,
				 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
		setup_new_dl_entity(dl_se);
	}

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	if (is_dl_boosted(&p->dl)) {
		/*
		 * Because of delays in the detection of the overrun of a
		 * thread's runtime, it might be the case that a thread
		 * goes to sleep in an rt mutex with negative runtime. As
		 * a consequence, the thread will be throttled.
		 *
		 * While waiting for the mutex, this thread can also be
		 * boosted via PI, resulting in a thread that is throttled
		 * and boosted at the same time.
		 *
		 * In this case, the boost overrides the throttle.
		 */
		if (p->dl.dl_throttled) {
			/*
			 * The replenish timer needs to be canceled. No
			 * problem if it fires concurrently: boosted threads
			 * are ignored in dl_task_timer().
			 */
			hrtimer_try_to_cancel(&p->dl.dl_timer);
			p->dl.dl_throttled = 0;
		}
	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task that is going
		 * to be deboosted, but exceeds its runtime while doing so. No point in
		 * replenishing it, as it's going to return to its original
		 * scheduling class after this. If it has been throttled, we need to
		 * clear the flag, otherwise the task may wake up as throttled after
		 * being boosted again with no means to replenish the runtime and clear
		 * the throttle.
		 */
		p->dl.dl_throttled = 0;
		BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
		return;
	}

	/*
	 * Check if a constrained deadline task was activated
	 * after the deadline but before the next period.
	 * If that is the case, the task will be throttled and
	 * the replenishment timer will be set to the next period.
	 */
	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
		dl_check_constrained_dl(&p->dl);

	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
		add_rq_bw(&p->dl, &rq->dl);
		add_running_bw(&p->dl, &rq->dl);
	}

	/*
	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 * However, the active utilization does not depend on the fact
	 * that the task is on the runqueue or not (but depends on the
	 * task's state - in GRUB parlance, "inactive" vs "active contending").
	 * In other words, even if a task is throttled its utilization must
	 * be counted in the active utilization; hence, we need to call
	 * add_running_bw().
	 */
	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
		if (flags & ENQUEUE_WAKEUP)
			task_contending(&p->dl, flags);

		return;
	}

	enqueue_dl_entity(&p->dl, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);

	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
		sub_running_bw(&p->dl, &rq->dl);
		sub_rq_bw(&p->dl, &rq->dl);
	}

	/*
	 * This check allows us to start the inactive timer (or to immediately
	 * decrease the active utilization, if needed) in two cases:
	 * when the task blocks and when it is terminating
	 * (p->state == TASK_DEAD). We can handle the two cases in the same
	 * way, because from GRUB's point of view the same thing is happening
	 * (the task moves from "active contending" to "active non contending"
	 * or "inactive")
	 */
	if (flags & DEQUEUE_SLEEP)
		task_non_contending(p);
}

/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
static void yield_task_dl(struct rq *rq)
{
	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	rq->curr->dl.dl_yielded = 1;

	update_rq_clock(rq);
	update_curr_dl(rq);
	/*
	 * Tell update_rq_clock() that we've just updated,
	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
	rq_clock_skip_update(rq);
}

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int flags)
{
	struct task_struct *curr;
	bool select_rq;
	struct rq *rq;

	if (!(flags & WF_TTWU))
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	select_rq = unlikely(dl_task(curr)) &&
		    (curr->nr_cpus_allowed < 2 ||
		     !dl_entity_preempt(&p->dl, &curr->dl)) &&
		    p->nr_cpus_allowed > 1;

	/*
	 * Take the capacity of the CPU into account to
	 * ensure it fits the requirement of the task.
	 */
	if (static_branch_unlikely(&sched_asym_cpucapacity))
		select_rq |= !dl_task_fits_capacity(p, cpu);

	if (select_rq) {
		int target = find_later_rq(p);

		if (target != -1 &&
		    (dl_time_before(p->dl.deadline,
				    cpu_rq(target)->dl.earliest_dl.curr) ||
		     (cpu_rq(target)->dl.dl_nr_running == 0)))
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
{
	struct rq *rq;

	if (p->state != TASK_WAKING)
		return;

	rq = task_rq(p);
	/*
	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
	 * rq->lock is not... So, lock it
	 */
	raw_spin_lock(&rq->lock);
	if (p->dl.dl_non_contending) {
		sub_running_bw(&p->dl, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	sub_rq_bw(&p->dl, &rq->dl);
	raw_spin_unlock(&rq->lock);
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL))
		return;

	resched_curr(rq);
}

static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
	if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we've
		 * not yet started the picking loop.
		 */
		rq_unpin_lock(rq, rf);
		pull_dl_task(rq);
		rq_repin_lock(rq, rf);
	}

	return sched_stop_runnable(rq) || sched_dl_runnable(rq);
}
#endif /* CONFIG_SMP */

/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
1828 */ 1829 if ((p->dl.deadline == rq->curr->dl.deadline) && 1830 !test_tsk_need_resched(rq->curr)) 1831 check_preempt_equal_dl(rq, p); 1832 #endif /* CONFIG_SMP */ 1833 } 1834 1835 #ifdef CONFIG_SCHED_HRTICK 1836 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) 1837 { 1838 hrtick_start(rq, p->dl.runtime); 1839 } 1840 #else /* !CONFIG_SCHED_HRTICK */ 1841 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) 1842 { 1843 } 1844 #endif 1845 1846 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) 1847 { 1848 p->se.exec_start = rq_clock_task(rq); 1849 1850 /* You can't push away the running task */ 1851 dequeue_pushable_dl_task(rq, p); 1852 1853 if (!first) 1854 return; 1855 1856 if (hrtick_enabled(rq)) 1857 start_hrtick_dl(rq, p); 1858 1859 if (rq->curr->sched_class != &dl_sched_class) 1860 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); 1861 1862 deadline_queue_push_tasks(rq); 1863 } 1864 1865 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, 1866 struct dl_rq *dl_rq) 1867 { 1868 struct rb_node *left = rb_first_cached(&dl_rq->root); 1869 1870 if (!left) 1871 return NULL; 1872 1873 return rb_entry(left, struct sched_dl_entity, rb_node); 1874 } 1875 1876 static struct task_struct *pick_next_task_dl(struct rq *rq) 1877 { 1878 struct sched_dl_entity *dl_se; 1879 struct dl_rq *dl_rq = &rq->dl; 1880 struct task_struct *p; 1881 1882 if (!sched_dl_runnable(rq)) 1883 return NULL; 1884 1885 dl_se = pick_next_dl_entity(rq, dl_rq); 1886 BUG_ON(!dl_se); 1887 p = dl_task_of(dl_se); 1888 set_next_task_dl(rq, p, true); 1889 return p; 1890 } 1891 1892 static void put_prev_task_dl(struct rq *rq, struct task_struct *p) 1893 { 1894 update_curr_dl(rq); 1895 1896 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); 1897 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) 1898 enqueue_pushable_dl_task(rq, p); 1899 } 1900 1901 /* 1902 * scheduler tick hitting a task of our scheduling class. 1903 * 1904 * NOTE: This function can be called remotely by the tick offload that 1905 * goes along full dynticks. Therefore no local assumption can be made 1906 * and everything must be accessed through the @rq and @curr passed in 1907 * parameters. 1908 */ 1909 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) 1910 { 1911 update_curr_dl(rq); 1912 1913 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); 1914 /* 1915 * Even when we have runtime, update_curr_dl() might have resulted in us 1916 * not being the leftmost task anymore. In that case NEED_RESCHED will 1917 * be set and schedule() will start a new hrtick for the next task. 
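 *
 * Otherwise we re-arm the tick for whatever budget the task has
 * left: start_hrtick_dl() programs the hrtimer p->dl.runtime ns
 * ahead, e.g. 2ms of remaining runtime means the next throttling
 * check fires in 2ms.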
1918  */
1919 	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1920 	    is_leftmost(p, &rq->dl))
1921 		start_hrtick_dl(rq, p);
1922 }
1923 
1924 static void task_fork_dl(struct task_struct *p)
1925 {
1926 	/*
1927 	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1928 	 * sched_fork().
1929 	 */
1930 }
1931 
1932 #ifdef CONFIG_SMP
1933 
1934 /* Only try algorithms three times */
1935 #define DL_MAX_TRIES 3
1936 
1937 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1938 {
1939 	if (!task_running(rq, p) &&
1940 	    cpumask_test_cpu(cpu, &p->cpus_mask))
1941 		return 1;
1942 	return 0;
1943 }
1944 
1945 /*
1946  * Return the earliest pushable task on @rq that is suitable to be
1947  * executed on @cpu, NULL otherwise:
1948  */
1949 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1950 {
1951 	struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1952 	struct task_struct *p = NULL;
1953 
1954 	if (!has_pushable_dl_tasks(rq))
1955 		return NULL;
1956 
1957 next_node:
1958 	if (next_node) {
1959 		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1960 
1961 		if (pick_dl_task(rq, p, cpu))
1962 			return p;
1963 
1964 		next_node = rb_next(next_node);
1965 		goto next_node;
1966 	}
1967 
1968 	return NULL;
1969 }
1970 
1971 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1972 
1973 static int find_later_rq(struct task_struct *task)
1974 {
1975 	struct sched_domain *sd;
1976 	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1977 	int this_cpu = smp_processor_id();
1978 	int cpu = task_cpu(task);
1979 
1980 	/* Make sure the mask is initialized first */
1981 	if (unlikely(!later_mask))
1982 		return -1;
1983 
1984 	if (task->nr_cpus_allowed == 1)
1985 		return -1;
1986 
1987 	/*
1988 	 * We have to consider system topology and task affinity
1989 	 * first, then we can look for a suitable CPU.
1990 	 */
1991 	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1992 		return -1;
1993 
1994 	/*
1995 	 * If we are here, some targets have been found, including
1996 	 * the most suitable which is, among the runqueues where the
1997 	 * current tasks have later deadlines than the task's one, the
1998 	 * rq with the latest possible one.
1999 	 *
2000 	 * Now we check how well this matches with task's
2001 	 * affinity and system topology.
2002 	 *
2003 	 * The last CPU where the task ran is our first
2004 	 * guess, since it is most likely cache-hot there.
2005 	 */
2006 	if (cpumask_test_cpu(cpu, later_mask))
2007 		return cpu;
2008 	/*
2009 	 * Check if this_cpu is to be skipped (i.e., it is
2010 	 * not in the mask) or not.
2011 	 */
2012 	if (!cpumask_test_cpu(this_cpu, later_mask))
2013 		this_cpu = -1;
2014 
2015 	rcu_read_lock();
2016 	for_each_domain(cpu, sd) {
2017 		if (sd->flags & SD_WAKE_AFFINE) {
2018 			int best_cpu;
2019 
2020 			/*
2021 			 * If possible, preempting this_cpu is
2022 			 * cheaper than migrating.
2023 			 */
2024 			if (this_cpu != -1 &&
2025 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2026 				rcu_read_unlock();
2027 				return this_cpu;
2028 			}
2029 
2030 			best_cpu = cpumask_any_and_distribute(later_mask,
2031 							      sched_domain_span(sd));
2032 			/*
2033 			 * Last chance: if a CPU that is in both later_mask
2034 			 * and the current sd span is valid, that becomes our
2035 			 * choice. Of course, the latest possible CPU is
2036 			 * already under consideration through later_mask.
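 *
 * To summarize, the overall search order is: task_cpu(task) if it
 * is in later_mask; then this_cpu, or any later_mask CPU, inside a
 * SD_WAKE_AFFINE domain of task_cpu; then this_cpu unconditionally;
 * and finally any CPU left in later_mask.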
2037 */ 2038 if (best_cpu < nr_cpu_ids) { 2039 rcu_read_unlock(); 2040 return best_cpu; 2041 } 2042 } 2043 } 2044 rcu_read_unlock(); 2045 2046 /* 2047 * At this point, all our guesses failed, we just return 2048 * 'something', and let the caller sort the things out. 2049 */ 2050 if (this_cpu != -1) 2051 return this_cpu; 2052 2053 cpu = cpumask_any_distribute(later_mask); 2054 if (cpu < nr_cpu_ids) 2055 return cpu; 2056 2057 return -1; 2058 } 2059 2060 /* Locks the rq it finds */ 2061 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) 2062 { 2063 struct rq *later_rq = NULL; 2064 int tries; 2065 int cpu; 2066 2067 for (tries = 0; tries < DL_MAX_TRIES; tries++) { 2068 cpu = find_later_rq(task); 2069 2070 if ((cpu == -1) || (cpu == rq->cpu)) 2071 break; 2072 2073 later_rq = cpu_rq(cpu); 2074 2075 if (later_rq->dl.dl_nr_running && 2076 !dl_time_before(task->dl.deadline, 2077 later_rq->dl.earliest_dl.curr)) { 2078 /* 2079 * Target rq has tasks of equal or earlier deadline, 2080 * retrying does not release any lock and is unlikely 2081 * to yield a different result. 2082 */ 2083 later_rq = NULL; 2084 break; 2085 } 2086 2087 /* Retry if something changed. */ 2088 if (double_lock_balance(rq, later_rq)) { 2089 if (unlikely(task_rq(task) != rq || 2090 !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) || 2091 task_running(rq, task) || 2092 !dl_task(task) || 2093 !task_on_rq_queued(task))) { 2094 double_unlock_balance(rq, later_rq); 2095 later_rq = NULL; 2096 break; 2097 } 2098 } 2099 2100 /* 2101 * If the rq we found has no -deadline task, or 2102 * its earliest one has a later deadline than our 2103 * task, the rq is a good one. 2104 */ 2105 if (!later_rq->dl.dl_nr_running || 2106 dl_time_before(task->dl.deadline, 2107 later_rq->dl.earliest_dl.curr)) 2108 break; 2109 2110 /* Otherwise we try again. */ 2111 double_unlock_balance(rq, later_rq); 2112 later_rq = NULL; 2113 } 2114 2115 return later_rq; 2116 } 2117 2118 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) 2119 { 2120 struct task_struct *p; 2121 2122 if (!has_pushable_dl_tasks(rq)) 2123 return NULL; 2124 2125 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost, 2126 struct task_struct, pushable_dl_tasks); 2127 2128 BUG_ON(rq->cpu != task_cpu(p)); 2129 BUG_ON(task_current(rq, p)); 2130 BUG_ON(p->nr_cpus_allowed <= 1); 2131 2132 BUG_ON(!task_on_rq_queued(p)); 2133 BUG_ON(!dl_task(p)); 2134 2135 return p; 2136 } 2137 2138 /* 2139 * See if the non running -deadline tasks on this rq 2140 * can be sent to some other CPU where they can preempt 2141 * and start executing. 2142 */ 2143 static int push_dl_task(struct rq *rq) 2144 { 2145 struct task_struct *next_task; 2146 struct rq *later_rq; 2147 int ret = 0; 2148 2149 if (!rq->dl.overloaded) 2150 return 0; 2151 2152 next_task = pick_next_pushable_dl_task(rq); 2153 if (!next_task) 2154 return 0; 2155 2156 retry: 2157 if (is_migration_disabled(next_task)) 2158 return 0; 2159 2160 if (WARN_ON(next_task == rq->curr)) 2161 return 0; 2162 2163 /* 2164 * If next_task preempts rq->curr, and rq->curr 2165 * can move away, it makes sense to just reschedule 2166 * without going further in pushing next_task. 
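 *
 * E.g. next_task's deadline is at t+10ms, rq->curr's is at t+50ms
 * and curr is allowed on other CPUs: rescheduling locally lets
 * next_task run immediately, and the preempted curr itself becomes
 * a pushable candidate.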
2167  */
2168 	if (dl_task(rq->curr) &&
2169 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2170 	    rq->curr->nr_cpus_allowed > 1) {
2171 		resched_curr(rq);
2172 		return 0;
2173 	}
2174 
2175 	/* We might release rq lock */
2176 	get_task_struct(next_task);
2177 
2178 	/* Will lock the rq it'll find */
2179 	later_rq = find_lock_later_rq(next_task, rq);
2180 	if (!later_rq) {
2181 		struct task_struct *task;
2182 
2183 		/*
2184 		 * We must check all this again, since
2185 		 * find_lock_later_rq releases rq->lock and it is
2186 		 * then possible that next_task has migrated.
2187 		 */
2188 		task = pick_next_pushable_dl_task(rq);
2189 		if (task == next_task) {
2190 			/*
2191 			 * The task is still there. We don't try
2192 			 * again, some other CPU will pull it when ready.
2193 			 */
2194 			goto out;
2195 		}
2196 
2197 		if (!task)
2198 			/* No more tasks */
2199 			goto out;
2200 
2201 		put_task_struct(next_task);
2202 		next_task = task;
2203 		goto retry;
2204 	}
2205 
2206 	deactivate_task(rq, next_task, 0);
2207 	set_task_cpu(next_task, later_rq->cpu);
2208 
2209 	/*
2210 	 * Update the later_rq clock here, because the clock is used
2211 	 * by the cpufreq_update_util() inside __add_running_bw().
2212 	 */
2213 	update_rq_clock(later_rq);
2214 	activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2215 	ret = 1;
2216 
2217 	resched_curr(later_rq);
2218 
2219 	double_unlock_balance(rq, later_rq);
2220 
2221 out:
2222 	put_task_struct(next_task);
2223 
2224 	return ret;
2225 }
2226 
2227 static void push_dl_tasks(struct rq *rq)
2228 {
2229 	/* push_dl_task() will return true if it moved a -deadline task */
2230 	while (push_dl_task(rq))
2231 		;
2232 }
2233 
2234 static void pull_dl_task(struct rq *this_rq)
2235 {
2236 	int this_cpu = this_rq->cpu, cpu;
2237 	struct task_struct *p, *push_task;
2238 	bool resched = false;
2239 	struct rq *src_rq;
2240 	u64 dmin = LONG_MAX;
2241 
2242 	if (likely(!dl_overloaded(this_rq)))
2243 		return;
2244 
2245 	/*
2246 	 * Match the barrier from dl_set_overloaded; this guarantees that if we
2247 	 * see overloaded we must also see the dlo_mask bit.
2248 	 */
2249 	smp_rmb();
2250 
2251 	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2252 		if (this_cpu == cpu)
2253 			continue;
2254 
2255 		src_rq = cpu_rq(cpu);
2256 
2257 		/*
2258 		 * It looks racy, and it is! However, as in sched_rt.c,
2259 		 * we are fine with this.
2260 		 */
2261 		if (this_rq->dl.dl_nr_running &&
2262 		    dl_time_before(this_rq->dl.earliest_dl.curr,
2263 				   src_rq->dl.earliest_dl.next))
2264 			continue;
2265 
2266 		/* Might drop this_rq->lock */
2267 		push_task = NULL;
2268 		double_lock_balance(this_rq, src_rq);
2269 
2270 		/*
2271 		 * If there are no more pullable tasks on the
2272 		 * rq, we're done with it.
2273 		 */
2274 		if (src_rq->dl.dl_nr_running <= 1)
2275 			goto skip;
2276 
2277 		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2278 
2279 		/*
2280 		 * We found a task to be pulled if:
2281 		 *  - it preempts our current (if there's one),
2282 		 *  - it will preempt the last one we pulled (if any).
2283 		 */
2284 		if (p && dl_time_before(p->dl.deadline, dmin) &&
2285 		    (!this_rq->dl.dl_nr_running ||
2286 		     dl_time_before(p->dl.deadline,
2287 				    this_rq->dl.earliest_dl.curr))) {
2288 			WARN_ON(p == src_rq->curr);
2289 			WARN_ON(!task_on_rq_queued(p));
2290 
2291 			/*
2292 			 * Then we pull iff p has actually an earlier
2293 			 * deadline than the current task of its runqueue.
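 *
 * E.g. if src_rq->curr's deadline is at t+2ms and p's at t+5ms,
 * p is correctly queued behind an earlier-deadline task on its
 * own rq and we skip it; only when p's deadline beats
 * src_rq->curr's is p both urgent and starved enough to be worth
 * migrating here.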
2294 			 */
2295 			if (dl_time_before(src_rq->curr->dl.deadline,
2296 					   p->dl.deadline))
2297 				goto skip;
2298 
2299 			if (is_migration_disabled(p)) {
2300 				push_task = get_push_task(src_rq);
2301 			} else {
2302 				deactivate_task(src_rq, p, 0);
2303 				set_task_cpu(p, this_cpu);
2304 				activate_task(this_rq, p, 0);
2305 				dmin = p->dl.deadline;
2306 				resched = true;
2307 			}
2308 
2309 			/* Is there any other task even earlier? */
2310 		}
2311 skip:
2312 		double_unlock_balance(this_rq, src_rq);
2313 
2314 		if (push_task) {
2315 			raw_spin_unlock(&this_rq->lock);
2316 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2317 					    push_task, &src_rq->push_work);
2318 			raw_spin_lock(&this_rq->lock);
2319 		}
2320 	}
2321 
2322 	if (resched)
2323 		resched_curr(this_rq);
2324 }
2325 
2326 /*
2327  * Since the task is not running and a reschedule is not going to happen
2328  * anytime soon on its runqueue, we try pushing it away now.
2329  */
2330 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2331 {
2332 	if (!task_running(rq, p) &&
2333 	    !test_tsk_need_resched(rq->curr) &&
2334 	    p->nr_cpus_allowed > 1 &&
2335 	    dl_task(rq->curr) &&
2336 	    (rq->curr->nr_cpus_allowed < 2 ||
2337 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2338 		push_dl_tasks(rq);
2339 	}
2340 }
2341 
2342 static void set_cpus_allowed_dl(struct task_struct *p,
2343 				const struct cpumask *new_mask,
2344 				u32 flags)
2345 {
2346 	struct root_domain *src_rd;
2347 	struct rq *rq;
2348 
2349 	BUG_ON(!dl_task(p));
2350 
2351 	rq = task_rq(p);
2352 	src_rd = rq->rd;
2353 	/*
2354 	 * Migrating a SCHED_DEADLINE task between exclusive
2355 	 * cpusets (different root_domains) entails a bandwidth
2356 	 * update. We already made space for us in the destination
2357 	 * domain (see cpuset_can_attach()).
2358 	 */
2359 	if (!cpumask_intersects(src_rd->span, new_mask)) {
2360 		struct dl_bw *src_dl_b;
2361 
2362 		src_dl_b = dl_bw_of(cpu_of(rq));
2363 		/*
2364 		 * We now free resources of the root_domain we are migrating
2365 		 * off. In the worst case, sched_setattr() may temporarily fail
2366 		 * until we complete the update.
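 *
 * Illustrative hand-off for a task whose dl_bw is a quarter of a
 * CPU moving from root domain A to B: cpuset_can_attach() already
 * did __dl_add() on B, and the __dl_sub() below drops the share
 * still accounted to A. In between, the bandwidth is counted
 * twice, which can only make admission control stricter, never
 * unsafe.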
2367 */ 2368 raw_spin_lock(&src_dl_b->lock); 2369 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); 2370 raw_spin_unlock(&src_dl_b->lock); 2371 } 2372 2373 set_cpus_allowed_common(p, new_mask, flags); 2374 } 2375 2376 /* Assumes rq->lock is held */ 2377 static void rq_online_dl(struct rq *rq) 2378 { 2379 if (rq->dl.overloaded) 2380 dl_set_overload(rq); 2381 2382 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu); 2383 if (rq->dl.dl_nr_running > 0) 2384 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr); 2385 } 2386 2387 /* Assumes rq->lock is held */ 2388 static void rq_offline_dl(struct rq *rq) 2389 { 2390 if (rq->dl.overloaded) 2391 dl_clear_overload(rq); 2392 2393 cpudl_clear(&rq->rd->cpudl, rq->cpu); 2394 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu); 2395 } 2396 2397 void __init init_sched_dl_class(void) 2398 { 2399 unsigned int i; 2400 2401 for_each_possible_cpu(i) 2402 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i), 2403 GFP_KERNEL, cpu_to_node(i)); 2404 } 2405 2406 void dl_add_task_root_domain(struct task_struct *p) 2407 { 2408 struct rq_flags rf; 2409 struct rq *rq; 2410 struct dl_bw *dl_b; 2411 2412 rq = task_rq_lock(p, &rf); 2413 if (!dl_task(p)) 2414 goto unlock; 2415 2416 dl_b = &rq->rd->dl_bw; 2417 raw_spin_lock(&dl_b->lock); 2418 2419 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); 2420 2421 raw_spin_unlock(&dl_b->lock); 2422 2423 unlock: 2424 task_rq_unlock(rq, p, &rf); 2425 } 2426 2427 void dl_clear_root_domain(struct root_domain *rd) 2428 { 2429 unsigned long flags; 2430 2431 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags); 2432 rd->dl_bw.total_bw = 0; 2433 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags); 2434 } 2435 2436 #endif /* CONFIG_SMP */ 2437 2438 static void switched_from_dl(struct rq *rq, struct task_struct *p) 2439 { 2440 /* 2441 * task_non_contending() can start the "inactive timer" (if the 0-lag 2442 * time is in the future). If the task switches back to dl before 2443 * the "inactive timer" fires, it can continue to consume its current 2444 * runtime using its current deadline. If it stays outside of 2445 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer() 2446 * will reset the task parameters. 2447 */ 2448 if (task_on_rq_queued(p) && p->dl.dl_runtime) 2449 task_non_contending(p); 2450 2451 if (!task_on_rq_queued(p)) { 2452 /* 2453 * Inactive timer is armed. However, p is leaving DEADLINE and 2454 * might migrate away from this rq while continuing to run on 2455 * some other class. We need to remove its contribution from 2456 * this rq running_bw now, or sub_rq_bw (below) will complain. 2457 */ 2458 if (p->dl.dl_non_contending) 2459 sub_running_bw(&p->dl, &rq->dl); 2460 sub_rq_bw(&p->dl, &rq->dl); 2461 } 2462 2463 /* 2464 * We cannot use inactive_task_timer() to invoke sub_running_bw() 2465 * at the 0-lag time, because the task could have been migrated 2466 * while SCHED_OTHER in the meanwhile. 2467 */ 2468 if (p->dl.dl_non_contending) 2469 p->dl.dl_non_contending = 0; 2470 2471 /* 2472 * Since this might be the only -deadline task on the rq, 2473 * this is the right place to try to pull some other one 2474 * from an overloaded CPU, if any. 2475 */ 2476 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) 2477 return; 2478 2479 deadline_queue_pull_task(rq); 2480 } 2481 2482 /* 2483 * When switching to -deadline, we may overload the rq, then 2484 * we try to push someone off, if possible. 
2485  */
2486 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2487 {
2488 	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2489 		put_task_struct(p);
2490 
2491 	/* If p is not queued we will update its parameters at next wakeup. */
2492 	if (!task_on_rq_queued(p)) {
2493 		add_rq_bw(&p->dl, &rq->dl);
2494 
2495 		return;
2496 	}
2497 
2498 	if (rq->curr != p) {
2499 #ifdef CONFIG_SMP
2500 		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2501 			deadline_queue_push_tasks(rq);
2502 #endif
2503 		if (dl_task(rq->curr))
2504 			check_preempt_curr_dl(rq, p, 0);
2505 		else
2506 			resched_curr(rq);
2507 	}
2508 }
2509 
2510 /*
2511  * If the scheduling parameters of a -deadline task changed,
2512  * a push or pull operation might be needed.
2513  */
2514 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2515 			    int oldprio)
2516 {
2517 	if (task_on_rq_queued(p) || rq->curr == p) {
2518 #ifdef CONFIG_SMP
2519 		/*
2520 		 * This might be too much, but unfortunately
2521 		 * we don't have the old deadline value, and
2522 		 * we can't tell whether the task is increasing
2523 		 * or lowering its prio, so...
2524 		 */
2525 		if (!rq->dl.overloaded)
2526 			deadline_queue_pull_task(rq);
2527 
2528 		/*
2529 		 * If we now have an earlier deadline task than p,
2530 		 * then reschedule, provided p is still on this
2531 		 * runqueue.
2532 		 */
2533 		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2534 			resched_curr(rq);
2535 #else
2536 		/*
2537 		 * Again, we don't know if p has an earlier
2538 		 * or later deadline, so let's blindly set a
2539 		 * (maybe not needed) rescheduling point.
2540 		 */
2541 		resched_curr(rq);
2542 #endif /* CONFIG_SMP */
2543 	}
2544 }
2545 
2546 DEFINE_SCHED_CLASS(dl) = {
2547 
2548 	.enqueue_task		= enqueue_task_dl,
2549 	.dequeue_task		= dequeue_task_dl,
2550 	.yield_task		= yield_task_dl,
2551 
2552 	.check_preempt_curr	= check_preempt_curr_dl,
2553 
2554 	.pick_next_task		= pick_next_task_dl,
2555 	.put_prev_task		= put_prev_task_dl,
2556 	.set_next_task		= set_next_task_dl,
2557 
2558 #ifdef CONFIG_SMP
2559 	.balance		= balance_dl,
2560 	.select_task_rq		= select_task_rq_dl,
2561 	.migrate_task_rq	= migrate_task_rq_dl,
2562 	.set_cpus_allowed	= set_cpus_allowed_dl,
2563 	.rq_online		= rq_online_dl,
2564 	.rq_offline		= rq_offline_dl,
2565 	.task_woken		= task_woken_dl,
2566 	.find_lock_rq		= find_lock_later_rq,
2567 #endif
2568 
2569 	.task_tick		= task_tick_dl,
2570 	.task_fork		= task_fork_dl,
2571 
2572 	.prio_changed		= prio_changed_dl,
2573 	.switched_from		= switched_from_dl,
2574 	.switched_to		= switched_to_dl,
2575 
2576 	.update_curr		= update_curr_dl,
2577 };
2578 
2579 /* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
2580 static u64 dl_generation;
2581 
2582 int sched_dl_global_validate(void)
2583 {
2584 	u64 runtime = global_rt_runtime();
2585 	u64 period = global_rt_period();
2586 	u64 new_bw = to_ratio(period, runtime);
2587 	u64 gen = ++dl_generation;
2588 	struct dl_bw *dl_b;
2589 	int cpu, cpus, ret = 0;
2590 	unsigned long flags;
2591 
2592 	/*
2593 	 * Here we want to check that the bandwidth is not being set to a
2594 	 * value smaller than the bandwidth currently allocated in
2595 	 * any of the root_domains.
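 *
 * E.g. with sched_rt_period_us = 1000000 and sched_rt_runtime_us =
 * 950000, new_bw is 0.95 in BW_SHIFT fixed point; a root domain
 * spanning 4 CPUs can then hold deadline tasks totalling at most
 * 4 * 0.95 = 3.8 CPUs of bandwidth, and shrinking the limit below
 * what is already allocated fails with -EBUSY.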
2596 	 */
2597 	for_each_possible_cpu(cpu) {
2598 		rcu_read_lock_sched();
2599 
2600 		if (dl_bw_visited(cpu, gen))
2601 			goto next;
2602 
2603 		dl_b = dl_bw_of(cpu);
2604 		cpus = dl_bw_cpus(cpu);
2605 
2606 		raw_spin_lock_irqsave(&dl_b->lock, flags);
2607 		if (new_bw * cpus < dl_b->total_bw)
2608 			ret = -EBUSY;
2609 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2610 
2611 next:
2612 		rcu_read_unlock_sched();
2613 
2614 		if (ret)
2615 			break;
2616 	}
2617 
2618 	return ret;
2619 }
2620 
2621 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2622 {
2623 	if (global_rt_runtime() == RUNTIME_INF) {
2624 		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2625 		dl_rq->extra_bw = 1 << BW_SHIFT;
2626 	} else {
2627 		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2628 			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2629 		dl_rq->extra_bw = to_ratio(global_rt_period(),
2630 					   global_rt_runtime());
2631 	}
2632 }
2633 
2634 void sched_dl_do_global(void)
2635 {
2636 	u64 new_bw = -1;
2637 	u64 gen = ++dl_generation;
2638 	struct dl_bw *dl_b;
2639 	int cpu;
2640 	unsigned long flags;
2641 
2642 	def_dl_bandwidth.dl_period = global_rt_period();
2643 	def_dl_bandwidth.dl_runtime = global_rt_runtime();
2644 
2645 	if (global_rt_runtime() != RUNTIME_INF)
2646 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2647 
2648 	for_each_possible_cpu(cpu) {
2649 		rcu_read_lock_sched();
2650 
2651 		if (dl_bw_visited(cpu, gen)) {
2652 			rcu_read_unlock_sched();
2653 			continue;
2654 		}
2655 
2656 		dl_b = dl_bw_of(cpu);
2657 
2658 		raw_spin_lock_irqsave(&dl_b->lock, flags);
2659 		dl_b->bw = new_bw;
2660 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2661 
2662 		rcu_read_unlock_sched();
2663 		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2664 	}
2665 }
2666 
2667 /*
2668  * We must be sure that accepting a new task (or allowing changing the
2669  * parameters of an existing one) is consistent with the bandwidth
2670  * constraints. If so, this function also updates the currently
2671  * allocated bandwidth to reflect the new situation.
2672  *
2673  * This function is called while holding p's rq->lock.
2674  */
2675 int sched_dl_overflow(struct task_struct *p, int policy,
2676 		      const struct sched_attr *attr)
2677 {
2678 	u64 period = attr->sched_period ?: attr->sched_deadline;
2679 	u64 runtime = attr->sched_runtime;
2680 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2681 	int cpus, err = -1, cpu = task_cpu(p);
2682 	struct dl_bw *dl_b = dl_bw_of(cpu);
2683 	unsigned long cap;
2684 
2685 	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2686 		return 0;
2687 
2688 	/* !deadline task may carry old deadline bandwidth */
2689 	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2690 		return 0;
2691 
2692 	/*
2693 	 * Whether a task enters, leaves, or stays -deadline but changes
2694 	 * its parameters, we may need to update the total allocated
2695 	 * bandwidth of the container accordingly.
2696 	 */
2697 	raw_spin_lock(&dl_b->lock);
2698 	cpus = dl_bw_cpus(cpu);
2699 	cap = dl_bw_capacity(cpu);
2700 
2701 	if (dl_policy(policy) && !task_has_dl_policy(p) &&
2702 	    !__dl_overflow(dl_b, cap, 0, new_bw)) {
2703 		if (hrtimer_active(&p->dl.inactive_timer))
2704 			__dl_sub(dl_b, p->dl.dl_bw, cpus);
2705 		__dl_add(dl_b, new_bw, cpus);
2706 		err = 0;
2707 	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
2708 		   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2709 		/*
2710 		 * XXX this is slightly incorrect: when the task
2711 		 * utilization decreases, we should delay the total
2712 		 * utilization change until the task's 0-lag point.
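 *
 * (The 0-lag time is the instant at which the task's lag with
 * respect to its reservation becomes zero; task_non_contending()
 * computes it, roughly, as
 *
 *	deadline - runtime * dl_period / dl_runtime
 *
 * i.e. the remaining runtime divided by the reserved bandwidth,
 * counted back from the absolute deadline.)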
2713 		 * But this would require setting the task's "inactive
2714 		 * timer" when the task is not inactive.
2715 		 */
2716 		__dl_sub(dl_b, p->dl.dl_bw, cpus);
2717 		__dl_add(dl_b, new_bw, cpus);
2718 		dl_change_utilization(p, new_bw);
2719 		err = 0;
2720 	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2721 		/*
2722 		 * Do not decrease the total deadline utilization here,
2723 		 * switched_from_dl() will take care to do it at the correct
2724 		 * (0-lag) time.
2725 		 */
2726 		err = 0;
2727 	}
2728 	raw_spin_unlock(&dl_b->lock);
2729 
2730 	return err;
2731 }
2732 
2733 /*
2734  * This function initializes the sched_dl_entity of a newly becoming
2735  * SCHED_DEADLINE task.
2736  *
2737  * Only the static values are considered here, the actual runtime and the
2738  * absolute deadline will be properly calculated when the task is enqueued
2739  * for the first time with its new policy.
2740  */
2741 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2742 {
2743 	struct sched_dl_entity *dl_se = &p->dl;
2744 
2745 	dl_se->dl_runtime = attr->sched_runtime;
2746 	dl_se->dl_deadline = attr->sched_deadline;
2747 	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2748 	dl_se->flags = attr->sched_flags;
2749 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2750 	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2751 }
2752 
2753 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2754 {
2755 	struct sched_dl_entity *dl_se = &p->dl;
2756 
2757 	attr->sched_priority = p->rt_priority;
2758 	attr->sched_runtime = dl_se->dl_runtime;
2759 	attr->sched_deadline = dl_se->dl_deadline;
2760 	attr->sched_period = dl_se->dl_period;
2761 	attr->sched_flags = dl_se->flags;
2762 }
2763 
2764 /*
2765  * Default limits for DL period; on the top end we guard against small util
2766  * tasks still getting ridiculously long effective runtimes, on the bottom
2767  * end we guard against timer DoS.
2768  */
2769 unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
2770 unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
2771 
2772 /*
2773  * This function validates the new parameters of a -deadline task.
2774  * We ask for the deadline to be non-zero and no smaller than the
2775  * runtime, and for the period to be either zero or no smaller
2776  * than the deadline. Furthermore, we have to be sure that
2777  * user parameters are above the internal resolution of 1us (we
2778  * check sched_runtime only since it is always the smaller one) and
2779  * below 2^63 ns (we have to check both sched_deadline and
2780  * sched_period, as the latter can be zero).
2781  */
2782 bool __checkparam_dl(const struct sched_attr *attr)
2783 {
2784 	u64 period, max, min;
2785 
2786 	/* special dl tasks don't actually use any parameter */
2787 	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2788 		return true;
2789 
2790 	/* deadline != 0 */
2791 	if (attr->sched_deadline == 0)
2792 		return false;
2793 
2794 	/*
2795 	 * Since we truncate DL_SCALE bits, make sure we're at least
2796 	 * that big.
2797 	 */
2798 	if (attr->sched_runtime < (1ULL << DL_SCALE))
2799 		return false;
2800 
2801 	/*
2802 	 * Since we use the MSB for wrap-around and sign issues, make
2803 	 * sure it's not set (mind that period can be equal to zero).
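 *
 * Putting the checks together, an illustrative set of values:
 * sched_runtime = 10ms, sched_deadline = 30ms, sched_period = 100ms
 * is accepted, while sched_runtime = 500ns fails the 1 << DL_SCALE
 * resolution check above, and any period outside the
 * [sysctl_sched_dl_period_min, sysctl_sched_dl_period_max] window
 * (checked below) is rejected as well.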
2804 */ 2805 if (attr->sched_deadline & (1ULL << 63) || 2806 attr->sched_period & (1ULL << 63)) 2807 return false; 2808 2809 period = attr->sched_period; 2810 if (!period) 2811 period = attr->sched_deadline; 2812 2813 /* runtime <= deadline <= period (if period != 0) */ 2814 if (period < attr->sched_deadline || 2815 attr->sched_deadline < attr->sched_runtime) 2816 return false; 2817 2818 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC; 2819 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC; 2820 2821 if (period < min || period > max) 2822 return false; 2823 2824 return true; 2825 } 2826 2827 /* 2828 * This function clears the sched_dl_entity static params. 2829 */ 2830 void __dl_clear_params(struct task_struct *p) 2831 { 2832 struct sched_dl_entity *dl_se = &p->dl; 2833 2834 dl_se->dl_runtime = 0; 2835 dl_se->dl_deadline = 0; 2836 dl_se->dl_period = 0; 2837 dl_se->flags = 0; 2838 dl_se->dl_bw = 0; 2839 dl_se->dl_density = 0; 2840 2841 dl_se->dl_throttled = 0; 2842 dl_se->dl_yielded = 0; 2843 dl_se->dl_non_contending = 0; 2844 dl_se->dl_overrun = 0; 2845 2846 #ifdef CONFIG_RT_MUTEXES 2847 dl_se->pi_se = dl_se; 2848 #endif 2849 } 2850 2851 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) 2852 { 2853 struct sched_dl_entity *dl_se = &p->dl; 2854 2855 if (dl_se->dl_runtime != attr->sched_runtime || 2856 dl_se->dl_deadline != attr->sched_deadline || 2857 dl_se->dl_period != attr->sched_period || 2858 dl_se->flags != attr->sched_flags) 2859 return true; 2860 2861 return false; 2862 } 2863 2864 #ifdef CONFIG_SMP 2865 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed) 2866 { 2867 unsigned long flags, cap; 2868 unsigned int dest_cpu; 2869 struct dl_bw *dl_b; 2870 bool overflow; 2871 int ret; 2872 2873 dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed); 2874 2875 rcu_read_lock_sched(); 2876 dl_b = dl_bw_of(dest_cpu); 2877 raw_spin_lock_irqsave(&dl_b->lock, flags); 2878 cap = dl_bw_capacity(dest_cpu); 2879 overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw); 2880 if (overflow) { 2881 ret = -EBUSY; 2882 } else { 2883 /* 2884 * We reserve space for this task in the destination 2885 * root_domain, as we can't fail after this point. 2886 * We will free resources in the source root_domain 2887 * later on (see set_cpus_allowed_dl()). 
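 *
 * This mirrors the hand-off described in set_cpus_allowed_dl():
 * reserve in the destination first, release from the source second,
 * so the task's bandwidth is never unaccounted for, at the price of
 * being counted twice while the move is in flight.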
2888 */ 2889 int cpus = dl_bw_cpus(dest_cpu); 2890 2891 __dl_add(dl_b, p->dl.dl_bw, cpus); 2892 ret = 0; 2893 } 2894 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2895 rcu_read_unlock_sched(); 2896 2897 return ret; 2898 } 2899 2900 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, 2901 const struct cpumask *trial) 2902 { 2903 int ret = 1, trial_cpus; 2904 struct dl_bw *cur_dl_b; 2905 unsigned long flags; 2906 2907 rcu_read_lock_sched(); 2908 cur_dl_b = dl_bw_of(cpumask_any(cur)); 2909 trial_cpus = cpumask_weight(trial); 2910 2911 raw_spin_lock_irqsave(&cur_dl_b->lock, flags); 2912 if (cur_dl_b->bw != -1 && 2913 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw) 2914 ret = 0; 2915 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); 2916 rcu_read_unlock_sched(); 2917 2918 return ret; 2919 } 2920 2921 bool dl_cpu_busy(unsigned int cpu) 2922 { 2923 unsigned long flags, cap; 2924 struct dl_bw *dl_b; 2925 bool overflow; 2926 2927 rcu_read_lock_sched(); 2928 dl_b = dl_bw_of(cpu); 2929 raw_spin_lock_irqsave(&dl_b->lock, flags); 2930 cap = dl_bw_capacity(cpu); 2931 overflow = __dl_overflow(dl_b, cap, 0, 0); 2932 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2933 rcu_read_unlock_sched(); 2934 2935 return overflow; 2936 } 2937 #endif 2938 2939 #ifdef CONFIG_SCHED_DEBUG 2940 void print_dl_stats(struct seq_file *m, int cpu) 2941 { 2942 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); 2943 } 2944 #endif /* CONFIG_SCHED_DEBUG */ 2945
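
/*
 * For reference, a minimal userspace sketch (illustrative, error
 * handling omitted) of how the parameters validated above are set
 * up; glibc provides no wrapper, hence the raw syscall:
 *
 *	#include <linux/sched/types.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	// 10ms, in ns
 *		.sched_deadline	=  30 * 1000 * 1000,	// 30ms
 *		.sched_period	= 100 * 1000 * 1000,	// 100ms
 *	};
 *
 *	syscall(SYS_sched_setattr, 0, &attr, 0);	// 0 == this thread
 *
 * Such a task reserves 10ms of CPU time every 100ms and passes
 * __checkparam_dl(); whether it is actually admitted then depends
 * on the sched_dl_overflow() bandwidth check against its root
 * domain.
 */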