1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Deadline Scheduling Class (SCHED_DEADLINE) 4 * 5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS). 6 * 7 * Tasks that periodically executes their instances for less than their 8 * runtime won't miss any of their deadlines. 9 * Tasks that are not periodic or sporadic or that tries to execute more 10 * than their reserved bandwidth will be slowed down (and may potentially 11 * miss some of their deadlines), and won't affect any other task. 12 * 13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>, 14 * Juri Lelli <juri.lelli@gmail.com>, 15 * Michael Trimarchi <michael@amarulasolutions.com>, 16 * Fabio Checconi <fchecconi@gmail.com> 17 */ 18 #include "sched.h" 19 #include "pelt.h" 20 21 struct dl_bandwidth def_dl_bandwidth; 22 23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se) 24 { 25 return container_of(dl_se, struct task_struct, dl); 26 } 27 28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) 29 { 30 return container_of(dl_rq, struct rq, dl); 31 } 32 33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se) 34 { 35 struct task_struct *p = dl_task_of(dl_se); 36 struct rq *rq = task_rq(p); 37 38 return &rq->dl; 39 } 40 41 static inline int on_dl_rq(struct sched_dl_entity *dl_se) 42 { 43 return !RB_EMPTY_NODE(&dl_se->rb_node); 44 } 45 46 #ifdef CONFIG_RT_MUTEXES 47 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se) 48 { 49 return dl_se->pi_se; 50 } 51 52 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se) 53 { 54 return pi_of(dl_se) != dl_se; 55 } 56 #else 57 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se) 58 { 59 return dl_se; 60 } 61 62 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se) 63 { 64 return false; 65 } 66 #endif 67 68 #ifdef CONFIG_SMP 69 static inline struct dl_bw *dl_bw_of(int i) 70 { 71 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 72 "sched RCU must be held"); 73 return &cpu_rq(i)->rd->dl_bw; 74 } 75 76 static inline int dl_bw_cpus(int i) 77 { 78 struct root_domain *rd = cpu_rq(i)->rd; 79 int cpus; 80 81 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 82 "sched RCU must be held"); 83 84 if (cpumask_subset(rd->span, cpu_active_mask)) 85 return cpumask_weight(rd->span); 86 87 cpus = 0; 88 89 for_each_cpu_and(i, rd->span, cpu_active_mask) 90 cpus++; 91 92 return cpus; 93 } 94 95 static inline unsigned long __dl_bw_capacity(int i) 96 { 97 struct root_domain *rd = cpu_rq(i)->rd; 98 unsigned long cap = 0; 99 100 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 101 "sched RCU must be held"); 102 103 for_each_cpu_and(i, rd->span, cpu_active_mask) 104 cap += capacity_orig_of(i); 105 106 return cap; 107 } 108 109 /* 110 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity 111 * of the CPU the task is running on rather rd's \Sum CPU capacity. 
112 */ 113 static inline unsigned long dl_bw_capacity(int i) 114 { 115 if (!static_branch_unlikely(&sched_asym_cpucapacity) && 116 capacity_orig_of(i) == SCHED_CAPACITY_SCALE) { 117 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT; 118 } else { 119 return __dl_bw_capacity(i); 120 } 121 } 122 #else 123 static inline struct dl_bw *dl_bw_of(int i) 124 { 125 return &cpu_rq(i)->dl.dl_bw; 126 } 127 128 static inline int dl_bw_cpus(int i) 129 { 130 return 1; 131 } 132 133 static inline unsigned long dl_bw_capacity(int i) 134 { 135 return SCHED_CAPACITY_SCALE; 136 } 137 #endif 138 139 static inline 140 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq) 141 { 142 u64 old = dl_rq->running_bw; 143 144 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); 145 dl_rq->running_bw += dl_bw; 146 SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */ 147 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); 148 /* kick cpufreq (see the comment in kernel/sched/sched.h). */ 149 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); 150 } 151 152 static inline 153 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq) 154 { 155 u64 old = dl_rq->running_bw; 156 157 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); 158 dl_rq->running_bw -= dl_bw; 159 SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */ 160 if (dl_rq->running_bw > old) 161 dl_rq->running_bw = 0; 162 /* kick cpufreq (see the comment in kernel/sched/sched.h). */ 163 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); 164 } 165 166 static inline 167 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) 168 { 169 u64 old = dl_rq->this_bw; 170 171 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); 172 dl_rq->this_bw += dl_bw; 173 SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */ 174 } 175 176 static inline 177 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) 178 { 179 u64 old = dl_rq->this_bw; 180 181 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); 182 dl_rq->this_bw -= dl_bw; 183 SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */ 184 if (dl_rq->this_bw > old) 185 dl_rq->this_bw = 0; 186 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); 187 } 188 189 static inline 190 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 191 { 192 if (!dl_entity_is_special(dl_se)) 193 __add_rq_bw(dl_se->dl_bw, dl_rq); 194 } 195 196 static inline 197 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 198 { 199 if (!dl_entity_is_special(dl_se)) 200 __sub_rq_bw(dl_se->dl_bw, dl_rq); 201 } 202 203 static inline 204 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 205 { 206 if (!dl_entity_is_special(dl_se)) 207 __add_running_bw(dl_se->dl_bw, dl_rq); 208 } 209 210 static inline 211 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 212 { 213 if (!dl_entity_is_special(dl_se)) 214 __sub_running_bw(dl_se->dl_bw, dl_rq); 215 } 216 217 static void dl_change_utilization(struct task_struct *p, u64 new_bw) 218 { 219 struct rq *rq; 220 221 BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV); 222 223 if (task_on_rq_queued(p)) 224 return; 225 226 rq = task_rq(p); 227 if (p->dl.dl_non_contending) { 228 sub_running_bw(&p->dl, &rq->dl); 229 p->dl.dl_non_contending = 0; 230 /* 231 * If the timer handler is currently running and the 232 * timer cannot be cancelled, inactive_task_timer() 233 * will see that dl_not_contending is not set, and 234 * will not touch the rq's active utilization, 235 * so we are still safe. 
236 */ 237 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) 238 put_task_struct(p); 239 } 240 __sub_rq_bw(p->dl.dl_bw, &rq->dl); 241 __add_rq_bw(new_bw, &rq->dl); 242 } 243 244 /* 245 * The utilization of a task cannot be immediately removed from 246 * the rq active utilization (running_bw) when the task blocks. 247 * Instead, we have to wait for the so called "0-lag time". 248 * 249 * If a task blocks before the "0-lag time", a timer (the inactive 250 * timer) is armed, and running_bw is decreased when the timer 251 * fires. 252 * 253 * If the task wakes up again before the inactive timer fires, 254 * the timer is cancelled, whereas if the task wakes up after the 255 * inactive timer fired (and running_bw has been decreased) the 256 * task's utilization has to be added to running_bw again. 257 * A flag in the deadline scheduling entity (dl_non_contending) 258 * is used to avoid race conditions between the inactive timer handler 259 * and task wakeups. 260 * 261 * The following diagram shows how running_bw is updated. A task is 262 * "ACTIVE" when its utilization contributes to running_bw; an 263 * "ACTIVE contending" task is in the TASK_RUNNING state, while an 264 * "ACTIVE non contending" task is a blocked task for which the "0-lag time" 265 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag" 266 * time already passed, which does not contribute to running_bw anymore. 267 * +------------------+ 268 * wakeup | ACTIVE | 269 * +------------------>+ contending | 270 * | add_running_bw | | 271 * | +----+------+------+ 272 * | | ^ 273 * | dequeue | | 274 * +--------+-------+ | | 275 * | | t >= 0-lag | | wakeup 276 * | INACTIVE |<---------------+ | 277 * | | sub_running_bw | | 278 * +--------+-------+ | | 279 * ^ | | 280 * | t < 0-lag | | 281 * | | | 282 * | V | 283 * | +----+------+------+ 284 * | sub_running_bw | ACTIVE | 285 * +-------------------+ | 286 * inactive timer | non contending | 287 * fired +------------------+ 288 * 289 * The task_non_contending() function is invoked when a task 290 * blocks, and checks if the 0-lag time already passed or 291 * not (in the first case, it directly updates running_bw; 292 * in the second case, it arms the inactive timer). 293 * 294 * The task_contending() function is invoked when a task wakes 295 * up, and checks if the task is still in the "ACTIVE non contending" 296 * state or not (in the second case, it updates running_bw). 
297 */ 298 static void task_non_contending(struct task_struct *p) 299 { 300 struct sched_dl_entity *dl_se = &p->dl; 301 struct hrtimer *timer = &dl_se->inactive_timer; 302 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 303 struct rq *rq = rq_of_dl_rq(dl_rq); 304 s64 zerolag_time; 305 306 /* 307 * If this is a non-deadline task that has been boosted, 308 * do nothing 309 */ 310 if (dl_se->dl_runtime == 0) 311 return; 312 313 if (dl_entity_is_special(dl_se)) 314 return; 315 316 WARN_ON(dl_se->dl_non_contending); 317 318 zerolag_time = dl_se->deadline - 319 div64_long((dl_se->runtime * dl_se->dl_period), 320 dl_se->dl_runtime); 321 322 /* 323 * Using relative times instead of the absolute "0-lag time" 324 * allows to simplify the code 325 */ 326 zerolag_time -= rq_clock(rq); 327 328 /* 329 * If the "0-lag time" already passed, decrease the active 330 * utilization now, instead of starting a timer 331 */ 332 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) { 333 if (dl_task(p)) 334 sub_running_bw(dl_se, dl_rq); 335 if (!dl_task(p) || p->state == TASK_DEAD) { 336 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 337 338 if (p->state == TASK_DEAD) 339 sub_rq_bw(&p->dl, &rq->dl); 340 raw_spin_lock(&dl_b->lock); 341 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); 342 __dl_clear_params(p); 343 raw_spin_unlock(&dl_b->lock); 344 } 345 346 return; 347 } 348 349 dl_se->dl_non_contending = 1; 350 get_task_struct(p); 351 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD); 352 } 353 354 static void task_contending(struct sched_dl_entity *dl_se, int flags) 355 { 356 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 357 358 /* 359 * If this is a non-deadline task that has been boosted, 360 * do nothing 361 */ 362 if (dl_se->dl_runtime == 0) 363 return; 364 365 if (flags & ENQUEUE_MIGRATED) 366 add_rq_bw(dl_se, dl_rq); 367 368 if (dl_se->dl_non_contending) { 369 dl_se->dl_non_contending = 0; 370 /* 371 * If the timer handler is currently running and the 372 * timer cannot be cancelled, inactive_task_timer() 373 * will see that dl_not_contending is not set, and 374 * will not touch the rq's active utilization, 375 * so we are still safe. 376 */ 377 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) 378 put_task_struct(dl_task_of(dl_se)); 379 } else { 380 /* 381 * Since "dl_non_contending" is not set, the 382 * task's utilization has already been removed from 383 * active utilization (either when the task blocked, 384 * when the "inactive timer" fired). 385 * So, add it back. 
386 */ 387 add_running_bw(dl_se, dl_rq); 388 } 389 } 390 391 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq) 392 { 393 struct sched_dl_entity *dl_se = &p->dl; 394 395 return dl_rq->root.rb_leftmost == &dl_se->rb_node; 396 } 397 398 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); 399 400 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime) 401 { 402 raw_spin_lock_init(&dl_b->dl_runtime_lock); 403 dl_b->dl_period = period; 404 dl_b->dl_runtime = runtime; 405 } 406 407 void init_dl_bw(struct dl_bw *dl_b) 408 { 409 raw_spin_lock_init(&dl_b->lock); 410 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock); 411 if (global_rt_runtime() == RUNTIME_INF) 412 dl_b->bw = -1; 413 else 414 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime()); 415 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock); 416 dl_b->total_bw = 0; 417 } 418 419 void init_dl_rq(struct dl_rq *dl_rq) 420 { 421 dl_rq->root = RB_ROOT_CACHED; 422 423 #ifdef CONFIG_SMP 424 /* zero means no -deadline tasks */ 425 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; 426 427 dl_rq->dl_nr_migratory = 0; 428 dl_rq->overloaded = 0; 429 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED; 430 #else 431 init_dl_bw(&dl_rq->dl_bw); 432 #endif 433 434 dl_rq->running_bw = 0; 435 dl_rq->this_bw = 0; 436 init_dl_rq_bw_ratio(dl_rq); 437 } 438 439 #ifdef CONFIG_SMP 440 441 static inline int dl_overloaded(struct rq *rq) 442 { 443 return atomic_read(&rq->rd->dlo_count); 444 } 445 446 static inline void dl_set_overload(struct rq *rq) 447 { 448 if (!rq->online) 449 return; 450 451 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask); 452 /* 453 * Must be visible before the overload count is 454 * set (as in sched_rt.c). 455 * 456 * Matched by the barrier in pull_dl_task(). 457 */ 458 smp_wmb(); 459 atomic_inc(&rq->rd->dlo_count); 460 } 461 462 static inline void dl_clear_overload(struct rq *rq) 463 { 464 if (!rq->online) 465 return; 466 467 atomic_dec(&rq->rd->dlo_count); 468 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask); 469 } 470 471 static void update_dl_migration(struct dl_rq *dl_rq) 472 { 473 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) { 474 if (!dl_rq->overloaded) { 475 dl_set_overload(rq_of_dl_rq(dl_rq)); 476 dl_rq->overloaded = 1; 477 } 478 } else if (dl_rq->overloaded) { 479 dl_clear_overload(rq_of_dl_rq(dl_rq)); 480 dl_rq->overloaded = 0; 481 } 482 } 483 484 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 485 { 486 struct task_struct *p = dl_task_of(dl_se); 487 488 if (p->nr_cpus_allowed > 1) 489 dl_rq->dl_nr_migratory++; 490 491 update_dl_migration(dl_rq); 492 } 493 494 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 495 { 496 struct task_struct *p = dl_task_of(dl_se); 497 498 if (p->nr_cpus_allowed > 1) 499 dl_rq->dl_nr_migratory--; 500 501 update_dl_migration(dl_rq); 502 } 503 504 /* 505 * The list of pushable -deadline task is not a plist, like in 506 * sched_rt.c, it is an rb-tree with tasks ordered by deadline. 
507 */ 508 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) 509 { 510 struct dl_rq *dl_rq = &rq->dl; 511 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node; 512 struct rb_node *parent = NULL; 513 struct task_struct *entry; 514 bool leftmost = true; 515 516 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks)); 517 518 while (*link) { 519 parent = *link; 520 entry = rb_entry(parent, struct task_struct, 521 pushable_dl_tasks); 522 if (dl_entity_preempt(&p->dl, &entry->dl)) 523 link = &parent->rb_left; 524 else { 525 link = &parent->rb_right; 526 leftmost = false; 527 } 528 } 529 530 if (leftmost) 531 dl_rq->earliest_dl.next = p->dl.deadline; 532 533 rb_link_node(&p->pushable_dl_tasks, parent, link); 534 rb_insert_color_cached(&p->pushable_dl_tasks, 535 &dl_rq->pushable_dl_tasks_root, leftmost); 536 } 537 538 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) 539 { 540 struct dl_rq *dl_rq = &rq->dl; 541 542 if (RB_EMPTY_NODE(&p->pushable_dl_tasks)) 543 return; 544 545 if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) { 546 struct rb_node *next_node; 547 548 next_node = rb_next(&p->pushable_dl_tasks); 549 if (next_node) { 550 dl_rq->earliest_dl.next = rb_entry(next_node, 551 struct task_struct, pushable_dl_tasks)->dl.deadline; 552 } 553 } 554 555 rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); 556 RB_CLEAR_NODE(&p->pushable_dl_tasks); 557 } 558 559 static inline int has_pushable_dl_tasks(struct rq *rq) 560 { 561 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root); 562 } 563 564 static int push_dl_task(struct rq *rq); 565 566 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) 567 { 568 return dl_task(prev); 569 } 570 571 static DEFINE_PER_CPU(struct callback_head, dl_push_head); 572 static DEFINE_PER_CPU(struct callback_head, dl_pull_head); 573 574 static void push_dl_tasks(struct rq *); 575 static void pull_dl_task(struct rq *); 576 577 static inline void deadline_queue_push_tasks(struct rq *rq) 578 { 579 if (!has_pushable_dl_tasks(rq)) 580 return; 581 582 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks); 583 } 584 585 static inline void deadline_queue_pull_task(struct rq *rq) 586 { 587 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task); 588 } 589 590 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq); 591 592 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p) 593 { 594 struct rq *later_rq = NULL; 595 struct dl_bw *dl_b; 596 597 later_rq = find_lock_later_rq(p, rq); 598 if (!later_rq) { 599 int cpu; 600 601 /* 602 * If we cannot preempt any rq, fall back to pick any 603 * online CPU: 604 */ 605 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr); 606 if (cpu >= nr_cpu_ids) { 607 /* 608 * Failed to find any suitable CPU. 609 * The task will never come back! 610 */ 611 BUG_ON(dl_bandwidth_enabled()); 612 613 /* 614 * If admission control is disabled we 615 * try a little harder to let the task 616 * run. 617 */ 618 cpu = cpumask_any(cpu_active_mask); 619 } 620 later_rq = cpu_rq(cpu); 621 double_lock_balance(rq, later_rq); 622 } 623 624 if (p->dl.dl_non_contending || p->dl.dl_throttled) { 625 /* 626 * Inactive timer is armed (or callback is running, but 627 * waiting for us to release rq locks). In any case, when it 628 * will fire (or continue), it will see running_bw of this 629 * task migrated to later_rq (and correctly handle it). 
630 */ 631 sub_running_bw(&p->dl, &rq->dl); 632 sub_rq_bw(&p->dl, &rq->dl); 633 634 add_rq_bw(&p->dl, &later_rq->dl); 635 add_running_bw(&p->dl, &later_rq->dl); 636 } else { 637 sub_rq_bw(&p->dl, &rq->dl); 638 add_rq_bw(&p->dl, &later_rq->dl); 639 } 640 641 /* 642 * And we finally need to fixup root_domain(s) bandwidth accounting, 643 * since p is still hanging out in the old (now moved to default) root 644 * domain. 645 */ 646 dl_b = &rq->rd->dl_bw; 647 raw_spin_lock(&dl_b->lock); 648 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); 649 raw_spin_unlock(&dl_b->lock); 650 651 dl_b = &later_rq->rd->dl_bw; 652 raw_spin_lock(&dl_b->lock); 653 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); 654 raw_spin_unlock(&dl_b->lock); 655 656 set_task_cpu(p, later_rq->cpu); 657 double_unlock_balance(later_rq, rq); 658 659 return later_rq; 660 } 661 662 #else 663 664 static inline 665 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) 666 { 667 } 668 669 static inline 670 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) 671 { 672 } 673 674 static inline 675 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 676 { 677 } 678 679 static inline 680 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 681 { 682 } 683 684 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) 685 { 686 return false; 687 } 688 689 static inline void pull_dl_task(struct rq *rq) 690 { 691 } 692 693 static inline void deadline_queue_push_tasks(struct rq *rq) 694 { 695 } 696 697 static inline void deadline_queue_pull_task(struct rq *rq) 698 { 699 } 700 #endif /* CONFIG_SMP */ 701 702 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags); 703 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags); 704 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags); 705 706 /* 707 * We are being explicitly informed that a new instance is starting, 708 * and this means that: 709 * - the absolute deadline of the entity has to be placed at 710 * current time + relative deadline; 711 * - the runtime of the entity has to be set to the maximum value. 712 * 713 * The capability of specifying such event is useful whenever a -deadline 714 * entity wants to (try to!) synchronize its behaviour with the scheduler's 715 * one, and to (try to!) reconcile itself with its own scheduling 716 * parameters. 717 */ 718 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se) 719 { 720 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 721 struct rq *rq = rq_of_dl_rq(dl_rq); 722 723 WARN_ON(is_dl_boosted(dl_se)); 724 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline)); 725 726 /* 727 * We are racing with the deadline timer. So, do nothing because 728 * the deadline timer handler will take care of properly recharging 729 * the runtime and postponing the deadline 730 */ 731 if (dl_se->dl_throttled) 732 return; 733 734 /* 735 * We use the regular wall clock time to set deadlines in the 736 * future; in fact, we must consider execution overheads (time 737 * spent on hardirq context, etc.). 738 */ 739 dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline; 740 dl_se->runtime = dl_se->dl_runtime; 741 } 742 743 /* 744 * Pure Earliest Deadline First (EDF) scheduling does not deal with the 745 * possibility of a entity lasting more than what it declared, and thus 746 * exhausting its runtime. 
747 * 748 * Here we are interested in making runtime overrun possible, but we do 749 * not want a entity which is misbehaving to affect the scheduling of all 750 * other entities. 751 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS) 752 * is used, in order to confine each entity within its own bandwidth. 753 * 754 * This function deals exactly with that, and ensures that when the runtime 755 * of a entity is replenished, its deadline is also postponed. That ensures 756 * the overrunning entity can't interfere with other entity in the system and 757 * can't make them miss their deadlines. Reasons why this kind of overruns 758 * could happen are, typically, a entity voluntarily trying to overcome its 759 * runtime, or it just underestimated it during sched_setattr(). 760 */ 761 static void replenish_dl_entity(struct sched_dl_entity *dl_se) 762 { 763 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 764 struct rq *rq = rq_of_dl_rq(dl_rq); 765 766 BUG_ON(pi_of(dl_se)->dl_runtime <= 0); 767 768 /* 769 * This could be the case for a !-dl task that is boosted. 770 * Just go with full inherited parameters. 771 */ 772 if (dl_se->dl_deadline == 0) { 773 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline; 774 dl_se->runtime = pi_of(dl_se)->dl_runtime; 775 } 776 777 if (dl_se->dl_yielded && dl_se->runtime > 0) 778 dl_se->runtime = 0; 779 780 /* 781 * We keep moving the deadline away until we get some 782 * available runtime for the entity. This ensures correct 783 * handling of situations where the runtime overrun is 784 * arbitrary large. 785 */ 786 while (dl_se->runtime <= 0) { 787 dl_se->deadline += pi_of(dl_se)->dl_period; 788 dl_se->runtime += pi_of(dl_se)->dl_runtime; 789 } 790 791 /* 792 * At this point, the deadline really should be "in 793 * the future" with respect to rq->clock. If it's 794 * not, we are, for some reason, lagging too much! 795 * Anyway, after having warn userspace abut that, 796 * we still try to keep the things running by 797 * resetting the deadline and the budget of the 798 * entity. 799 */ 800 if (dl_time_before(dl_se->deadline, rq_clock(rq))) { 801 printk_deferred_once("sched: DL replenish lagged too much\n"); 802 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline; 803 dl_se->runtime = pi_of(dl_se)->dl_runtime; 804 } 805 806 if (dl_se->dl_yielded) 807 dl_se->dl_yielded = 0; 808 if (dl_se->dl_throttled) 809 dl_se->dl_throttled = 0; 810 } 811 812 /* 813 * Here we check if --at time t-- an entity (which is probably being 814 * [re]activated or, in general, enqueued) can use its remaining runtime 815 * and its current deadline _without_ exceeding the bandwidth it is 816 * assigned (function returns true if it can't). We are in fact applying 817 * one of the CBS rules: when a task wakes up, if the residual runtime 818 * over residual deadline fits within the allocated bandwidth, then we 819 * can keep the current (absolute) deadline and residual budget without 820 * disrupting the schedulability of the system. Otherwise, we should 821 * refill the runtime and set the deadline a period in the future, 822 * because keeping the current (absolute) deadline of the task would 823 * result in breaking guarantees promised to other tasks (refer to 824 * Documentation/scheduler/sched-deadline.rst for more information). 825 * 826 * This function returns true if: 827 * 828 * runtime / (deadline - t) > dl_runtime / dl_deadline , 829 * 830 * IOW we can't recycle current parameters. 831 * 832 * Notice that the bandwidth check is done against the deadline. 
For 833 * task with deadline equal to period this is the same of using 834 * dl_period instead of dl_deadline in the equation above. 835 */ 836 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t) 837 { 838 u64 left, right; 839 840 /* 841 * left and right are the two sides of the equation above, 842 * after a bit of shuffling to use multiplications instead 843 * of divisions. 844 * 845 * Note that none of the time values involved in the two 846 * multiplications are absolute: dl_deadline and dl_runtime 847 * are the relative deadline and the maximum runtime of each 848 * instance, runtime is the runtime left for the last instance 849 * and (deadline - t), since t is rq->clock, is the time left 850 * to the (absolute) deadline. Even if overflowing the u64 type 851 * is very unlikely to occur in both cases, here we scale down 852 * as we want to avoid that risk at all. Scaling down by 10 853 * means that we reduce granularity to 1us. We are fine with it, 854 * since this is only a true/false check and, anyway, thinking 855 * of anything below microseconds resolution is actually fiction 856 * (but still we want to give the user that illusion >;). 857 */ 858 left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); 859 right = ((dl_se->deadline - t) >> DL_SCALE) * 860 (pi_of(dl_se)->dl_runtime >> DL_SCALE); 861 862 return dl_time_before(right, left); 863 } 864 865 /* 866 * Revised wakeup rule [1]: For self-suspending tasks, rather then 867 * re-initializing task's runtime and deadline, the revised wakeup 868 * rule adjusts the task's runtime to avoid the task to overrun its 869 * density. 870 * 871 * Reasoning: a task may overrun the density if: 872 * runtime / (deadline - t) > dl_runtime / dl_deadline 873 * 874 * Therefore, runtime can be adjusted to: 875 * runtime = (dl_runtime / dl_deadline) * (deadline - t) 876 * 877 * In such way that runtime will be equal to the maximum density 878 * the task can use without breaking any rule. 879 * 880 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant 881 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24. 882 */ 883 static void 884 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq) 885 { 886 u64 laxity = dl_se->deadline - rq_clock(rq); 887 888 /* 889 * If the task has deadline < period, and the deadline is in the past, 890 * it should already be throttled before this check. 891 * 892 * See update_dl_entity() comments for further details. 893 */ 894 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq))); 895 896 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT; 897 } 898 899 /* 900 * Regarding the deadline, a task with implicit deadline has a relative 901 * deadline == relative period. A task with constrained deadline has a 902 * relative deadline <= relative period. 903 * 904 * We support constrained deadline tasks. However, there are some restrictions 905 * applied only for tasks which do not have an implicit deadline. See 906 * update_dl_entity() to know more about such restrictions. 907 * 908 * The dl_is_implicit() returns true if the task has an implicit deadline. 909 */ 910 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se) 911 { 912 return dl_se->dl_deadline == dl_se->dl_period; 913 } 914 915 /* 916 * When a deadline entity is placed in the runqueue, its runtime and deadline 917 * might need to be updated. This is done by a CBS wake up rule. There are two 918 * different rules: 1) the original CBS; and 2) the Revisited CBS. 
919 * 920 * When the task is starting a new period, the Original CBS is used. In this 921 * case, the runtime is replenished and a new absolute deadline is set. 922 * 923 * When a task is queued before the begin of the next period, using the 924 * remaining runtime and deadline could make the entity to overflow, see 925 * dl_entity_overflow() to find more about runtime overflow. When such case 926 * is detected, the runtime and deadline need to be updated. 927 * 928 * If the task has an implicit deadline, i.e., deadline == period, the Original 929 * CBS is applied. the runtime is replenished and a new absolute deadline is 930 * set, as in the previous cases. 931 * 932 * However, the Original CBS does not work properly for tasks with 933 * deadline < period, which are said to have a constrained deadline. By 934 * applying the Original CBS, a constrained deadline task would be able to run 935 * runtime/deadline in a period. With deadline < period, the task would 936 * overrun the runtime/period allowed bandwidth, breaking the admission test. 937 * 938 * In order to prevent this misbehave, the Revisited CBS is used for 939 * constrained deadline tasks when a runtime overflow is detected. In the 940 * Revisited CBS, rather than replenishing & setting a new absolute deadline, 941 * the remaining runtime of the task is reduced to avoid runtime overflow. 942 * Please refer to the comments update_dl_revised_wakeup() function to find 943 * more about the Revised CBS rule. 944 */ 945 static void update_dl_entity(struct sched_dl_entity *dl_se) 946 { 947 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 948 struct rq *rq = rq_of_dl_rq(dl_rq); 949 950 if (dl_time_before(dl_se->deadline, rq_clock(rq)) || 951 dl_entity_overflow(dl_se, rq_clock(rq))) { 952 953 if (unlikely(!dl_is_implicit(dl_se) && 954 !dl_time_before(dl_se->deadline, rq_clock(rq)) && 955 !is_dl_boosted(dl_se))) { 956 update_dl_revised_wakeup(dl_se, rq); 957 return; 958 } 959 960 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline; 961 dl_se->runtime = pi_of(dl_se)->dl_runtime; 962 } 963 } 964 965 static inline u64 dl_next_period(struct sched_dl_entity *dl_se) 966 { 967 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period; 968 } 969 970 /* 971 * If the entity depleted all its runtime, and if we want it to sleep 972 * while waiting for some new execution time to become available, we 973 * set the bandwidth replenishment timer to the replenishment instant 974 * and try to activate it. 975 * 976 * Notice that it is important for the caller to know if the timer 977 * actually started or not (i.e., the replenishment instant is in 978 * the future or in the past). 979 */ 980 static int start_dl_timer(struct task_struct *p) 981 { 982 struct sched_dl_entity *dl_se = &p->dl; 983 struct hrtimer *timer = &dl_se->dl_timer; 984 struct rq *rq = task_rq(p); 985 ktime_t now, act; 986 s64 delta; 987 988 lockdep_assert_held(&rq->lock); 989 990 /* 991 * We want the timer to fire at the deadline, but considering 992 * that it is actually coming from rq->clock and not from 993 * hrtimer's time base reading. 994 */ 995 act = ns_to_ktime(dl_next_period(dl_se)); 996 now = hrtimer_cb_get_time(timer); 997 delta = ktime_to_ns(now) - rq_clock(rq); 998 act = ktime_add_ns(act, delta); 999 1000 /* 1001 * If the expiry time already passed, e.g., because the value 1002 * chosen as the deadline is too small, don't even try to 1003 * start the timer in the past! 
1004 */ 1005 if (ktime_us_delta(act, now) < 0) 1006 return 0; 1007 1008 /* 1009 * !enqueued will guarantee another callback; even if one is already in 1010 * progress. This ensures a balanced {get,put}_task_struct(). 1011 * 1012 * The race against __run_timer() clearing the enqueued state is 1013 * harmless because we're holding task_rq()->lock, therefore the timer 1014 * expiring after we've done the check will wait on its task_rq_lock() 1015 * and observe our state. 1016 */ 1017 if (!hrtimer_is_queued(timer)) { 1018 get_task_struct(p); 1019 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD); 1020 } 1021 1022 return 1; 1023 } 1024 1025 /* 1026 * This is the bandwidth enforcement timer callback. If here, we know 1027 * a task is not on its dl_rq, since the fact that the timer was running 1028 * means the task is throttled and needs a runtime replenishment. 1029 * 1030 * However, what we actually do depends on the fact the task is active, 1031 * (it is on its rq) or has been removed from there by a call to 1032 * dequeue_task_dl(). In the former case we must issue the runtime 1033 * replenishment and add the task back to the dl_rq; in the latter, we just 1034 * do nothing but clearing dl_throttled, so that runtime and deadline 1035 * updating (and the queueing back to dl_rq) will be done by the 1036 * next call to enqueue_task_dl(). 1037 */ 1038 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) 1039 { 1040 struct sched_dl_entity *dl_se = container_of(timer, 1041 struct sched_dl_entity, 1042 dl_timer); 1043 struct task_struct *p = dl_task_of(dl_se); 1044 struct rq_flags rf; 1045 struct rq *rq; 1046 1047 rq = task_rq_lock(p, &rf); 1048 1049 /* 1050 * The task might have changed its scheduling policy to something 1051 * different than SCHED_DEADLINE (through switched_from_dl()). 1052 */ 1053 if (!dl_task(p)) 1054 goto unlock; 1055 1056 /* 1057 * The task might have been boosted by someone else and might be in the 1058 * boosting/deboosting path, its not throttled. 1059 */ 1060 if (is_dl_boosted(dl_se)) 1061 goto unlock; 1062 1063 /* 1064 * Spurious timer due to start_dl_timer() race; or we already received 1065 * a replenishment from rt_mutex_setprio(). 1066 */ 1067 if (!dl_se->dl_throttled) 1068 goto unlock; 1069 1070 sched_clock_tick(); 1071 update_rq_clock(rq); 1072 1073 /* 1074 * If the throttle happened during sched-out; like: 1075 * 1076 * schedule() 1077 * deactivate_task() 1078 * dequeue_task_dl() 1079 * update_curr_dl() 1080 * start_dl_timer() 1081 * __dequeue_task_dl() 1082 * prev->on_rq = 0; 1083 * 1084 * We can be both throttled and !queued. Replenish the counter 1085 * but do not enqueue -- wait for our wakeup to do that. 1086 */ 1087 if (!task_on_rq_queued(p)) { 1088 replenish_dl_entity(dl_se); 1089 goto unlock; 1090 } 1091 1092 #ifdef CONFIG_SMP 1093 if (unlikely(!rq->online)) { 1094 /* 1095 * If the runqueue is no longer available, migrate the 1096 * task elsewhere. This necessarily changes rq. 1097 */ 1098 lockdep_unpin_lock(&rq->lock, rf.cookie); 1099 rq = dl_task_offline_migration(rq, p); 1100 rf.cookie = lockdep_pin_lock(&rq->lock); 1101 update_rq_clock(rq); 1102 1103 /* 1104 * Now that the task has been migrated to the new RQ and we 1105 * have that locked, proceed as normal and enqueue the task 1106 * there. 
1107 */ 1108 } 1109 #endif 1110 1111 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); 1112 if (dl_task(rq->curr)) 1113 check_preempt_curr_dl(rq, p, 0); 1114 else 1115 resched_curr(rq); 1116 1117 #ifdef CONFIG_SMP 1118 /* 1119 * Queueing this task back might have overloaded rq, check if we need 1120 * to kick someone away. 1121 */ 1122 if (has_pushable_dl_tasks(rq)) { 1123 /* 1124 * Nothing relies on rq->lock after this, so its safe to drop 1125 * rq->lock. 1126 */ 1127 rq_unpin_lock(rq, &rf); 1128 push_dl_task(rq); 1129 rq_repin_lock(rq, &rf); 1130 } 1131 #endif 1132 1133 unlock: 1134 task_rq_unlock(rq, p, &rf); 1135 1136 /* 1137 * This can free the task_struct, including this hrtimer, do not touch 1138 * anything related to that after this. 1139 */ 1140 put_task_struct(p); 1141 1142 return HRTIMER_NORESTART; 1143 } 1144 1145 void init_dl_task_timer(struct sched_dl_entity *dl_se) 1146 { 1147 struct hrtimer *timer = &dl_se->dl_timer; 1148 1149 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); 1150 timer->function = dl_task_timer; 1151 } 1152 1153 /* 1154 * During the activation, CBS checks if it can reuse the current task's 1155 * runtime and period. If the deadline of the task is in the past, CBS 1156 * cannot use the runtime, and so it replenishes the task. This rule 1157 * works fine for implicit deadline tasks (deadline == period), and the 1158 * CBS was designed for implicit deadline tasks. However, a task with 1159 * constrained deadline (deadline < period) might be awakened after the 1160 * deadline, but before the next period. In this case, replenishing the 1161 * task would allow it to run for runtime / deadline. As in this case 1162 * deadline < period, CBS enables a task to run for more than the 1163 * runtime / period. In a very loaded system, this can cause a domino 1164 * effect, making other tasks miss their deadlines. 1165 * 1166 * To avoid this problem, in the activation of a constrained deadline 1167 * task after the deadline but before the next period, throttle the 1168 * task and set the replenishing timer to the begin of the next period, 1169 * unless it is boosted. 1170 */ 1171 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se) 1172 { 1173 struct task_struct *p = dl_task_of(dl_se); 1174 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se)); 1175 1176 if (dl_time_before(dl_se->deadline, rq_clock(rq)) && 1177 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) { 1178 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p))) 1179 return; 1180 dl_se->dl_throttled = 1; 1181 if (dl_se->runtime > 0) 1182 dl_se->runtime = 0; 1183 } 1184 } 1185 1186 static 1187 int dl_runtime_exceeded(struct sched_dl_entity *dl_se) 1188 { 1189 return (dl_se->runtime <= 0); 1190 } 1191 1192 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); 1193 1194 /* 1195 * This function implements the GRUB accounting rule: 1196 * according to the GRUB reclaiming algorithm, the runtime is 1197 * not decreased as "dq = -dt", but as 1198 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt", 1199 * where u is the utilization of the task, Umax is the maximum reclaimable 1200 * utilization, Uinact is the (per-runqueue) inactive utilization, computed 1201 * as the difference between the "total runqueue utilization" and the 1202 * runqueue active utilization, and Uextra is the (per runqueue) extra 1203 * reclaimable utilization. 1204 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations 1205 * multiplied by 2^BW_SHIFT, the result has to be shifted right by 1206 * BW_SHIFT. 
1207 * Since rq->dl.bw_ratio contains 1 / Umax multipled by 2^RATIO_SHIFT, 1208 * dl_bw is multiped by rq->dl.bw_ratio and shifted right by RATIO_SHIFT. 1209 * Since delta is a 64 bit variable, to have an overflow its value 1210 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds. 1211 * So, overflow is not an issue here. 1212 */ 1213 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) 1214 { 1215 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */ 1216 u64 u_act; 1217 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT; 1218 1219 /* 1220 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)}, 1221 * we compare u_inact + rq->dl.extra_bw with 1222 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because 1223 * u_inact + rq->dl.extra_bw can be larger than 1224 * 1 * (so, 1 - u_inact - rq->dl.extra_bw would be negative 1225 * leading to wrong results) 1226 */ 1227 if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min) 1228 u_act = u_act_min; 1229 else 1230 u_act = BW_UNIT - u_inact - rq->dl.extra_bw; 1231 1232 return (delta * u_act) >> BW_SHIFT; 1233 } 1234 1235 /* 1236 * Update the current task's runtime statistics (provided it is still 1237 * a -deadline task and has not been removed from the dl_rq). 1238 */ 1239 static void update_curr_dl(struct rq *rq) 1240 { 1241 struct task_struct *curr = rq->curr; 1242 struct sched_dl_entity *dl_se = &curr->dl; 1243 u64 delta_exec, scaled_delta_exec; 1244 int cpu = cpu_of(rq); 1245 u64 now; 1246 1247 if (!dl_task(curr) || !on_dl_rq(dl_se)) 1248 return; 1249 1250 /* 1251 * Consumed budget is computed considering the time as 1252 * observed by schedulable tasks (excluding time spent 1253 * in hardirq context, etc.). Deadlines are instead 1254 * computed using hard walltime. This seems to be the more 1255 * natural solution, but the full ramifications of this 1256 * approach need further study. 1257 */ 1258 now = rq_clock_task(rq); 1259 delta_exec = now - curr->se.exec_start; 1260 if (unlikely((s64)delta_exec <= 0)) { 1261 if (unlikely(dl_se->dl_yielded)) 1262 goto throttle; 1263 return; 1264 } 1265 1266 schedstat_set(curr->se.statistics.exec_max, 1267 max(curr->se.statistics.exec_max, delta_exec)); 1268 1269 curr->se.sum_exec_runtime += delta_exec; 1270 account_group_exec_runtime(curr, delta_exec); 1271 1272 curr->se.exec_start = now; 1273 cgroup_account_cputime(curr, delta_exec); 1274 1275 if (dl_entity_is_special(dl_se)) 1276 return; 1277 1278 /* 1279 * For tasks that participate in GRUB, we implement GRUB-PA: the 1280 * spare reclaimed bandwidth is used to clock down frequency. 1281 * 1282 * For the others, we still need to scale reservation parameters 1283 * according to current frequency and CPU maximum capacity. 1284 */ 1285 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) { 1286 scaled_delta_exec = grub_reclaim(delta_exec, 1287 rq, 1288 &curr->dl); 1289 } else { 1290 unsigned long scale_freq = arch_scale_freq_capacity(cpu); 1291 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu); 1292 1293 scaled_delta_exec = cap_scale(delta_exec, scale_freq); 1294 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu); 1295 } 1296 1297 dl_se->runtime -= scaled_delta_exec; 1298 1299 throttle: 1300 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) { 1301 dl_se->dl_throttled = 1; 1302 1303 /* If requested, inform the user about runtime overruns. 
*/ 1304 if (dl_runtime_exceeded(dl_se) && 1305 (dl_se->flags & SCHED_FLAG_DL_OVERRUN)) 1306 dl_se->dl_overrun = 1; 1307 1308 __dequeue_task_dl(rq, curr, 0); 1309 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr))) 1310 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); 1311 1312 if (!is_leftmost(curr, &rq->dl)) 1313 resched_curr(rq); 1314 } 1315 1316 /* 1317 * Because -- for now -- we share the rt bandwidth, we need to 1318 * account our runtime there too, otherwise actual rt tasks 1319 * would be able to exceed the shared quota. 1320 * 1321 * Account to the root rt group for now. 1322 * 1323 * The solution we're working towards is having the RT groups scheduled 1324 * using deadline servers -- however there's a few nasties to figure 1325 * out before that can happen. 1326 */ 1327 if (rt_bandwidth_enabled()) { 1328 struct rt_rq *rt_rq = &rq->rt; 1329 1330 raw_spin_lock(&rt_rq->rt_runtime_lock); 1331 /* 1332 * We'll let actual RT tasks worry about the overflow here, we 1333 * have our own CBS to keep us inline; only account when RT 1334 * bandwidth is relevant. 1335 */ 1336 if (sched_rt_bandwidth_account(rt_rq)) 1337 rt_rq->rt_time += delta_exec; 1338 raw_spin_unlock(&rt_rq->rt_runtime_lock); 1339 } 1340 } 1341 1342 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer) 1343 { 1344 struct sched_dl_entity *dl_se = container_of(timer, 1345 struct sched_dl_entity, 1346 inactive_timer); 1347 struct task_struct *p = dl_task_of(dl_se); 1348 struct rq_flags rf; 1349 struct rq *rq; 1350 1351 rq = task_rq_lock(p, &rf); 1352 1353 sched_clock_tick(); 1354 update_rq_clock(rq); 1355 1356 if (!dl_task(p) || p->state == TASK_DEAD) { 1357 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 1358 1359 if (p->state == TASK_DEAD && dl_se->dl_non_contending) { 1360 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl)); 1361 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl)); 1362 dl_se->dl_non_contending = 0; 1363 } 1364 1365 raw_spin_lock(&dl_b->lock); 1366 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); 1367 raw_spin_unlock(&dl_b->lock); 1368 __dl_clear_params(p); 1369 1370 goto unlock; 1371 } 1372 if (dl_se->dl_non_contending == 0) 1373 goto unlock; 1374 1375 sub_running_bw(dl_se, &rq->dl); 1376 dl_se->dl_non_contending = 0; 1377 unlock: 1378 task_rq_unlock(rq, p, &rf); 1379 put_task_struct(p); 1380 1381 return HRTIMER_NORESTART; 1382 } 1383 1384 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se) 1385 { 1386 struct hrtimer *timer = &dl_se->inactive_timer; 1387 1388 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); 1389 timer->function = inactive_task_timer; 1390 } 1391 1392 #ifdef CONFIG_SMP 1393 1394 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) 1395 { 1396 struct rq *rq = rq_of_dl_rq(dl_rq); 1397 1398 if (dl_rq->earliest_dl.curr == 0 || 1399 dl_time_before(deadline, dl_rq->earliest_dl.curr)) { 1400 dl_rq->earliest_dl.curr = deadline; 1401 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline); 1402 } 1403 } 1404 1405 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) 1406 { 1407 struct rq *rq = rq_of_dl_rq(dl_rq); 1408 1409 /* 1410 * Since we may have removed our earliest (and/or next earliest) 1411 * task we must recompute them. 
1412 */ 1413 if (!dl_rq->dl_nr_running) { 1414 dl_rq->earliest_dl.curr = 0; 1415 dl_rq->earliest_dl.next = 0; 1416 cpudl_clear(&rq->rd->cpudl, rq->cpu); 1417 } else { 1418 struct rb_node *leftmost = dl_rq->root.rb_leftmost; 1419 struct sched_dl_entity *entry; 1420 1421 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node); 1422 dl_rq->earliest_dl.curr = entry->deadline; 1423 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline); 1424 } 1425 } 1426 1427 #else 1428 1429 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} 1430 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} 1431 1432 #endif /* CONFIG_SMP */ 1433 1434 static inline 1435 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 1436 { 1437 int prio = dl_task_of(dl_se)->prio; 1438 u64 deadline = dl_se->deadline; 1439 1440 WARN_ON(!dl_prio(prio)); 1441 dl_rq->dl_nr_running++; 1442 add_nr_running(rq_of_dl_rq(dl_rq), 1); 1443 1444 inc_dl_deadline(dl_rq, deadline); 1445 inc_dl_migration(dl_se, dl_rq); 1446 } 1447 1448 static inline 1449 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 1450 { 1451 int prio = dl_task_of(dl_se)->prio; 1452 1453 WARN_ON(!dl_prio(prio)); 1454 WARN_ON(!dl_rq->dl_nr_running); 1455 dl_rq->dl_nr_running--; 1456 sub_nr_running(rq_of_dl_rq(dl_rq), 1); 1457 1458 dec_dl_deadline(dl_rq, dl_se->deadline); 1459 dec_dl_migration(dl_se, dl_rq); 1460 } 1461 1462 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se) 1463 { 1464 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 1465 struct rb_node **link = &dl_rq->root.rb_root.rb_node; 1466 struct rb_node *parent = NULL; 1467 struct sched_dl_entity *entry; 1468 int leftmost = 1; 1469 1470 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node)); 1471 1472 while (*link) { 1473 parent = *link; 1474 entry = rb_entry(parent, struct sched_dl_entity, rb_node); 1475 if (dl_time_before(dl_se->deadline, entry->deadline)) 1476 link = &parent->rb_left; 1477 else { 1478 link = &parent->rb_right; 1479 leftmost = 0; 1480 } 1481 } 1482 1483 rb_link_node(&dl_se->rb_node, parent, link); 1484 rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost); 1485 1486 inc_dl_tasks(dl_se, dl_rq); 1487 } 1488 1489 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se) 1490 { 1491 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 1492 1493 if (RB_EMPTY_NODE(&dl_se->rb_node)) 1494 return; 1495 1496 rb_erase_cached(&dl_se->rb_node, &dl_rq->root); 1497 RB_CLEAR_NODE(&dl_se->rb_node); 1498 1499 dec_dl_tasks(dl_se, dl_rq); 1500 } 1501 1502 static void 1503 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags) 1504 { 1505 BUG_ON(on_dl_rq(dl_se)); 1506 1507 /* 1508 * If this is a wakeup or a new instance, the scheduling 1509 * parameters of the task might need updating. Otherwise, 1510 * we want a replenishment of its runtime. 
1511 */ 1512 if (flags & ENQUEUE_WAKEUP) { 1513 task_contending(dl_se, flags); 1514 update_dl_entity(dl_se); 1515 } else if (flags & ENQUEUE_REPLENISH) { 1516 replenish_dl_entity(dl_se); 1517 } else if ((flags & ENQUEUE_RESTORE) && 1518 dl_time_before(dl_se->deadline, 1519 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) { 1520 setup_new_dl_entity(dl_se); 1521 } 1522 1523 __enqueue_dl_entity(dl_se); 1524 } 1525 1526 static void dequeue_dl_entity(struct sched_dl_entity *dl_se) 1527 { 1528 __dequeue_dl_entity(dl_se); 1529 } 1530 1531 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) 1532 { 1533 if (is_dl_boosted(&p->dl)) { 1534 /* 1535 * Because of delays in the detection of the overrun of a 1536 * thread's runtime, it might be the case that a thread 1537 * goes to sleep in a rt mutex with negative runtime. As 1538 * a consequence, the thread will be throttled. 1539 * 1540 * While waiting for the mutex, this thread can also be 1541 * boosted via PI, resulting in a thread that is throttled 1542 * and boosted at the same time. 1543 * 1544 * In this case, the boost overrides the throttle. 1545 */ 1546 if (p->dl.dl_throttled) { 1547 /* 1548 * The replenish timer needs to be canceled. No 1549 * problem if it fires concurrently: boosted threads 1550 * are ignored in dl_task_timer(). 1551 */ 1552 hrtimer_try_to_cancel(&p->dl.dl_timer); 1553 p->dl.dl_throttled = 0; 1554 } 1555 } else if (!dl_prio(p->normal_prio)) { 1556 /* 1557 * Special case in which we have a !SCHED_DEADLINE task that is going 1558 * to be deboosted, but exceeds its runtime while doing so. No point in 1559 * replenishing it, as it's going to return back to its original 1560 * scheduling class after this. If it has been throttled, we need to 1561 * clear the flag, otherwise the task may wake up as throttled after 1562 * being boosted again with no means to replenish the runtime and clear 1563 * the throttle. 1564 */ 1565 p->dl.dl_throttled = 0; 1566 BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH); 1567 return; 1568 } 1569 1570 /* 1571 * Check if a constrained deadline task was activated 1572 * after the deadline but before the next period. 1573 * If that is the case, the task will be throttled and 1574 * the replenishment timer will be set to the next period. 1575 */ 1576 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl)) 1577 dl_check_constrained_dl(&p->dl); 1578 1579 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) { 1580 add_rq_bw(&p->dl, &rq->dl); 1581 add_running_bw(&p->dl, &rq->dl); 1582 } 1583 1584 /* 1585 * If p is throttled, we do not enqueue it. In fact, if it exhausted 1586 * its budget it needs a replenishment and, since it now is on 1587 * its rq, the bandwidth timer callback (which clearly has not 1588 * run yet) will take care of this. 1589 * However, the active utilization does not depend on the fact 1590 * that the task is on the runqueue or not (but depends on the 1591 * task's state - in GRUB parlance, "inactive" vs "active contending"). 1592 * In other words, even if a task is throttled its utilization must 1593 * be counted in the active utilization; hence, we need to call 1594 * add_running_bw(). 
1595 */ 1596 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) { 1597 if (flags & ENQUEUE_WAKEUP) 1598 task_contending(&p->dl, flags); 1599 1600 return; 1601 } 1602 1603 enqueue_dl_entity(&p->dl, flags); 1604 1605 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 1606 enqueue_pushable_dl_task(rq, p); 1607 } 1608 1609 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) 1610 { 1611 dequeue_dl_entity(&p->dl); 1612 dequeue_pushable_dl_task(rq, p); 1613 } 1614 1615 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) 1616 { 1617 update_curr_dl(rq); 1618 __dequeue_task_dl(rq, p, flags); 1619 1620 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) { 1621 sub_running_bw(&p->dl, &rq->dl); 1622 sub_rq_bw(&p->dl, &rq->dl); 1623 } 1624 1625 /* 1626 * This check allows to start the inactive timer (or to immediately 1627 * decrease the active utilization, if needed) in two cases: 1628 * when the task blocks and when it is terminating 1629 * (p->state == TASK_DEAD). We can handle the two cases in the same 1630 * way, because from GRUB's point of view the same thing is happening 1631 * (the task moves from "active contending" to "active non contending" 1632 * or "inactive") 1633 */ 1634 if (flags & DEQUEUE_SLEEP) 1635 task_non_contending(p); 1636 } 1637 1638 /* 1639 * Yield task semantic for -deadline tasks is: 1640 * 1641 * get off from the CPU until our next instance, with 1642 * a new runtime. This is of little use now, since we 1643 * don't have a bandwidth reclaiming mechanism. Anyway, 1644 * bandwidth reclaiming is planned for the future, and 1645 * yield_task_dl will indicate that some spare budget 1646 * is available for other task instances to use it. 1647 */ 1648 static void yield_task_dl(struct rq *rq) 1649 { 1650 /* 1651 * We make the task go to sleep until its current deadline by 1652 * forcing its runtime to zero. This way, update_curr_dl() stops 1653 * it and the bandwidth timer will wake it up and will give it 1654 * new scheduling parameters (thanks to dl_yielded=1). 1655 */ 1656 rq->curr->dl.dl_yielded = 1; 1657 1658 update_rq_clock(rq); 1659 update_curr_dl(rq); 1660 /* 1661 * Tell update_rq_clock() that we've just updated, 1662 * so we don't do microscopic update in schedule() 1663 * and double the fastpath cost. 1664 */ 1665 rq_clock_skip_update(rq); 1666 } 1667 1668 #ifdef CONFIG_SMP 1669 1670 static int find_later_rq(struct task_struct *task); 1671 1672 static int 1673 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags) 1674 { 1675 struct task_struct *curr; 1676 bool select_rq; 1677 struct rq *rq; 1678 1679 if (sd_flag != SD_BALANCE_WAKE) 1680 goto out; 1681 1682 rq = cpu_rq(cpu); 1683 1684 rcu_read_lock(); 1685 curr = READ_ONCE(rq->curr); /* unlocked access */ 1686 1687 /* 1688 * If we are dealing with a -deadline task, we must 1689 * decide where to wake it up. 1690 * If it has a later deadline and the current task 1691 * on this rq can't move (provided the waking task 1692 * can!) we prefer to send it somewhere else. On the 1693 * other hand, if it has a shorter deadline, we 1694 * try to make it stay here, it might be important. 1695 */ 1696 select_rq = unlikely(dl_task(curr)) && 1697 (curr->nr_cpus_allowed < 2 || 1698 !dl_entity_preempt(&p->dl, &curr->dl)) && 1699 p->nr_cpus_allowed > 1; 1700 1701 /* 1702 * Take the capacity of the CPU into account to 1703 * ensure it fits the requirement of the task. 
1704 */ 1705 if (static_branch_unlikely(&sched_asym_cpucapacity)) 1706 select_rq |= !dl_task_fits_capacity(p, cpu); 1707 1708 if (select_rq) { 1709 int target = find_later_rq(p); 1710 1711 if (target != -1 && 1712 (dl_time_before(p->dl.deadline, 1713 cpu_rq(target)->dl.earliest_dl.curr) || 1714 (cpu_rq(target)->dl.dl_nr_running == 0))) 1715 cpu = target; 1716 } 1717 rcu_read_unlock(); 1718 1719 out: 1720 return cpu; 1721 } 1722 1723 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused) 1724 { 1725 struct rq *rq; 1726 1727 if (p->state != TASK_WAKING) 1728 return; 1729 1730 rq = task_rq(p); 1731 /* 1732 * Since p->state == TASK_WAKING, set_task_cpu() has been called 1733 * from try_to_wake_up(). Hence, p->pi_lock is locked, but 1734 * rq->lock is not... So, lock it 1735 */ 1736 raw_spin_lock(&rq->lock); 1737 if (p->dl.dl_non_contending) { 1738 sub_running_bw(&p->dl, &rq->dl); 1739 p->dl.dl_non_contending = 0; 1740 /* 1741 * If the timer handler is currently running and the 1742 * timer cannot be cancelled, inactive_task_timer() 1743 * will see that dl_not_contending is not set, and 1744 * will not touch the rq's active utilization, 1745 * so we are still safe. 1746 */ 1747 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) 1748 put_task_struct(p); 1749 } 1750 sub_rq_bw(&p->dl, &rq->dl); 1751 raw_spin_unlock(&rq->lock); 1752 } 1753 1754 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) 1755 { 1756 /* 1757 * Current can't be migrated, useless to reschedule, 1758 * let's hope p can move out. 1759 */ 1760 if (rq->curr->nr_cpus_allowed == 1 || 1761 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL)) 1762 return; 1763 1764 /* 1765 * p is migratable, so let's not schedule it and 1766 * see if it is pushed or pulled somewhere else. 1767 */ 1768 if (p->nr_cpus_allowed != 1 && 1769 cpudl_find(&rq->rd->cpudl, p, NULL)) 1770 return; 1771 1772 resched_curr(rq); 1773 } 1774 1775 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 1776 { 1777 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) { 1778 /* 1779 * This is OK, because current is on_cpu, which avoids it being 1780 * picked for load-balance and preemption/IRQs are still 1781 * disabled avoiding further scheduler activity on it and we've 1782 * not yet started the picking loop. 1783 */ 1784 rq_unpin_lock(rq, rf); 1785 pull_dl_task(rq); 1786 rq_repin_lock(rq, rf); 1787 } 1788 1789 return sched_stop_runnable(rq) || sched_dl_runnable(rq); 1790 } 1791 #endif /* CONFIG_SMP */ 1792 1793 /* 1794 * Only called when both the current and waking task are -deadline 1795 * tasks. 1796 */ 1797 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, 1798 int flags) 1799 { 1800 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { 1801 resched_curr(rq); 1802 return; 1803 } 1804 1805 #ifdef CONFIG_SMP 1806 /* 1807 * In the unlikely case current and p have the same deadline 1808 * let us try to decide what's the best thing to do... 
1809 */ 1810 if ((p->dl.deadline == rq->curr->dl.deadline) && 1811 !test_tsk_need_resched(rq->curr)) 1812 check_preempt_equal_dl(rq, p); 1813 #endif /* CONFIG_SMP */ 1814 } 1815 1816 #ifdef CONFIG_SCHED_HRTICK 1817 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) 1818 { 1819 hrtick_start(rq, p->dl.runtime); 1820 } 1821 #else /* !CONFIG_SCHED_HRTICK */ 1822 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) 1823 { 1824 } 1825 #endif 1826 1827 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) 1828 { 1829 p->se.exec_start = rq_clock_task(rq); 1830 1831 /* You can't push away the running task */ 1832 dequeue_pushable_dl_task(rq, p); 1833 1834 if (!first) 1835 return; 1836 1837 if (hrtick_enabled(rq)) 1838 start_hrtick_dl(rq, p); 1839 1840 if (rq->curr->sched_class != &dl_sched_class) 1841 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); 1842 1843 deadline_queue_push_tasks(rq); 1844 } 1845 1846 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, 1847 struct dl_rq *dl_rq) 1848 { 1849 struct rb_node *left = rb_first_cached(&dl_rq->root); 1850 1851 if (!left) 1852 return NULL; 1853 1854 return rb_entry(left, struct sched_dl_entity, rb_node); 1855 } 1856 1857 static struct task_struct *pick_next_task_dl(struct rq *rq) 1858 { 1859 struct sched_dl_entity *dl_se; 1860 struct dl_rq *dl_rq = &rq->dl; 1861 struct task_struct *p; 1862 1863 if (!sched_dl_runnable(rq)) 1864 return NULL; 1865 1866 dl_se = pick_next_dl_entity(rq, dl_rq); 1867 BUG_ON(!dl_se); 1868 p = dl_task_of(dl_se); 1869 set_next_task_dl(rq, p, true); 1870 return p; 1871 } 1872 1873 static void put_prev_task_dl(struct rq *rq, struct task_struct *p) 1874 { 1875 update_curr_dl(rq); 1876 1877 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); 1878 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) 1879 enqueue_pushable_dl_task(rq, p); 1880 } 1881 1882 /* 1883 * scheduler tick hitting a task of our scheduling class. 1884 * 1885 * NOTE: This function can be called remotely by the tick offload that 1886 * goes along full dynticks. Therefore no local assumption can be made 1887 * and everything must be accessed through the @rq and @curr passed in 1888 * parameters. 1889 */ 1890 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) 1891 { 1892 update_curr_dl(rq); 1893 1894 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); 1895 /* 1896 * Even when we have runtime, update_curr_dl() might have resulted in us 1897 * not being the leftmost task anymore. In that case NEED_RESCHED will 1898 * be set and schedule() will start a new hrtick for the next task. 
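 *
 * When we are still the leftmost task and have runtime left, re-arming
 * the hrtick below (for illustration: with 1.5ms of runtime remaining,
 * start_hrtick_dl() programs a timer ~1.5ms from now) enforces the
 * budget at the expected depletion point instead of waiting for the
 * next regular tick.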
1899 */ 1900 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 && 1901 is_leftmost(p, &rq->dl)) 1902 start_hrtick_dl(rq, p); 1903 } 1904 1905 static void task_fork_dl(struct task_struct *p) 1906 { 1907 /* 1908 * SCHED_DEADLINE tasks cannot fork and this is achieved through 1909 * sched_fork() 1910 */ 1911 } 1912 1913 #ifdef CONFIG_SMP 1914 1915 /* Only try algorithms three times */ 1916 #define DL_MAX_TRIES 3 1917 1918 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) 1919 { 1920 if (!task_running(rq, p) && 1921 cpumask_test_cpu(cpu, p->cpus_ptr)) 1922 return 1; 1923 return 0; 1924 } 1925 1926 /* 1927 * Return the earliest pushable rq's task, which is suitable to be executed 1928 * on the CPU, NULL otherwise: 1929 */ 1930 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu) 1931 { 1932 struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost; 1933 struct task_struct *p = NULL; 1934 1935 if (!has_pushable_dl_tasks(rq)) 1936 return NULL; 1937 1938 next_node: 1939 if (next_node) { 1940 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks); 1941 1942 if (pick_dl_task(rq, p, cpu)) 1943 return p; 1944 1945 next_node = rb_next(next_node); 1946 goto next_node; 1947 } 1948 1949 return NULL; 1950 } 1951 1952 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl); 1953 1954 static int find_later_rq(struct task_struct *task) 1955 { 1956 struct sched_domain *sd; 1957 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl); 1958 int this_cpu = smp_processor_id(); 1959 int cpu = task_cpu(task); 1960 1961 /* Make sure the mask is initialized first */ 1962 if (unlikely(!later_mask)) 1963 return -1; 1964 1965 if (task->nr_cpus_allowed == 1) 1966 return -1; 1967 1968 /* 1969 * We have to consider system topology and task affinity 1970 * first, then we can look for a suitable CPU. 1971 */ 1972 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) 1973 return -1; 1974 1975 /* 1976 * If we are here, some targets have been found, including 1977 * the most suitable which is, among the runqueues where the 1978 * current tasks have later deadlines than the task's one, the 1979 * rq with the latest possible one. 1980 * 1981 * Now we check how well this matches with task's 1982 * affinity and system topology. 1983 * 1984 * The last CPU where the task ran is our first 1985 * guess, since it is most likely cache-hot there. 1986 */ 1987 if (cpumask_test_cpu(cpu, later_mask)) 1988 return cpu; 1989 /* 1990 * Check if this_cpu is to be skipped (i.e., it is 1991 * not in the mask) or not. 1992 */ 1993 if (!cpumask_test_cpu(this_cpu, later_mask)) 1994 this_cpu = -1; 1995 1996 rcu_read_lock(); 1997 for_each_domain(cpu, sd) { 1998 if (sd->flags & SD_WAKE_AFFINE) { 1999 int best_cpu; 2000 2001 /* 2002 * If possible, preempting this_cpu is 2003 * cheaper than migrating. 2004 */ 2005 if (this_cpu != -1 && 2006 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { 2007 rcu_read_unlock(); 2008 return this_cpu; 2009 } 2010 2011 best_cpu = cpumask_first_and(later_mask, 2012 sched_domain_span(sd)); 2013 /* 2014 * Last chance: if a CPU in both later_mask 2015 * and the current sd span is valid, that becomes our 2016 * choice. Of course, the latest possible CPU is 2017 * already under consideration through later_mask.
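 *
 * In short, the preference order is: the task's previous CPU if it is
 * in later_mask, then this_cpu when it shares a SD_WAKE_AFFINE domain
 * with that CPU, then the first later_mask CPU inside the domain span,
 * and, failing all of that (see below), this_cpu or any CPU in
 * later_mask.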
2018 */ 2019 if (best_cpu < nr_cpu_ids) { 2020 rcu_read_unlock(); 2021 return best_cpu; 2022 } 2023 } 2024 } 2025 rcu_read_unlock(); 2026 2027 /* 2028 * At this point, all our guesses failed, we just return 2029 * 'something', and let the caller sort the things out. 2030 */ 2031 if (this_cpu != -1) 2032 return this_cpu; 2033 2034 cpu = cpumask_any(later_mask); 2035 if (cpu < nr_cpu_ids) 2036 return cpu; 2037 2038 return -1; 2039 } 2040 2041 /* Locks the rq it finds */ 2042 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) 2043 { 2044 struct rq *later_rq = NULL; 2045 int tries; 2046 int cpu; 2047 2048 for (tries = 0; tries < DL_MAX_TRIES; tries++) { 2049 cpu = find_later_rq(task); 2050 2051 if ((cpu == -1) || (cpu == rq->cpu)) 2052 break; 2053 2054 later_rq = cpu_rq(cpu); 2055 2056 if (later_rq->dl.dl_nr_running && 2057 !dl_time_before(task->dl.deadline, 2058 later_rq->dl.earliest_dl.curr)) { 2059 /* 2060 * Target rq has tasks of equal or earlier deadline, 2061 * retrying does not release any lock and is unlikely 2062 * to yield a different result. 2063 */ 2064 later_rq = NULL; 2065 break; 2066 } 2067 2068 /* Retry if something changed. */ 2069 if (double_lock_balance(rq, later_rq)) { 2070 if (unlikely(task_rq(task) != rq || 2071 !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) || 2072 task_running(rq, task) || 2073 !dl_task(task) || 2074 !task_on_rq_queued(task))) { 2075 double_unlock_balance(rq, later_rq); 2076 later_rq = NULL; 2077 break; 2078 } 2079 } 2080 2081 /* 2082 * If the rq we found has no -deadline task, or 2083 * its earliest one has a later deadline than our 2084 * task, the rq is a good one. 2085 */ 2086 if (!later_rq->dl.dl_nr_running || 2087 dl_time_before(task->dl.deadline, 2088 later_rq->dl.earliest_dl.curr)) 2089 break; 2090 2091 /* Otherwise we try again. */ 2092 double_unlock_balance(rq, later_rq); 2093 later_rq = NULL; 2094 } 2095 2096 return later_rq; 2097 } 2098 2099 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) 2100 { 2101 struct task_struct *p; 2102 2103 if (!has_pushable_dl_tasks(rq)) 2104 return NULL; 2105 2106 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost, 2107 struct task_struct, pushable_dl_tasks); 2108 2109 BUG_ON(rq->cpu != task_cpu(p)); 2110 BUG_ON(task_current(rq, p)); 2111 BUG_ON(p->nr_cpus_allowed <= 1); 2112 2113 BUG_ON(!task_on_rq_queued(p)); 2114 BUG_ON(!dl_task(p)); 2115 2116 return p; 2117 } 2118 2119 /* 2120 * See if the non running -deadline tasks on this rq 2121 * can be sent to some other CPU where they can preempt 2122 * and start executing. 2123 */ 2124 static int push_dl_task(struct rq *rq) 2125 { 2126 struct task_struct *next_task; 2127 struct rq *later_rq; 2128 int ret = 0; 2129 2130 if (!rq->dl.overloaded) 2131 return 0; 2132 2133 next_task = pick_next_pushable_dl_task(rq); 2134 if (!next_task) 2135 return 0; 2136 2137 retry: 2138 if (WARN_ON(next_task == rq->curr)) 2139 return 0; 2140 2141 /* 2142 * If next_task preempts rq->curr, and rq->curr 2143 * can move away, it makes sense to just reschedule 2144 * without going further in pushing next_task. 
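 *
 * For example (hypothetical deadlines): if next_task's deadline is 10ms
 * away while rq->curr's is 20ms away and curr is allowed on more than
 * one CPU, preempting curr locally is cheaper than migrating next_task;
 * curr itself can then be pushed or pulled if needed.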
2145 */ 2146 if (dl_task(rq->curr) && 2147 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && 2148 rq->curr->nr_cpus_allowed > 1) { 2149 resched_curr(rq); 2150 return 0; 2151 } 2152 2153 /* We might release rq lock */ 2154 get_task_struct(next_task); 2155 2156 /* Will lock the rq it'll find */ 2157 later_rq = find_lock_later_rq(next_task, rq); 2158 if (!later_rq) { 2159 struct task_struct *task; 2160 2161 /* 2162 * We must check all this again, since 2163 * find_lock_later_rq releases rq->lock and it is 2164 * then possible that next_task has migrated. 2165 */ 2166 task = pick_next_pushable_dl_task(rq); 2167 if (task == next_task) { 2168 /* 2169 * The task is still there. We don't try 2170 * again, some other CPU will pull it when ready. 2171 */ 2172 goto out; 2173 } 2174 2175 if (!task) 2176 /* No more tasks */ 2177 goto out; 2178 2179 put_task_struct(next_task); 2180 next_task = task; 2181 goto retry; 2182 } 2183 2184 deactivate_task(rq, next_task, 0); 2185 set_task_cpu(next_task, later_rq->cpu); 2186 2187 /* 2188 * Update the later_rq clock here, because the clock is used 2189 * by the cpufreq_update_util() inside __add_running_bw(). 2190 */ 2191 update_rq_clock(later_rq); 2192 activate_task(later_rq, next_task, ENQUEUE_NOCLOCK); 2193 ret = 1; 2194 2195 resched_curr(later_rq); 2196 2197 double_unlock_balance(rq, later_rq); 2198 2199 out: 2200 put_task_struct(next_task); 2201 2202 return ret; 2203 } 2204 2205 static void push_dl_tasks(struct rq *rq) 2206 { 2207 /* push_dl_task() will return true if it moved a -deadline task */ 2208 while (push_dl_task(rq)) 2209 ; 2210 } 2211 2212 static void pull_dl_task(struct rq *this_rq) 2213 { 2214 int this_cpu = this_rq->cpu, cpu; 2215 struct task_struct *p; 2216 bool resched = false; 2217 struct rq *src_rq; 2218 u64 dmin = LONG_MAX; 2219 2220 if (likely(!dl_overloaded(this_rq))) 2221 return; 2222 2223 /* 2224 * Match the barrier from dl_set_overloaded; this guarantees that if we 2225 * see overloaded we must also see the dlo_mask bit. 2226 */ 2227 smp_rmb(); 2228 2229 for_each_cpu(cpu, this_rq->rd->dlo_mask) { 2230 if (this_cpu == cpu) 2231 continue; 2232 2233 src_rq = cpu_rq(cpu); 2234 2235 /* 2236 * It looks racy, and it is! However, as in sched_rt.c, 2237 * we are fine with this. 2238 */ 2239 if (this_rq->dl.dl_nr_running && 2240 dl_time_before(this_rq->dl.earliest_dl.curr, 2241 src_rq->dl.earliest_dl.next)) 2242 continue; 2243 2244 /* Might drop this_rq->lock */ 2245 double_lock_balance(this_rq, src_rq); 2246 2247 /* 2248 * If there are no more pullable tasks on the 2249 * rq, we're done with it. 2250 */ 2251 if (src_rq->dl.dl_nr_running <= 1) 2252 goto skip; 2253 2254 p = pick_earliest_pushable_dl_task(src_rq, this_cpu); 2255 2256 /* 2257 * We found a task to be pulled if: 2258 * - it preempts our current (if there's one), 2259 * - it will preempt the last one we pulled (if any). 2260 */ 2261 if (p && dl_time_before(p->dl.deadline, dmin) && 2262 (!this_rq->dl.dl_nr_running || 2263 dl_time_before(p->dl.deadline, 2264 this_rq->dl.earliest_dl.curr))) { 2265 WARN_ON(p == src_rq->curr); 2266 WARN_ON(!task_on_rq_queued(p)); 2267 2268 /* 2269 * Then we pull iff p has actually an earlier 2270 * deadline than the current task of its runqueue.
2271 */ 2272 if (dl_time_before(p->dl.deadline, 2273 src_rq->curr->dl.deadline)) 2274 goto skip; 2275 2276 resched = true; 2277 2278 deactivate_task(src_rq, p, 0); 2279 set_task_cpu(p, this_cpu); 2280 activate_task(this_rq, p, 0); 2281 dmin = p->dl.deadline; 2282 2283 /* Is there any other task even earlier? */ 2284 } 2285 skip: 2286 double_unlock_balance(this_rq, src_rq); 2287 } 2288 2289 if (resched) 2290 resched_curr(this_rq); 2291 } 2292 2293 /* 2294 * Since the task is not running and a reschedule is not going to happen 2295 * anytime soon on its runqueue, we try pushing it away now. 2296 */ 2297 static void task_woken_dl(struct rq *rq, struct task_struct *p) 2298 { 2299 if (!task_running(rq, p) && 2300 !test_tsk_need_resched(rq->curr) && 2301 p->nr_cpus_allowed > 1 && 2302 dl_task(rq->curr) && 2303 (rq->curr->nr_cpus_allowed < 2 || 2304 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { 2305 push_dl_tasks(rq); 2306 } 2307 } 2308 2309 static void set_cpus_allowed_dl(struct task_struct *p, 2310 const struct cpumask *new_mask) 2311 { 2312 struct root_domain *src_rd; 2313 struct rq *rq; 2314 2315 BUG_ON(!dl_task(p)); 2316 2317 rq = task_rq(p); 2318 src_rd = rq->rd; 2319 /* 2320 * Migrating a SCHED_DEADLINE task between exclusive 2321 * cpusets (different root_domains) entails a bandwidth 2322 * update. We already made space for us in the destination 2323 * domain (see cpuset_can_attach()). 2324 */ 2325 if (!cpumask_intersects(src_rd->span, new_mask)) { 2326 struct dl_bw *src_dl_b; 2327 2328 src_dl_b = dl_bw_of(cpu_of(rq)); 2329 /* 2330 * We now free resources of the root_domain we are migrating 2331 * off. In the worst case, sched_setattr() may temporarily fail 2332 * until we complete the update. 2333 */ 2334 raw_spin_lock(&src_dl_b->lock); 2335 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); 2336 raw_spin_unlock(&src_dl_b->lock); 2337 } 2338 2339 set_cpus_allowed_common(p, new_mask); 2340 } 2341 2342 /* Assumes rq->lock is held */ 2343 static void rq_online_dl(struct rq *rq) 2344 { 2345 if (rq->dl.overloaded) 2346 dl_set_overload(rq); 2347 2348 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu); 2349 if (rq->dl.dl_nr_running > 0) 2350 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr); 2351 } 2352 2353 /* Assumes rq->lock is held */ 2354 static void rq_offline_dl(struct rq *rq) 2355 { 2356 if (rq->dl.overloaded) 2357 dl_clear_overload(rq); 2358 2359 cpudl_clear(&rq->rd->cpudl, rq->cpu); 2360 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu); 2361 } 2362 2363 void __init init_sched_dl_class(void) 2364 { 2365 unsigned int i; 2366 2367 for_each_possible_cpu(i) 2368 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i), 2369 GFP_KERNEL, cpu_to_node(i)); 2370 } 2371 2372 void dl_add_task_root_domain(struct task_struct *p) 2373 { 2374 struct rq_flags rf; 2375 struct rq *rq; 2376 struct dl_bw *dl_b; 2377 2378 rq = task_rq_lock(p, &rf); 2379 if (!dl_task(p)) 2380 goto unlock; 2381 2382 dl_b = &rq->rd->dl_bw; 2383 raw_spin_lock(&dl_b->lock); 2384 2385 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); 2386 2387 raw_spin_unlock(&dl_b->lock); 2388 2389 unlock: 2390 task_rq_unlock(rq, p, &rf); 2391 } 2392 2393 void dl_clear_root_domain(struct root_domain *rd) 2394 { 2395 unsigned long flags; 2396 2397 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags); 2398 rd->dl_bw.total_bw = 0; 2399 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags); 2400 } 2401 2402 #endif /* CONFIG_SMP */ 2403 2404 static void switched_from_dl(struct rq *rq, struct task_struct *p) 2405 { 2406 /* 2407 *
task_non_contending() can start the "inactive timer" (if the 0-lag 2408 * time is in the future). If the task switches back to dl before 2409 * the "inactive timer" fires, it can continue to consume its current 2410 * runtime using its current deadline. If it stays outside of 2411 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer() 2412 * will reset the task parameters. 2413 */ 2414 if (task_on_rq_queued(p) && p->dl.dl_runtime) 2415 task_non_contending(p); 2416 2417 if (!task_on_rq_queued(p)) { 2418 /* 2419 * Inactive timer is armed. However, p is leaving DEADLINE and 2420 * might migrate away from this rq while continuing to run on 2421 * some other class. We need to remove its contribution from 2422 * this rq running_bw now, or sub_rq_bw (below) will complain. 2423 */ 2424 if (p->dl.dl_non_contending) 2425 sub_running_bw(&p->dl, &rq->dl); 2426 sub_rq_bw(&p->dl, &rq->dl); 2427 } 2428 2429 /* 2430 * We cannot use inactive_task_timer() to invoke sub_running_bw() 2431 * at the 0-lag time, because the task could have been migrated 2432 * while SCHED_OTHER in the meanwhile. 2433 */ 2434 if (p->dl.dl_non_contending) 2435 p->dl.dl_non_contending = 0; 2436 2437 /* 2438 * Since this might be the only -deadline task on the rq, 2439 * this is the right place to try to pull some other one 2440 * from an overloaded CPU, if any. 2441 */ 2442 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) 2443 return; 2444 2445 deadline_queue_pull_task(rq); 2446 } 2447 2448 /* 2449 * When switching to -deadline, we may overload the rq, then 2450 * we try to push someone off, if possible. 2451 */ 2452 static void switched_to_dl(struct rq *rq, struct task_struct *p) 2453 { 2454 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) 2455 put_task_struct(p); 2456 2457 /* If p is not queued we will update its parameters at next wakeup. */ 2458 if (!task_on_rq_queued(p)) { 2459 add_rq_bw(&p->dl, &rq->dl); 2460 2461 return; 2462 } 2463 2464 if (rq->curr != p) { 2465 #ifdef CONFIG_SMP 2466 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) 2467 deadline_queue_push_tasks(rq); 2468 #endif 2469 if (dl_task(rq->curr)) 2470 check_preempt_curr_dl(rq, p, 0); 2471 else 2472 resched_curr(rq); 2473 } 2474 } 2475 2476 /* 2477 * If the scheduling parameters of a -deadline task changed, 2478 * a push or pull operation might be needed. 2479 */ 2480 static void prio_changed_dl(struct rq *rq, struct task_struct *p, 2481 int oldprio) 2482 { 2483 if (task_on_rq_queued(p) || rq->curr == p) { 2484 #ifdef CONFIG_SMP 2485 /* 2486 * This might be too much, but unfortunately 2487 * we don't have the old deadline value, and 2488 * we can't tell if the task is increasing 2489 * or lowering its prio, so... 2490 */ 2491 if (!rq->dl.overloaded) 2492 deadline_queue_pull_task(rq); 2493 2494 /* 2495 * If we now have an earlier deadline task than p, 2496 * then reschedule, provided p is still on this 2497 * runqueue. 2498 */ 2499 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) 2500 resched_curr(rq); 2501 #else 2502 /* 2503 * Again, we don't know if p has an earlier 2504 * or later deadline, so let's blindly set a 2505 * (maybe not needed) rescheduling point.
2506 */ 2507 resched_curr(rq); 2508 #endif /* CONFIG_SMP */ 2509 } 2510 } 2511 2512 const struct sched_class dl_sched_class 2513 __section("__dl_sched_class") = { 2514 .enqueue_task = enqueue_task_dl, 2515 .dequeue_task = dequeue_task_dl, 2516 .yield_task = yield_task_dl, 2517 2518 .check_preempt_curr = check_preempt_curr_dl, 2519 2520 .pick_next_task = pick_next_task_dl, 2521 .put_prev_task = put_prev_task_dl, 2522 .set_next_task = set_next_task_dl, 2523 2524 #ifdef CONFIG_SMP 2525 .balance = balance_dl, 2526 .select_task_rq = select_task_rq_dl, 2527 .migrate_task_rq = migrate_task_rq_dl, 2528 .set_cpus_allowed = set_cpus_allowed_dl, 2529 .rq_online = rq_online_dl, 2530 .rq_offline = rq_offline_dl, 2531 .task_woken = task_woken_dl, 2532 #endif 2533 2534 .task_tick = task_tick_dl, 2535 .task_fork = task_fork_dl, 2536 2537 .prio_changed = prio_changed_dl, 2538 .switched_from = switched_from_dl, 2539 .switched_to = switched_to_dl, 2540 2541 .update_curr = update_curr_dl, 2542 }; 2543 2544 int sched_dl_global_validate(void) 2545 { 2546 u64 runtime = global_rt_runtime(); 2547 u64 period = global_rt_period(); 2548 u64 new_bw = to_ratio(period, runtime); 2549 struct dl_bw *dl_b; 2550 int cpu, ret = 0; 2551 unsigned long flags; 2552 2553 /* 2554 * Here we want to check the bandwidth not being set to some 2555 * value smaller than the currently allocated bandwidth in 2556 * any of the root_domains. 2557 * 2558 * FIXME: Cycling on all the CPUs is overdoing, but simpler than 2559 * cycling on root_domains... Discussion on different/better 2560 * solutions is welcome! 2561 */ 2562 for_each_possible_cpu(cpu) { 2563 rcu_read_lock_sched(); 2564 dl_b = dl_bw_of(cpu); 2565 2566 raw_spin_lock_irqsave(&dl_b->lock, flags); 2567 if (new_bw < dl_b->total_bw) 2568 ret = -EBUSY; 2569 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2570 2571 rcu_read_unlock_sched(); 2572 2573 if (ret) 2574 break; 2575 } 2576 2577 return ret; 2578 } 2579 2580 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq) 2581 { 2582 if (global_rt_runtime() == RUNTIME_INF) { 2583 dl_rq->bw_ratio = 1 << RATIO_SHIFT; 2584 dl_rq->extra_bw = 1 << BW_SHIFT; 2585 } else { 2586 dl_rq->bw_ratio = to_ratio(global_rt_runtime(), 2587 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT); 2588 dl_rq->extra_bw = to_ratio(global_rt_period(), 2589 global_rt_runtime()); 2590 } 2591 } 2592 2593 void sched_dl_do_global(void) 2594 { 2595 u64 new_bw = -1; 2596 struct dl_bw *dl_b; 2597 int cpu; 2598 unsigned long flags; 2599 2600 def_dl_bandwidth.dl_period = global_rt_period(); 2601 def_dl_bandwidth.dl_runtime = global_rt_runtime(); 2602 2603 if (global_rt_runtime() != RUNTIME_INF) 2604 new_bw = to_ratio(global_rt_period(), global_rt_runtime()); 2605 2606 /* 2607 * FIXME: As above... 2608 */ 2609 for_each_possible_cpu(cpu) { 2610 rcu_read_lock_sched(); 2611 dl_b = dl_bw_of(cpu); 2612 2613 raw_spin_lock_irqsave(&dl_b->lock, flags); 2614 dl_b->bw = new_bw; 2615 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2616 2617 rcu_read_unlock_sched(); 2618 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl); 2619 } 2620 } 2621 2622 /* 2623 * We must be sure that accepting a new task (or allowing changing the 2624 * parameters of an existing one) is consistent with the bandwidth 2625 * constraints. If yes, this function also accordingly updates the currently 2626 * allocated bandwidth to reflect the new situation. 2627 * 2628 * This function is called while holding p's rq->lock. 
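 *
 * For a rough feel of the arithmetic (illustrative, assuming the default
 * ~95% RT/DL bandwidth limit and four equal-capacity CPUs): up to about
 * 3.8 "CPUs worth" of deadline bandwidth can be admitted in the root
 * domain, and a task with sched_runtime = 10ms and sched_period = 100ms
 * accounts for 0.1 of a CPU of that budget.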
2629 */ 2630 int sched_dl_overflow(struct task_struct *p, int policy, 2631 const struct sched_attr *attr) 2632 { 2633 u64 period = attr->sched_period ?: attr->sched_deadline; 2634 u64 runtime = attr->sched_runtime; 2635 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; 2636 int cpus, err = -1, cpu = task_cpu(p); 2637 struct dl_bw *dl_b = dl_bw_of(cpu); 2638 unsigned long cap; 2639 2640 if (attr->sched_flags & SCHED_FLAG_SUGOV) 2641 return 0; 2642 2643 /* !deadline task may carry old deadline bandwidth */ 2644 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) 2645 return 0; 2646 2647 /* 2648 * Whether a task enters -deadline, leaves it, or stays -deadline but 2649 * changes its parameters, we may need to update the total allocated 2650 * bandwidth of the container accordingly. 2651 */ 2652 raw_spin_lock(&dl_b->lock); 2653 cpus = dl_bw_cpus(cpu); 2654 cap = dl_bw_capacity(cpu); 2655 2656 if (dl_policy(policy) && !task_has_dl_policy(p) && 2657 !__dl_overflow(dl_b, cap, 0, new_bw)) { 2658 if (hrtimer_active(&p->dl.inactive_timer)) 2659 __dl_sub(dl_b, p->dl.dl_bw, cpus); 2660 __dl_add(dl_b, new_bw, cpus); 2661 err = 0; 2662 } else if (dl_policy(policy) && task_has_dl_policy(p) && 2663 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { 2664 /* 2665 * XXX this is slightly incorrect: when the task 2666 * utilization decreases, we should delay the total 2667 * utilization change until the task's 0-lag point. 2668 * But this would require setting the task's "inactive 2669 * timer" when the task is not inactive. 2670 */ 2671 __dl_sub(dl_b, p->dl.dl_bw, cpus); 2672 __dl_add(dl_b, new_bw, cpus); 2673 dl_change_utilization(p, new_bw); 2674 err = 0; 2675 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { 2676 /* 2677 * Do not decrease the total deadline utilization here, 2678 * switched_from_dl() will take care to do it at the correct 2679 * (0-lag) time. 2680 */ 2681 err = 0; 2682 } 2683 raw_spin_unlock(&dl_b->lock); 2684 2685 return err; 2686 } 2687 2688 /* 2689 * This function initializes the sched_dl_entity of a task that is becoming 2690 * SCHED_DEADLINE. 2691 * 2692 * Only the static values are considered here, the actual runtime and the 2693 * absolute deadline will be properly calculated when the task is enqueued 2694 * for the first time with its new policy. 2695 */ 2696 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr) 2697 { 2698 struct sched_dl_entity *dl_se = &p->dl; 2699 2700 dl_se->dl_runtime = attr->sched_runtime; 2701 dl_se->dl_deadline = attr->sched_deadline; 2702 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 2703 dl_se->flags = attr->sched_flags; 2704 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 2705 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime); 2706 } 2707 2708 void __getparam_dl(struct task_struct *p, struct sched_attr *attr) 2709 { 2710 struct sched_dl_entity *dl_se = &p->dl; 2711 2712 attr->sched_priority = p->rt_priority; 2713 attr->sched_runtime = dl_se->dl_runtime; 2714 attr->sched_deadline = dl_se->dl_deadline; 2715 attr->sched_period = dl_se->dl_period; 2716 attr->sched_flags = dl_se->flags; 2717 } 2718 2719 /* 2720 * Default limits for DL period; on the top end we guard against small util 2721 * tasks still getting ridiculously long effective runtimes, on the bottom end we 2722 * guard against timer DoS.
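 *
 * The limits below are expressed in microseconds (they are converted to
 * nanoseconds in __checkparam_dl()), so by default a period above
 * roughly 4.19s or below 100us is rejected unless the sysctls are
 * retuned.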
2723 */ 2724 unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */ 2725 unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */ 2726 2727 /* 2728 * This function validates the new parameters of a -deadline task. 2729 * We require the deadline to be non-zero and greater than or equal 2730 * to the runtime, and the period to be either zero or greater than 2731 * or equal to the deadline. Furthermore, we have to be sure that 2732 * user parameters are above the internal resolution of 1us (we 2733 * check sched_runtime only, since it is always the smallest of the three) and 2734 * below 2^63 ns (we have to check both sched_deadline and 2735 * sched_period, as the latter can be zero). 2736 */ 2737 bool __checkparam_dl(const struct sched_attr *attr) 2738 { 2739 u64 period, max, min; 2740 2741 /* special dl tasks don't actually use any parameter */ 2742 if (attr->sched_flags & SCHED_FLAG_SUGOV) 2743 return true; 2744 2745 /* deadline != 0 */ 2746 if (attr->sched_deadline == 0) 2747 return false; 2748 2749 /* 2750 * Since we truncate DL_SCALE bits, make sure we're at least 2751 * that big. 2752 */ 2753 if (attr->sched_runtime < (1ULL << DL_SCALE)) 2754 return false; 2755 2756 /* 2757 * Since we use the MSB for wrap-around and sign issues, make 2758 * sure it's not set (mind that period can be equal to zero). 2759 */ 2760 if (attr->sched_deadline & (1ULL << 63) || 2761 attr->sched_period & (1ULL << 63)) 2762 return false; 2763 2764 period = attr->sched_period; 2765 if (!period) 2766 period = attr->sched_deadline; 2767 2768 /* runtime <= deadline <= period (if period != 0) */ 2769 if (period < attr->sched_deadline || 2770 attr->sched_deadline < attr->sched_runtime) 2771 return false; 2772 2773 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC; 2774 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC; 2775 2776 if (period < min || period > max) 2777 return false; 2778 2779 return true; 2780 } 2781 2782 /* 2783 * This function clears the sched_dl_entity static params.
2784 */ 2785 void __dl_clear_params(struct task_struct *p) 2786 { 2787 struct sched_dl_entity *dl_se = &p->dl; 2788 2789 dl_se->dl_runtime = 0; 2790 dl_se->dl_deadline = 0; 2791 dl_se->dl_period = 0; 2792 dl_se->flags = 0; 2793 dl_se->dl_bw = 0; 2794 dl_se->dl_density = 0; 2795 2796 dl_se->dl_throttled = 0; 2797 dl_se->dl_yielded = 0; 2798 dl_se->dl_non_contending = 0; 2799 dl_se->dl_overrun = 0; 2800 2801 #ifdef CONFIG_RT_MUTEXES 2802 dl_se->pi_se = dl_se; 2803 #endif 2804 } 2805 2806 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) 2807 { 2808 struct sched_dl_entity *dl_se = &p->dl; 2809 2810 if (dl_se->dl_runtime != attr->sched_runtime || 2811 dl_se->dl_deadline != attr->sched_deadline || 2812 dl_se->dl_period != attr->sched_period || 2813 dl_se->flags != attr->sched_flags) 2814 return true; 2815 2816 return false; 2817 } 2818 2819 #ifdef CONFIG_SMP 2820 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed) 2821 { 2822 unsigned long flags, cap; 2823 unsigned int dest_cpu; 2824 struct dl_bw *dl_b; 2825 bool overflow; 2826 int ret; 2827 2828 dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed); 2829 2830 rcu_read_lock_sched(); 2831 dl_b = dl_bw_of(dest_cpu); 2832 raw_spin_lock_irqsave(&dl_b->lock, flags); 2833 cap = dl_bw_capacity(dest_cpu); 2834 overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw); 2835 if (overflow) { 2836 ret = -EBUSY; 2837 } else { 2838 /* 2839 * We reserve space for this task in the destination 2840 * root_domain, as we can't fail after this point. 2841 * We will free resources in the source root_domain 2842 * later on (see set_cpus_allowed_dl()). 2843 */ 2844 int cpus = dl_bw_cpus(dest_cpu); 2845 2846 __dl_add(dl_b, p->dl.dl_bw, cpus); 2847 ret = 0; 2848 } 2849 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2850 rcu_read_unlock_sched(); 2851 2852 return ret; 2853 } 2854 2855 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, 2856 const struct cpumask *trial) 2857 { 2858 int ret = 1, trial_cpus; 2859 struct dl_bw *cur_dl_b; 2860 unsigned long flags; 2861 2862 rcu_read_lock_sched(); 2863 cur_dl_b = dl_bw_of(cpumask_any(cur)); 2864 trial_cpus = cpumask_weight(trial); 2865 2866 raw_spin_lock_irqsave(&cur_dl_b->lock, flags); 2867 if (cur_dl_b->bw != -1 && 2868 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw) 2869 ret = 0; 2870 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); 2871 rcu_read_unlock_sched(); 2872 2873 return ret; 2874 } 2875 2876 bool dl_cpu_busy(unsigned int cpu) 2877 { 2878 unsigned long flags, cap; 2879 struct dl_bw *dl_b; 2880 bool overflow; 2881 2882 rcu_read_lock_sched(); 2883 dl_b = dl_bw_of(cpu); 2884 raw_spin_lock_irqsave(&dl_b->lock, flags); 2885 cap = dl_bw_capacity(cpu); 2886 overflow = __dl_overflow(dl_b, cap, 0, 0); 2887 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2888 rcu_read_unlock_sched(); 2889 2890 return overflow; 2891 } 2892 #endif 2893 2894 #ifdef CONFIG_SCHED_DEBUG 2895 void print_dl_stats(struct seq_file *m, int cpu) 2896 { 2897 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); 2898 } 2899 #endif /* CONFIG_SCHED_DEBUG */ 2900
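/*
 * Illustrative userspace usage (a minimal sketch, not part of this file's
 * build): the static parameters consumed by __checkparam_dl() and
 * __setparam_dl() above are usually supplied through the sched_setattr()
 * syscall, with all times in nanoseconds and runtime <= deadline <= period:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	(10 ms)
 *		.sched_deadline	=  30 * 1000 * 1000,	(30 ms)
 *		.sched_period	= 100 * 1000 * 1000,	(100 ms)
 *	};
 *
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */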