/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"

#include <linux/slab.h>

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq)
{
	dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif
}
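
/*
 * A minimal user-space sketch of what the dl_bw numbers above represent.
 * This is illustrative only: to_ratio() and the admission test live in
 * core.c/sched.h, not here, and the constants below (20-bit fixed point,
 * 95% default coming from sched_rt_runtime_us/sched_rt_period_us, four
 * CPUs) are assumptions for the example, not something this file defines.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* Fixed-point bandwidth, cf. to_ratio(): runtime/period in 1/2^20 units. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << 20) / period;
}

int main(void)
{
	/* Root domain capacity: 95% of each CPU by default. */
	uint64_t rd_bw = to_ratio(1000000, 950000);	/* ~996147 */

	/* A task asking for 10ms every 100ms consumes ~10% of one CPU. */
	uint64_t task_bw = to_ratio(100 * 1000 * 1000, 10 * 1000 * 1000);

	/*
	 * Admission control (roughly what __dl_overflow() checks): reject
	 * if the sum of admitted task bandwidths would exceed rd_bw * cpus.
	 */
	uint64_t total_bw = 0, nr_cpus = 4;
	int admitted = (total_bw + task_bw <= rd_bw * nr_cpus);

	printf("task_bw=%llu admitted=%d\n",
	       (unsigned long long)task_bw, admitted);
	return 0;
}
#endif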

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (tsk_nr_cpus_allowed(p) > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (tsk_nr_cpus_allowed(p) > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost) {
		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
		dl_rq->earliest_dl.next = p->dl.deadline;
	}

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		dl_rq->pushable_dl_tasks_leftmost = next_node;
		if (next_node) {
			dl_rq->earliest_dl.next = rb_entry(next_node,
				struct task_struct, pushable_dl_tasks)->dl.deadline;
		}
	}

	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}

static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_dl_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}
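
/*
 * Both rb-trees in this file (the main EDF tree and the pushable-tasks
 * tree above) are ordered by absolute deadline through dl_entity_preempt()
 * and dl_time_before(), which live in sched.h. The sketch below is a
 * stand-alone illustration of the wrap-safe comparison they are assumed
 * to rely on ((s64)(a - b) < 0); it is not the scheduler's code.
 */
#if 0
#include <assert.h>
#include <stdint.h>

/* Wrap-safe "a is earlier than b" for u64 nanosecond deadlines. */
static int dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t a = UINT64_MAX - 10;
	uint64_t b = a + 15;	/* wraps past zero, so it is "later" */

	/* Plain case: 1ms is earlier than 2ms. */
	assert(dl_time_before(1000000, 2000000));

	/*
	 * Wrap case: a raw 'a < b' would be false here, but dl_time_before()
	 * still reports a as the earlier deadline.
	 */
	assert(dl_time_before(a, b));
	return 0;
}
#endif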

static inline
void queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;
	bool fallback = false;

	later_rq = find_lock_later_rq(p, rq);

	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online cpu.
		 */
		fallback = true;
		cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
		if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable cpu.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	/*
	 * By now the task is replenished and enqueued; migrate it.
	 */
	deactivate_task(rq, p, 0);
	set_task_cpu(p, later_rq->cpu);
	activate_task(later_rq, p, 0);

	if (!fallback)
		resched_curr(later_rq);

	double_unlock_balance(later_rq, rq);

	return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void queue_push_tasks(struct rq *rq)
{
}

static inline void queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
				       struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

	/*
	 * We are racing with the deadline timer. So, do nothing because
	 * the deadline timer handler will take care of properly recharging
	 * the runtime and postponing the deadline.
	 */
	if (dl_se->dl_throttled)
		return;

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
	dl_se->runtime = pi_se->dl_runtime;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want a misbehaving entity to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or having underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}
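
/*
 * Illustrative sketch of the replenishment rule above, outside the kernel.
 * The numbers are made up: a task with runtime 10ms / period 30ms that ran
 * 25ms into debt needs its deadline pushed three periods into the future
 * before its budget becomes positive again. This mirrors the while loop in
 * replenish_dl_entity(); it is not the scheduler's code.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t dl_runtime = 10000000;		/* 10ms reservation	 */
	int64_t dl_period = 30000000;		/* every 30ms		 */
	int64_t runtime = -25000000;		/* 25ms overrun		 */
	uint64_t deadline = 100000000;		/* current abs. deadline */
	int postponements = 0;

	while (runtime <= 0) {
		deadline += dl_period;
		runtime += dl_runtime;
		postponements++;
	}

	/* -25ms + 3 * 10ms = 5ms left, deadline moved 3 periods away. */
	printf("postponed %d times, runtime=%lld deadline=%llu\n",
	       postponements, (long long)runtime,
	       (unsigned long long)deadline);
	return 0;
}
#endif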

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * tasks with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}

/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}
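
/*
 * Worked example of the overflow check above, as plain arithmetic. The
 * task parameters are invented; DL_SCALE comes from sched.h and is assumed
 * to be 10 (ns scaled down to roughly us). A task that woke up with 3ms of
 * budget left but only 5ms to its old deadline would exceed its 10ms/30ms
 * reservation (0.6 > 0.33), so update_dl_entity() gives it a fresh deadline
 * and a full runtime instead of recycling the old parameters.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define DL_SCALE	10

int main(void)
{
	uint64_t dl_runtime = 10000000;		/* 10ms per instance	*/
	uint64_t dl_period = 30000000;		/* 30ms period		*/
	uint64_t runtime = 3000000;		/* 3ms budget left	*/
	uint64_t deadline_minus_t = 5000000;	/* 5ms to old deadline	*/

	uint64_t left = (dl_period >> DL_SCALE) * (runtime >> DL_SCALE);
	uint64_t right = (deadline_minus_t >> DL_SCALE) *
			 (dl_runtime >> DL_SCALE);

	/* left > right <=> runtime/(deadline - t) > dl_runtime/dl_period */
	printf("overflow=%d\n", right < left);	/* prints overflow=1 */
	return 0;
}
#endif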

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->dl_timer;
	struct rq *rq = task_rq(p);
	ktime_t now, act;
	s64 delta;

	lockdep_assert_held(&rq->lock);

	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_se->deadline);
	now = hrtimer_cb_get_time(timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	/*
	 * !enqueued will guarantee another callback; even if one is already in
	 * progress. This ensures a balanced {get,put}_task_struct().
	 *
	 * The race against __run_timer() clearing the enqueued state is
	 * harmless because we're holding task_rq()->lock, therefore the timer
	 * expiring after we've done the check will wait on its task_rq_lock()
	 * and observe our state.
	 */
	if (!hrtimer_is_queued(timer)) {
		get_task_struct(p);
		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
	}

	return 1;
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is still active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE (through switched_from_dl()).
	 */
	if (!dl_task(p)) {
		__dl_clear_params(p);
		goto unlock;
	}

	/*
	 * The task might have been boosted by someone else and might be in the
	 * boosting/deboosting path; in that case it's not throttled.
	 */
	if (dl_se->dl_boosted)
		goto unlock;

	/*
	 * Spurious timer due to start_dl_timer() race; or we already received
	 * a replenishment from rt_mutex_setprio().
	 */
	if (!dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	/*
	 * If the throttle happened during sched-out; like:
	 *
	 *   schedule()
	 *     deactivate_task()
	 *       dequeue_task_dl()
	 *         update_curr_dl()
	 *           start_dl_timer()
	 *         __dequeue_task_dl()
	 *     prev->on_rq = 0;
	 *
	 * We can be both throttled and !queued. Replenish the counter
	 * but do not enqueue -- wait for our wakeup to do that.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se, dl_se);
		goto unlock;
	}

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->curr))
		check_preempt_curr_dl(rq, p, 0);
	else
		resched_curr(rq);

#ifdef CONFIG_SMP
	/*
	 * Perform balancing operations here; after the replenishments. We
	 * cannot drop rq->lock before this, otherwise the assertion in
	 * start_dl_timer() about not missing updates is not true.
	 *
	 * If we find that the rq the task was on is no longer available, we
	 * need to select a new rq.
	 *
	 * XXX figure out if select_task_rq_dl() deals with offline cpus.
	 */
	if (unlikely(!rq->online))
		rq = dl_task_offline_migration(rq, p);

	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's safe to drop
		 * rq->lock.
		 */
		lockdep_unpin_lock(&rq->lock, rf.cookie);
		push_dl_task(rq);
		lockdep_repin_lock(&rq->lock, rf.cookie);
	}
#endif

unlock:
	task_rq_unlock(rq, p, &rf);

	/*
	 * This can free the task_struct, including this hrtimer, do not touch
	 * anything related to that after this.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
	return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0)) {
		if (unlikely(dl_se->dl_yielded))
			goto throttle;
		return;
	}

	/* kick cpufreq (see the comment in linux/cpufreq.h). */
	if (cpu_of(rq) == smp_processor_id())
		cpufreq_trigger_update(rq_clock(rq));

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	dl_se->runtime -= delta_exec;

throttle:
	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
		dl_se->dl_throttled = 1;
		__dequeue_task_dl(rq, curr, 0);
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there are a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us in line; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	} else {
		struct rb_node *leftmost = dl_rq->rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->rb_leftmost = &dl_se->rb_node;

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
		struct rb_node *next_node;

		next_node = rb_next(&dl_se->rb_node);
		dl_rq->rb_leftmost = next_node;
	}

	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (flags & ENQUEUE_WAKEUP)
		update_dl_entity(dl_se, pi_se);
	else if (flags & ENQUEUE_REPLENISH)
		replenish_dl_entity(dl_se, pi_se);

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter
	 * task if we have one and its (absolute) deadline is
	 * smaller than our one... OTW we keep our runtime and
	 * deadline.
	 */
	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
		pi_se = &pi_task->dl;
	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task
		 * that is going to be deboosted, but exceeds its
		 * runtime while doing so. No point in replenishing
		 * it, as it's going to return to its original
		 * scheduling class after this.
		 */
		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
		return;
	}

	/*
	 * If p is throttled, we do nothing. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 */
	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
		return;

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);
}

/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
static void yield_task_dl(struct rq *rq)
{
	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	rq->curr->dl.dl_yielded = 1;

	update_rq_clock(rq);
	update_curr_dl(rq);
	/*
	 * Tell update_rq_clock() that we've just updated,
	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
	rq_clock_skip_update(rq, true);
}
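
/*
 * What the dl_yielded path above looks like from user space: a periodic
 * task that finishes an instance early can call sched_yield() to give up
 * its remaining budget until the next period. A rough sketch, modelled on
 * Documentation/scheduler/sched-deadline.txt; struct sched_attr is
 * re-declared locally and __NR_sched_setattr is assumed to come from the
 * toolchain's headers.
 */
#if 0
#define _GNU_SOURCE
#include <unistd.h>
#include <stdint.h>
#include <sched.h>
#include <sys/syscall.h>

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* ns */
	uint64_t sched_deadline;	/* ns */
	uint64_t sched_period;		/* ns */
};

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= 6,			/* SCHED_DEADLINE */
		.sched_runtime	= 10 * 1000 * 1000,	/* 10ms		*/
		.sched_deadline	= 30 * 1000 * 1000,	/* 30ms		*/
		.sched_period	= 30 * 1000 * 1000,	/* 30ms		*/
	};

	if (syscall(__NR_sched_setattr, 0, &attr, 0))
		return 1;	/* admission control refused the reservation */

	for (;;) {
		/* do one instance's worth of work here... */

		/* ...then hand back the unused runtime until next period */
		sched_yield();
	}
	return 0;
}
#endif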

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (tsk_nr_cpus_allowed(curr) < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (tsk_nr_cpus_allowed(p) > 1)) {
		int target = find_later_rq(p);

		if (target != -1 &&
		    (dl_time_before(p->dl.deadline,
				    cpu_rq(target)->dl.earliest_dl.curr) ||
		     (cpu_rq(target)->dl.dl_nr_running == 0)))
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (tsk_nr_cpus_allowed(p) != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_curr(rq);
}

#endif /* CONFIG_SMP */

/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = dl_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}

struct task_struct *
pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (need_pull_dl_task(rq, prev)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we're
		 * being very careful to re-start the picking loop.
		 */
		lockdep_unpin_lock(&rq->lock, cookie);
		pull_dl_task(rq);
		lockdep_repin_lock(&rq->lock, cookie);
		/*
		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
		 * means a stop task can slip in, in which case we need to
		 * re-start task selection.
		 */
		if (rq->stop && task_on_rq_queued(rq->stop))
			return RETRY_TASK;
	}

	/*
	 * When prev is DL, we may throttle it in put_prev_task().
	 * So, we update time before we check for dl_nr_running.
	 */
	if (prev->sched_class == &dl_sched_class)
		update_curr_dl(rq);

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	put_prev_task(rq, prev);

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);

	queue_push_tasks(rq);

	return p;
}

static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

	/*
	 * Even when we have runtime, update_curr_dl() might have resulted in us
	 * not being the leftmost task anymore. In that case NEED_RESCHED will
	 * be set and schedule() will start a new hrtick for the next task.
	 */
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
	    is_leftmost(p, &rq->dl))
		start_hrtick_dl(rq, p);
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
}

static void task_dead_dl(struct task_struct *p)
{
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

	/*
	 * Since we are TASK_DEAD we won't slip out of the domain!
	 */
	raw_spin_lock_irq(&dl_b->lock);
	/* XXX we should retain the bw until 0-lag */
	dl_b->total_bw -= p->dl.dl_bw;
	raw_spin_unlock_irq(&dl_b->lock);
}

static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
		return 1;
	return 0;
}

/*
 * Return the earliest pushable rq's task, which is suitable to be executed
 * on the CPU, NULL otherwise:
 */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
	struct task_struct *p = NULL;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

next_node:
	if (next_node) {
		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);

		if (pick_dl_task(rq, p, cpu))
			return p;

		next_node = rb_next(next_node);
		goto next_node;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int best_cpu, cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (tsk_nr_cpus_allowed(task) == 1)
		return -1;

	/*
	 * We have to consider system topology and task affinity
	 * first, then we can look for a suitable cpu.
	 */
	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
			task, later_mask);
	if (best_cpu == -1)
		return -1;

	/*
	 * If we are here, some target has been found,
	 * the most suitable of which is cached in best_cpu.
	 * This is, among the runqueues where the current tasks
	 * have later deadlines than the task's one, the rq
	 * with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last cpu where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;
	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			/*
			 * Last chance: if best_cpu is valid and is
			 * in the mask, that becomes our choice.
			 */
			if (best_cpu < nr_cpu_ids &&
			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}

/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		if (later_rq->dl.dl_nr_running &&
		    !dl_time_before(task->dl.deadline,
					later_rq->dl.earliest_dl.curr)) {
			/*
			 * Target rq has tasks of equal or earlier deadline,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			later_rq = NULL;
			break;
		}

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu,
						       tsk_cpus_allowed(task)) ||
				     task_running(rq, task) ||
				     !dl_task(task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!dl_task(p));

	return p;
}

/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;
	int ret = 0;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    tsk_nr_cpus_allowed(rq->curr) > 1) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);
	activate_task(later_rq, next_task, 0);
	ret = 1;

	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_dl_tasks(struct rq *rq)
{
	/* push_dl_task() will return true if it moved a -deadline task */
	while (push_dl_task(rq))
		;
}

static void pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	struct task_struct *p;
	bool resched = false;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return;

	/*
	 * Match the barrier from dl_set_overload(); this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			resched = true;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	if (resched)
		resched_curr(this_rq);
}

/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    tsk_nr_cpus_allowed(p) > 1 &&
	    dl_task(rq->curr) &&
	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
		push_dl_tasks(rq);
	}
}

static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct root_domain *src_rd;
	struct rq *rq;

	BUG_ON(!dl_task(p));

	rq = task_rq(p);
	src_rd = rq->rd;
	/*
	 * Migrating a SCHED_DEADLINE task between exclusive
	 * cpusets (different root_domains) entails a bandwidth
	 * update. We already made space for us in the destination
	 * domain (see cpuset_can_attach()).
	 */
	if (!cpumask_intersects(src_rd->span, new_mask)) {
		struct dl_bw *src_dl_b;

		src_dl_b = dl_bw_of(cpu_of(rq));
		/*
		 * We now free resources of the root_domain we are migrating
		 * off. In the worst case, sched_setattr() may temporarily fail
		 * until we complete the update.
		 */
		raw_spin_lock(&src_dl_b->lock);
		__dl_clear(src_dl_b, p->dl.dl_bw);
		raw_spin_unlock(&src_dl_b->lock);
	}

	set_cpus_allowed_common(p, new_mask);
}

/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}

void __init init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Start the deadline timer; if we switch back to dl before this we'll
	 * continue consuming our current CBS slice. If we stay outside of
	 * SCHED_DEADLINE until the deadline passes, the timer will reset the
	 * task.
	 */
	if (!start_dl_timer(p))
		__dl_clear_params(p);

	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
		return;

	queue_pull_task(rq);
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	if (dl_time_before(p->dl.deadline, rq_clock(rq)))
		setup_new_dl_entity(&p->dl, &p->dl);

	if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
			queue_push_tasks(rq);
#else
		if (dl_task(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_curr(rq);
#endif
	}
}

/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			queue_pull_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
			resched_curr(rq);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_curr(rq);
#endif /* CONFIG_SMP */
	}
}

const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,
	.task_dead		= task_dead_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,

	.update_curr		= update_curr_dl,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);

void print_dl_stats(struct seq_file *m, int cpu)
{
	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */
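
/*
 * Quick ways to poke at this class from user space. These are illustrative
 * assumptions (a reasonably recent util-linux chrt and CONFIG_SCHED_DEBUG),
 * not interfaces defined by this file:
 *
 *   # run ./app with a 10ms/30ms/30ms SCHED_DEADLINE reservation
 *   chrt -d --sched-runtime 10000000 --sched-deadline 30000000 \
 *        --sched-period 30000000 0 ./app
 *
 *   # inspect the per-cpu dl_rq state printed via print_dl_rq() above
 *   grep -A 3 '^dl_rq' /proc/sched_debug
 */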