/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"

#include <linux/slab.h>

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}
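
/*
 * A worked example of the bandwidth arithmetic above (illustrative
 * values only): to_ratio(), defined in sched/core.c, expresses
 * runtime/period as a Q20 fixed-point fraction, i.e.
 * (runtime << 20) / period. With the default rt limits of
 * runtime = 950 ms over a period of 1 s:
 *
 *	bw = (950000000ULL << 20) / 1000000000 = 996147
 *
 * which is ~0.95 * 2^20. Admission control then refuses any new
 * -deadline task whose own dl_runtime/dl_period fraction would push
 * total_bw above this value.
 */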

void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
{
	dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		dl_rq->pushable_dl_tasks_leftmost = next_node;
	}

	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}

static inline void set_post_schedule(struct rq *rq)
{
	rq->post_schedule = has_pushable_dl_tasks(rq);
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline int pull_dl_task(struct rq *rq)
{
	return 0;
}

static inline void set_post_schedule(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a
 * -deadline entity wants to (try to!) synchronize its behaviour with
 * the scheduler's one, and to (try to!) reconcile itself with its own
 * scheduling parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
				       struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
	dl_se->runtime = pi_se->dl_runtime;
	dl_se->dl_new = 0;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Typical reasons why this kind
 * of overrun could happen are an entity voluntarily trying to exceed its
 * runtime, or having underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by resetting
	 * the deadline and the budget of the entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}
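
/*
 * A quick numerical sketch of the replenishment loop above, with
 * illustrative parameters (dl_runtime = 10 ms, dl_period = 30 ms).
 * Suppose an overrun left the entity with runtime = -25 ms:
 *
 *	iteration 1: deadline += 30 ms, runtime = -15 ms
 *	iteration 2: deadline += 30 ms, runtime =  -5 ms
 *	iteration 3: deadline += 30 ms, runtime =  +5 ms  -> done
 *
 * The deadline is postponed by three full periods, so the bandwidth
 * consumed during the overrun is paid back and the other entities
 * keep their guarantees.
 */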

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over the residual deadline fits within the allocated bandwidth, then
 * we can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * tasks with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10 bits
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
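
/*
 * Cross-multiplied worked example (times in ms for readability; the
 * actual code works in ns and shifts everything right by DL_SCALE
 * first). Assume dl_runtime = 10, dl_period = 20, and a task waking
 * with runtime = 5 left and (deadline - t) = 8:
 *
 *	left  = dl_period * runtime         = 20 * 5  = 100
 *	right = (deadline - t) * dl_runtime =  8 * 10 =  80
 *
 * right < left, i.e. 5/8 (0.625) > 10/20 (0.5): the residual budget
 * over the residual deadline exceeds the reserved bandwidth, so the
 * function returns true and the caller refills the runtime and pushes
 * the deadline a full relative deadline into the future.
 */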

/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * The arrival of a new instance needs special treatment, i.e.,
	 * the actual scheduling parameters have to be "renewed".
	 */
	if (dl_se->dl_new) {
		setup_new_dl_entity(dl_se, pi_se);
		return;
	}

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	ktime_t now, act;
	ktime_t soft, hard;
	unsigned long range;
	s64 delta;

	if (boosted)
		return 0;
	/*
	 * We want the timer to fire at the deadline, but we must
	 * account for the fact that the deadline is expressed in
	 * rq->clock time, not in the hrtimer's time base.
	 */
	act = ns_to_ktime(dl_se->deadline);
	now = hrtimer_cb_get_time(&dl_se->dl_timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	hrtimer_set_expires(&dl_se->dl_timer, act);

	soft = hrtimer_get_softexpires(&dl_se->dl_timer);
	hard = hrtimer_get_expires(&dl_se->dl_timer);
	range = ktime_to_ns(ktime_sub(hard, soft));
	__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
				 range, HRTIMER_MODE_ABS, 0);

	return hrtimer_active(&dl_se->dl_timer);
}
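
/*
 * The clock-base conversion above in numbers (illustrative values,
 * assuming both clocks tick at the same rate): say the throttled
 * entity's deadline is 1000 ms in rq->clock terms, rq_clock()
 * currently reads 998 ms and the hrtimer base reads 1002 ms. Then
 *
 *	delta = 1002 - 998 = 4 ms
 *	act   = 1000 + 4   = 1004 ms (in the hrtimer base)
 *
 * i.e. the timer fires 2 ms from now in both bases, exactly at the
 * replenishment instant.
 */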

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we
 * just clear dl_throttled, so that runtime and deadline updating (and
 * the queueing back to dl_rq) will be done by the next call to
 * enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq;
again:
	rq = task_rq(p);
	raw_spin_lock(&rq->lock);

	if (rq != task_rq(p)) {
		/* Task was moved, retrying. */
		raw_spin_unlock(&rq->lock);
		goto again;
	}

	/*
	 * We need to take care of several possible races here:
	 *
	 *   - the task might have changed its scheduling policy
	 *     to something different than SCHED_DEADLINE
	 *   - the task might have changed its reservation parameters
	 *     (through sched_setattr())
	 *   - the task might have been boosted by someone else and
	 *     might be in the boosting/deboosting path
	 *
	 * In all these cases we bail out, as the task is already
	 * in the runqueue or is going to be enqueued back anyway.
	 */
	if (!dl_task(p) || dl_se->dl_new ||
	    dl_se->dl_boosted || !dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);
	dl_se->dl_throttled = 0;
	dl_se->dl_yielded = 0;
	if (task_on_rq_queued(p)) {
		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
		if (dl_task(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_curr(rq);
#ifdef CONFIG_SMP
		/*
		 * Queueing this task back might have overloaded rq,
		 * check if we need to kick someone away.
		 */
		if (has_pushable_dl_tasks(rq))
			push_dl_task(rq);
#endif
	}
unlock:
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	if (hrtimer_active(timer)) {
		hrtimer_try_to_cancel(timer);
		return;
	}

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}

static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
	int rorun = dl_se->runtime <= 0;

	if (!rorun && !dmiss)
		return 0;

	/*
	 * If we are beyond our current deadline and we are still
	 * executing, then we have already used some of the runtime of
	 * the next instance. Thus, if we do not account that, we are
	 * stealing bandwidth from the system at each deadline miss!
	 */
	if (dmiss) {
		dl_se->runtime = rorun ? dl_se->runtime : 0;
		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
	}

	return 1;
}
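
/*
 * Example of the deadline-miss accounting above (illustrative values):
 * suppose rq_clock() is 2 ms past the deadline while the entity still
 * has runtime = +1 ms (dmiss set, rorun not). Then
 *
 *	runtime = 0		(the leftover budget is forfeited)
 *	runtime -= 2 ms		-> runtime = -2 ms
 *
 * so the 2 ms executed past the deadline are charged against the next
 * instance, and the replenishment loop will postpone the deadline far
 * enough to pay that debt back.
 */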

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	dl_se->runtime -= delta_exec;
	if (dl_runtime_exceeded(rq, dl_se)) {
		__dequeue_task_dl(rq, curr, 0);
		if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
			dl_se->dl_throttled = 1;
		else
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

#ifdef CONFIG_SMP

static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);

static inline u64 next_deadline(struct rq *rq)
{
	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);

	if (next && dl_prio(next->prio))
		return next->dl.deadline;
	else
		return 0;
}

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		/*
		 * If the dl_rq had no -deadline tasks, or if the new task
		 * has a shorter deadline than the current one on dl_rq, we
		 * know that the previous earliest becomes our next earliest,
		 * as the new task becomes the earliest itself.
		 */
		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
	} else if (dl_rq->earliest_dl.next == 0 ||
		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
		/*
		 * On the other hand, if the new -deadline task has a
		 * later deadline than the earliest one on dl_rq, but
		 * it is earlier than the next (if any), we must
		 * recompute the next-earliest.
		 */
		dl_rq->earliest_dl.next = next_deadline(rq);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	} else {
		struct rb_node *leftmost = dl_rq->rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		dl_rq->earliest_dl.next = next_deadline(rq);
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */
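
/*
 * How the earliest_dl pair evolves, with illustrative deadlines: a
 * dl_rq holding tasks with deadlines {100, 150, 200} has
 * earliest_dl.curr = 100 and earliest_dl.next = 150. Enqueueing a task
 * with deadline 120 hits the second branch of inc_dl_deadline() (later
 * than curr, earlier than next), so next is recomputed to 120.
 * Dequeueing the 100-deadline task then makes curr = 120, and next is
 * recomputed to 150 in dec_dl_deadline(). earliest_dl.next is what
 * pull_dl_task() on other CPUs compares against when looking for work
 * to steal.
 */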

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->rb_leftmost = &dl_se->rb_node;

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
		struct rb_node *next_node;

		next_node = rb_next(&dl_se->rb_node);
		dl_rq->rb_leftmost = next_node;
	}

	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
		replenish_dl_entity(dl_se, pi_se);
	else
		update_dl_entity(dl_se, pi_se);

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter
	 * task if we have one and its (relative) deadline is
	 * smaller than ours... otherwise we keep our runtime
	 * and deadline.
	 */
	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
		pi_se = &pi_task->dl;
	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task
		 * that is going to be deboosted, but exceeds its
		 * runtime while doing so. No point in replenishing
		 * it, as it's going to return to its original
		 * scheduling class after this.
		 */
		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
		return;
	}

	/*
	 * If p is throttled, we do nothing. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 */
	if (p->dl.dl_throttled)
		return;

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}
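
/*
 * Priority-inheritance sketch for the pi_se selection above (the
 * scenario is illustrative): a SCHED_NORMAL task holds an rt_mutex,
 * and a -deadline waiter with {dl_runtime = 5 ms, dl_deadline = 20 ms}
 * blocks on it. rt_mutex_get_top_task() returns that waiter and
 * dl_boosted is set, so pi_se points at the *waiter's* entity and the
 * lock holder is enqueued on the dl_rq with the donor's runtime and
 * deadline until it releases the lock and is deboosted.
 */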

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);
}

/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
static void yield_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	if (p->dl.runtime > 0) {
		rq->curr->dl.dl_yielded = 1;
		p->dl.runtime = 0;
	}
	update_curr_dl(rq);
}
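
/*
 * From userspace, a periodic -deadline task that finishes its work
 * early typically looks like this (a minimal sketch; error handling
 * omitted, do_work() hypothetical, and a sched_setattr() syscall
 * wrapper assumed since glibc does not provide one):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	// 10 ms
 *		.sched_deadline	= 30 * 1000 * 1000,	// 30 ms
 *		.sched_period	= 30 * 1000 * 1000,	// 30 ms
 *	};
 *
 *	sched_setattr(0, &attr, 0);
 *	for (;;) {
 *		do_work();
 *		sched_yield();	// surrender leftover budget
 *	}
 *
 * The sched_yield() lands here: the runtime is zeroed, the task
 * throttles, and dl_task_timer() hands it fresh parameters for the
 * next instance once its current deadline passes.
 */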

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, since it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_curr(rq);
}

static int pull_dl_task(struct rq *this_rq);

#endif /* CONFIG_SMP */

/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	hrtick_start(rq, p->dl.runtime);
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = dl_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}

struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (need_pull_dl_task(rq, prev)) {
		pull_dl_task(rq);
		/*
		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
		 * means a stop task can slip in, in which case we need to
		 * re-start task selection.
		 */
		if (rq->stop && task_on_rq_queued(rq->stop))
			return RETRY_TASK;
	}

	/*
	 * When prev is DL, we may throttle it in put_prev_task().
	 * So, we update time before we check for dl_nr_running.
	 */
	if (prev->sched_class == &dl_sched_class)
		update_curr_dl(rq);

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	put_prev_task(rq, prev);

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);
#endif

	set_post_schedule(rq);

	return p;
}

static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
		start_hrtick_dl(rq, p);
#endif
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
}

static void task_dead_dl(struct task_struct *p)
{
	struct hrtimer *timer = &p->dl.dl_timer;
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

	/*
	 * Since we are TASK_DEAD we won't slip out of the domain!
	 */
	raw_spin_lock_irq(&dl_b->lock);
	dl_b->total_bw -= p->dl.dl_bw;
	raw_spin_unlock_irq(&dl_b->lock);

	hrtimer_cancel(timer);
}

static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
		return 1;
	return 0;
}

/* Returns the second earliest -deadline task, NULL otherwise */
static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.rb_leftmost;
	struct sched_dl_entity *dl_se;
	struct task_struct *p = NULL;

next_node:
	next_node = rb_next(next_node);
	if (next_node) {
		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
		p = dl_task_of(dl_se);

		if (pick_dl_task(rq, p, cpu))
			return p;

		goto next_node;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int best_cpu, cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	/*
	 * We have to consider system topology and task affinity
	 * first, then we can look for a suitable cpu.
	 */
	cpumask_copy(later_mask, task_rq(task)->rd->span);
	cpumask_and(later_mask, later_mask, cpu_active_mask);
	cpumask_and(later_mask, later_mask, &task->cpus_allowed);
	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
			task, later_mask);
	if (best_cpu == -1)
		return -1;

	/*
	 * If we are here, some targets have been found, the
	 * most suitable of which is cached in best_cpu.
	 * This is, among the runqueues where the current tasks
	 * have later deadlines than the task's one, the rq
	 * with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last CPU where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;
	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			/*
			 * Last chance: if best_cpu is valid and is
			 * in the mask, that becomes our choice.
			 */
			if (best_cpu < nr_cpu_ids &&
			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}
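
/*
 * The preference order above, walked through with illustrative CPUs:
 * suppose the task last ran on CPU 2 and cpudl_find() filled
 * later_mask = {2, 5, 7}. CPU 2 is in the mask, so it wins outright
 * (cache-hot). If the mask were {5, 7} and this_cpu = 5 were in it,
 * this_cpu is preferred over best_cpu as soon as they share an
 * SD_WAKE_AFFINE domain, since preempting locally is cheaper than a
 * cross-CPU migration. Only then does best_cpu (the CPU whose current
 * task has the latest deadline) get its turn, and cpumask_any() is
 * the final fallback.
 */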

/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}
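
/*
 * Note the revalidation pattern above: double_lock_balance() takes the
 * two rq locks in a fixed order, which may mean dropping rq->lock and
 * reacquiring it. In that window the task can migrate, start running,
 * be dequeued, or lose later_rq->cpu from its affinity mask, so each
 * of those conditions is rechecked once both locks are held; a stale
 * candidate is simply unlocked and the search retried, up to
 * DL_MAX_TRIES times. The same pattern appears in sched_rt.c's
 * find_lock_lowest_rq().
 */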

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!dl_task(p));

	return p;
}

/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			dequeue_pushable_dl_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);
	activate_task(later_rq, next_task, 0);

	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return 1;
}

static void push_dl_tasks(struct rq *rq)
{
	/* Terminates as it moves a -deadline task */
	while (push_dl_task(rq))
		;
}

static int pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return 0;

	/*
	 * Match the barrier from dl_set_overload(); this guarantees that
	 * if we see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_next_earliest_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there is one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}
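
/*
 * The early-skip test in pull_dl_task(), with illustrative deadlines:
 * if this_rq's earliest deadline is 100 and a source rq advertises
 * earliest_dl.next = 110 (its second-earliest task), nothing we could
 * pull from there would preempt us, so we skip it without touching its
 * lock. With earliest_dl.next = 90 we do take both locks and consider
 * pulling that task. This is why inc_dl_deadline()/dec_dl_deadline()
 * go to the trouble of keeping the next-earliest value up to date.
 */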

static void post_schedule_dl(struct rq *rq)
{
	push_dl_tasks(rq);
}

/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_dl_tasks(rq) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     dl_entity_preempt(&rq->curr->dl, &p->dl))) {
		push_dl_tasks(rq);
	}
}

static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	int weight;

	BUG_ON(!dl_task(p));

	/*
	 * Update only if the task is actually running (i.e.,
	 * it is on the rq AND it is not throttled).
	 */
	if (!on_dl_rq(&p->dl))
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the task changes state between being
	 * migratable and not.
	 */
	if ((p->nr_cpus_allowed > 1) == (weight > 1))
		return;

	rq = task_rq(p);

	/*
	 * The task used to be able to migrate OR it can now migrate
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_dl_task(rq, p);
		BUG_ON(!rq->dl.dl_nr_migratory);
		rq->dl.dl_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_dl_task(rq, p);
		rq->dl.dl_nr_migratory++;
	}

	update_dl_migration(&rq->dl);
}
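
/*
 * For instance (illustrative): pinning a queued -deadline task with
 * sched_setaffinity() from 4 CPUs down to 1 crosses the migratable
 * boundary, so it is removed from the pushable tree, dl_nr_migratory
 * drops, and update_dl_migration() may clear the rq's overloaded
 * state. Widening the mask back to several CPUs reverses all three
 * steps. Going from, say, 4 CPUs to 2 changes nothing here, since the
 * task stays migratable.
 */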

/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
}

void init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
		hrtimer_try_to_cancel(&p->dl.dl_timer);

	__dl_clear_params(p);

#ifdef CONFIG_SMP
	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!rq->dl.dl_nr_running)
		pull_dl_task(rq);
#endif
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If p is throttled, don't consider the possibility
	 * of preempting rq->curr, the check will be done right
	 * after its runtime is replenished.
	 */
	if (unlikely(p->dl.dl_throttled))
		return;

	if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
		if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
			/* Only reschedule if pushing failed */
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched) {
			if (dl_task(rq->curr))
				check_preempt_curr_dl(rq, p, 0);
			else
				resched_curr(rq);
		}
	}
}

/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't tell whether the task increased
		 * or lowered its prio, so...
		 */
		if (!rq->dl.overloaded)
			pull_dl_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
		    rq->curr == p)
			resched_curr(rq);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_curr(rq);
#endif /* CONFIG_SMP */
	} else
		switched_to_dl(rq, p);
}

const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.post_schedule		= post_schedule_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,
	.task_dead		= task_dead_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,
};