1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Deadline Scheduling Class (SCHED_DEADLINE) 4 * 5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS). 6 * 7 * Tasks that periodically executes their instances for less than their 8 * runtime won't miss any of their deadlines. 9 * Tasks that are not periodic or sporadic or that tries to execute more 10 * than their reserved bandwidth will be slowed down (and may potentially 11 * miss some of their deadlines), and won't affect any other task. 12 * 13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>, 14 * Juri Lelli <juri.lelli@gmail.com>, 15 * Michael Trimarchi <michael@amarulasolutions.com>, 16 * Fabio Checconi <fchecconi@gmail.com> 17 */ 18 #include "sched.h" 19 20 #include <linux/slab.h> 21 #include <uapi/linux/sched/types.h> 22 23 struct dl_bandwidth def_dl_bandwidth; 24 25 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se) 26 { 27 return container_of(dl_se, struct task_struct, dl); 28 } 29 30 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) 31 { 32 return container_of(dl_rq, struct rq, dl); 33 } 34 35 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se) 36 { 37 struct task_struct *p = dl_task_of(dl_se); 38 struct rq *rq = task_rq(p); 39 40 return &rq->dl; 41 } 42 43 static inline int on_dl_rq(struct sched_dl_entity *dl_se) 44 { 45 return !RB_EMPTY_NODE(&dl_se->rb_node); 46 } 47 48 #ifdef CONFIG_SMP 49 static inline struct dl_bw *dl_bw_of(int i) 50 { 51 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 52 "sched RCU must be held"); 53 return &cpu_rq(i)->rd->dl_bw; 54 } 55 56 static inline int dl_bw_cpus(int i) 57 { 58 struct root_domain *rd = cpu_rq(i)->rd; 59 int cpus = 0; 60 61 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 62 "sched RCU must be held"); 63 for_each_cpu_and(i, rd->span, cpu_active_mask) 64 cpus++; 65 66 return cpus; 67 } 68 #else 69 static inline struct dl_bw *dl_bw_of(int i) 70 { 71 return &cpu_rq(i)->dl.dl_bw; 72 } 73 74 static inline int dl_bw_cpus(int i) 75 { 76 return 1; 77 } 78 #endif 79 80 static inline 81 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq) 82 { 83 u64 old = dl_rq->running_bw; 84 85 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); 86 dl_rq->running_bw += dl_bw; 87 SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */ 88 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); 89 /* kick cpufreq (see the comment in kernel/sched/sched.h). */ 90 cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL); 91 } 92 93 static inline 94 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq) 95 { 96 u64 old = dl_rq->running_bw; 97 98 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); 99 dl_rq->running_bw -= dl_bw; 100 SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */ 101 if (dl_rq->running_bw > old) 102 dl_rq->running_bw = 0; 103 /* kick cpufreq (see the comment in kernel/sched/sched.h). 
*/ 104 cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL); 105 } 106 107 static inline 108 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) 109 { 110 u64 old = dl_rq->this_bw; 111 112 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); 113 dl_rq->this_bw += dl_bw; 114 SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */ 115 } 116 117 static inline 118 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) 119 { 120 u64 old = dl_rq->this_bw; 121 122 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); 123 dl_rq->this_bw -= dl_bw; 124 SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */ 125 if (dl_rq->this_bw > old) 126 dl_rq->this_bw = 0; 127 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); 128 } 129 130 static inline 131 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 132 { 133 if (!dl_entity_is_special(dl_se)) 134 __add_rq_bw(dl_se->dl_bw, dl_rq); 135 } 136 137 static inline 138 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 139 { 140 if (!dl_entity_is_special(dl_se)) 141 __sub_rq_bw(dl_se->dl_bw, dl_rq); 142 } 143 144 static inline 145 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 146 { 147 if (!dl_entity_is_special(dl_se)) 148 __add_running_bw(dl_se->dl_bw, dl_rq); 149 } 150 151 static inline 152 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 153 { 154 if (!dl_entity_is_special(dl_se)) 155 __sub_running_bw(dl_se->dl_bw, dl_rq); 156 } 157 158 void dl_change_utilization(struct task_struct *p, u64 new_bw) 159 { 160 struct rq *rq; 161 162 BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV); 163 164 if (task_on_rq_queued(p)) 165 return; 166 167 rq = task_rq(p); 168 if (p->dl.dl_non_contending) { 169 sub_running_bw(&p->dl, &rq->dl); 170 p->dl.dl_non_contending = 0; 171 /* 172 * If the timer handler is currently running and the 173 * timer cannot be cancelled, inactive_task_timer() 174 * will see that dl_not_contending is not set, and 175 * will not touch the rq's active utilization, 176 * so we are still safe. 177 */ 178 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) 179 put_task_struct(p); 180 } 181 __sub_rq_bw(p->dl.dl_bw, &rq->dl); 182 __add_rq_bw(new_bw, &rq->dl); 183 } 184 185 /* 186 * The utilization of a task cannot be immediately removed from 187 * the rq active utilization (running_bw) when the task blocks. 188 * Instead, we have to wait for the so called "0-lag time". 189 * 190 * If a task blocks before the "0-lag time", a timer (the inactive 191 * timer) is armed, and running_bw is decreased when the timer 192 * fires. 193 * 194 * If the task wakes up again before the inactive timer fires, 195 * the timer is cancelled, whereas if the task wakes up after the 196 * inactive timer fired (and running_bw has been decreased) the 197 * task's utilization has to be added to running_bw again. 198 * A flag in the deadline scheduling entity (dl_non_contending) 199 * is used to avoid race conditions between the inactive timer handler 200 * and task wakeups. 201 * 202 * The following diagram shows how running_bw is updated. A task is 203 * "ACTIVE" when its utilization contributes to running_bw; an 204 * "ACTIVE contending" task is in the TASK_RUNNING state, while an 205 * "ACTIVE non contending" task is a blocked task for which the "0-lag time" 206 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag" 207 * time already passed, which does not contribute to running_bw anymore. 
208 * +------------------+ 209 * wakeup | ACTIVE | 210 * +------------------>+ contending | 211 * | add_running_bw | | 212 * | +----+------+------+ 213 * | | ^ 214 * | dequeue | | 215 * +--------+-------+ | | 216 * | | t >= 0-lag | | wakeup 217 * | INACTIVE |<---------------+ | 218 * | | sub_running_bw | | 219 * +--------+-------+ | | 220 * ^ | | 221 * | t < 0-lag | | 222 * | | | 223 * | V | 224 * | +----+------+------+ 225 * | sub_running_bw | ACTIVE | 226 * +-------------------+ | 227 * inactive timer | non contending | 228 * fired +------------------+ 229 * 230 * The task_non_contending() function is invoked when a task 231 * blocks, and checks if the 0-lag time already passed or 232 * not (in the first case, it directly updates running_bw; 233 * in the second case, it arms the inactive timer). 234 * 235 * The task_contending() function is invoked when a task wakes 236 * up, and checks if the task is still in the "ACTIVE non contending" 237 * state or not (in the second case, it updates running_bw). 238 */ 239 static void task_non_contending(struct task_struct *p) 240 { 241 struct sched_dl_entity *dl_se = &p->dl; 242 struct hrtimer *timer = &dl_se->inactive_timer; 243 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 244 struct rq *rq = rq_of_dl_rq(dl_rq); 245 s64 zerolag_time; 246 247 /* 248 * If this is a non-deadline task that has been boosted, 249 * do nothing 250 */ 251 if (dl_se->dl_runtime == 0) 252 return; 253 254 if (dl_entity_is_special(dl_se)) 255 return; 256 257 WARN_ON(hrtimer_active(&dl_se->inactive_timer)); 258 WARN_ON(dl_se->dl_non_contending); 259 260 zerolag_time = dl_se->deadline - 261 div64_long((dl_se->runtime * dl_se->dl_period), 262 dl_se->dl_runtime); 263 264 /* 265 * Using relative times instead of the absolute "0-lag time" 266 * allows to simplify the code 267 */ 268 zerolag_time -= rq_clock(rq); 269 270 /* 271 * If the "0-lag time" already passed, decrease the active 272 * utilization now, instead of starting a timer 273 */ 274 if (zerolag_time < 0) { 275 if (dl_task(p)) 276 sub_running_bw(dl_se, dl_rq); 277 if (!dl_task(p) || p->state == TASK_DEAD) { 278 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 279 280 if (p->state == TASK_DEAD) 281 sub_rq_bw(&p->dl, &rq->dl); 282 raw_spin_lock(&dl_b->lock); 283 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); 284 __dl_clear_params(p); 285 raw_spin_unlock(&dl_b->lock); 286 } 287 288 return; 289 } 290 291 dl_se->dl_non_contending = 1; 292 get_task_struct(p); 293 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL); 294 } 295 296 static void task_contending(struct sched_dl_entity *dl_se, int flags) 297 { 298 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 299 300 /* 301 * If this is a non-deadline task that has been boosted, 302 * do nothing 303 */ 304 if (dl_se->dl_runtime == 0) 305 return; 306 307 if (flags & ENQUEUE_MIGRATED) 308 add_rq_bw(dl_se, dl_rq); 309 310 if (dl_se->dl_non_contending) { 311 dl_se->dl_non_contending = 0; 312 /* 313 * If the timer handler is currently running and the 314 * timer cannot be cancelled, inactive_task_timer() 315 * will see that dl_not_contending is not set, and 316 * will not touch the rq's active utilization, 317 * so we are still safe. 318 */ 319 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) 320 put_task_struct(dl_task_of(dl_se)); 321 } else { 322 /* 323 * Since "dl_non_contending" is not set, the 324 * task's utilization has already been removed from 325 * active utilization (either when the task blocked, 326 * when the "inactive timer" fired). 327 * So, add it back. 
328 */ 329 add_running_bw(dl_se, dl_rq); 330 } 331 } 332 333 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq) 334 { 335 struct sched_dl_entity *dl_se = &p->dl; 336 337 return dl_rq->root.rb_leftmost == &dl_se->rb_node; 338 } 339 340 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime) 341 { 342 raw_spin_lock_init(&dl_b->dl_runtime_lock); 343 dl_b->dl_period = period; 344 dl_b->dl_runtime = runtime; 345 } 346 347 void init_dl_bw(struct dl_bw *dl_b) 348 { 349 raw_spin_lock_init(&dl_b->lock); 350 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock); 351 if (global_rt_runtime() == RUNTIME_INF) 352 dl_b->bw = -1; 353 else 354 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime()); 355 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock); 356 dl_b->total_bw = 0; 357 } 358 359 void init_dl_rq(struct dl_rq *dl_rq) 360 { 361 dl_rq->root = RB_ROOT_CACHED; 362 363 #ifdef CONFIG_SMP 364 /* zero means no -deadline tasks */ 365 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; 366 367 dl_rq->dl_nr_migratory = 0; 368 dl_rq->overloaded = 0; 369 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED; 370 #else 371 init_dl_bw(&dl_rq->dl_bw); 372 #endif 373 374 dl_rq->running_bw = 0; 375 dl_rq->this_bw = 0; 376 init_dl_rq_bw_ratio(dl_rq); 377 } 378 379 #ifdef CONFIG_SMP 380 381 static inline int dl_overloaded(struct rq *rq) 382 { 383 return atomic_read(&rq->rd->dlo_count); 384 } 385 386 static inline void dl_set_overload(struct rq *rq) 387 { 388 if (!rq->online) 389 return; 390 391 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask); 392 /* 393 * Must be visible before the overload count is 394 * set (as in sched_rt.c). 395 * 396 * Matched by the barrier in pull_dl_task(). 397 */ 398 smp_wmb(); 399 atomic_inc(&rq->rd->dlo_count); 400 } 401 402 static inline void dl_clear_overload(struct rq *rq) 403 { 404 if (!rq->online) 405 return; 406 407 atomic_dec(&rq->rd->dlo_count); 408 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask); 409 } 410 411 static void update_dl_migration(struct dl_rq *dl_rq) 412 { 413 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) { 414 if (!dl_rq->overloaded) { 415 dl_set_overload(rq_of_dl_rq(dl_rq)); 416 dl_rq->overloaded = 1; 417 } 418 } else if (dl_rq->overloaded) { 419 dl_clear_overload(rq_of_dl_rq(dl_rq)); 420 dl_rq->overloaded = 0; 421 } 422 } 423 424 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 425 { 426 struct task_struct *p = dl_task_of(dl_se); 427 428 if (p->nr_cpus_allowed > 1) 429 dl_rq->dl_nr_migratory++; 430 431 update_dl_migration(dl_rq); 432 } 433 434 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 435 { 436 struct task_struct *p = dl_task_of(dl_se); 437 438 if (p->nr_cpus_allowed > 1) 439 dl_rq->dl_nr_migratory--; 440 441 update_dl_migration(dl_rq); 442 } 443 444 /* 445 * The list of pushable -deadline task is not a plist, like in 446 * sched_rt.c, it is an rb-tree with tasks ordered by deadline. 
447 */ 448 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) 449 { 450 struct dl_rq *dl_rq = &rq->dl; 451 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node; 452 struct rb_node *parent = NULL; 453 struct task_struct *entry; 454 bool leftmost = true; 455 456 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks)); 457 458 while (*link) { 459 parent = *link; 460 entry = rb_entry(parent, struct task_struct, 461 pushable_dl_tasks); 462 if (dl_entity_preempt(&p->dl, &entry->dl)) 463 link = &parent->rb_left; 464 else { 465 link = &parent->rb_right; 466 leftmost = false; 467 } 468 } 469 470 if (leftmost) 471 dl_rq->earliest_dl.next = p->dl.deadline; 472 473 rb_link_node(&p->pushable_dl_tasks, parent, link); 474 rb_insert_color_cached(&p->pushable_dl_tasks, 475 &dl_rq->pushable_dl_tasks_root, leftmost); 476 } 477 478 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) 479 { 480 struct dl_rq *dl_rq = &rq->dl; 481 482 if (RB_EMPTY_NODE(&p->pushable_dl_tasks)) 483 return; 484 485 if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) { 486 struct rb_node *next_node; 487 488 next_node = rb_next(&p->pushable_dl_tasks); 489 if (next_node) { 490 dl_rq->earliest_dl.next = rb_entry(next_node, 491 struct task_struct, pushable_dl_tasks)->dl.deadline; 492 } 493 } 494 495 rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); 496 RB_CLEAR_NODE(&p->pushable_dl_tasks); 497 } 498 499 static inline int has_pushable_dl_tasks(struct rq *rq) 500 { 501 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root); 502 } 503 504 static int push_dl_task(struct rq *rq); 505 506 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) 507 { 508 return dl_task(prev); 509 } 510 511 static DEFINE_PER_CPU(struct callback_head, dl_push_head); 512 static DEFINE_PER_CPU(struct callback_head, dl_pull_head); 513 514 static void push_dl_tasks(struct rq *); 515 static void pull_dl_task(struct rq *); 516 517 static inline void queue_push_tasks(struct rq *rq) 518 { 519 if (!has_pushable_dl_tasks(rq)) 520 return; 521 522 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks); 523 } 524 525 static inline void queue_pull_task(struct rq *rq) 526 { 527 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task); 528 } 529 530 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq); 531 532 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p) 533 { 534 struct rq *later_rq = NULL; 535 536 later_rq = find_lock_later_rq(p, rq); 537 if (!later_rq) { 538 int cpu; 539 540 /* 541 * If we cannot preempt any rq, fall back to pick any 542 * online cpu. 543 */ 544 cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); 545 if (cpu >= nr_cpu_ids) { 546 /* 547 * Fail to find any suitable cpu. 548 * The task will never come back! 549 */ 550 BUG_ON(dl_bandwidth_enabled()); 551 552 /* 553 * If admission control is disabled we 554 * try a little harder to let the task 555 * run. 
556 */ 557 cpu = cpumask_any(cpu_active_mask); 558 } 559 later_rq = cpu_rq(cpu); 560 double_lock_balance(rq, later_rq); 561 } 562 563 set_task_cpu(p, later_rq->cpu); 564 double_unlock_balance(later_rq, rq); 565 566 return later_rq; 567 } 568 569 #else 570 571 static inline 572 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) 573 { 574 } 575 576 static inline 577 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) 578 { 579 } 580 581 static inline 582 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 583 { 584 } 585 586 static inline 587 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 588 { 589 } 590 591 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) 592 { 593 return false; 594 } 595 596 static inline void pull_dl_task(struct rq *rq) 597 { 598 } 599 600 static inline void queue_push_tasks(struct rq *rq) 601 { 602 } 603 604 static inline void queue_pull_task(struct rq *rq) 605 { 606 } 607 #endif /* CONFIG_SMP */ 608 609 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags); 610 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags); 611 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, 612 int flags); 613 614 /* 615 * We are being explicitly informed that a new instance is starting, 616 * and this means that: 617 * - the absolute deadline of the entity has to be placed at 618 * current time + relative deadline; 619 * - the runtime of the entity has to be set to the maximum value. 620 * 621 * The capability of specifying such event is useful whenever a -deadline 622 * entity wants to (try to!) synchronize its behaviour with the scheduler's 623 * one, and to (try to!) reconcile itself with its own scheduling 624 * parameters. 625 */ 626 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se) 627 { 628 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 629 struct rq *rq = rq_of_dl_rq(dl_rq); 630 631 WARN_ON(dl_se->dl_boosted); 632 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline)); 633 634 /* 635 * We are racing with the deadline timer. So, do nothing because 636 * the deadline timer handler will take care of properly recharging 637 * the runtime and postponing the deadline 638 */ 639 if (dl_se->dl_throttled) 640 return; 641 642 /* 643 * We use the regular wall clock time to set deadlines in the 644 * future; in fact, we must consider execution overheads (time 645 * spent on hardirq context, etc.). 646 */ 647 dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline; 648 dl_se->runtime = dl_se->dl_runtime; 649 } 650 651 /* 652 * Pure Earliest Deadline First (EDF) scheduling does not deal with the 653 * possibility of a entity lasting more than what it declared, and thus 654 * exhausting its runtime. 655 * 656 * Here we are interested in making runtime overrun possible, but we do 657 * not want a entity which is misbehaving to affect the scheduling of all 658 * other entities. 659 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS) 660 * is used, in order to confine each entity within its own bandwidth. 661 * 662 * This function deals exactly with that, and ensures that when the runtime 663 * of a entity is replenished, its deadline is also postponed. That ensures 664 * the overrunning entity can't interfere with other entity in the system and 665 * can't make them miss their deadlines. 
Reasons why this kind of overruns 666 * could happen are, typically, a entity voluntarily trying to overcome its 667 * runtime, or it just underestimated it during sched_setattr(). 668 */ 669 static void replenish_dl_entity(struct sched_dl_entity *dl_se, 670 struct sched_dl_entity *pi_se) 671 { 672 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 673 struct rq *rq = rq_of_dl_rq(dl_rq); 674 675 BUG_ON(pi_se->dl_runtime <= 0); 676 677 /* 678 * This could be the case for a !-dl task that is boosted. 679 * Just go with full inherited parameters. 680 */ 681 if (dl_se->dl_deadline == 0) { 682 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; 683 dl_se->runtime = pi_se->dl_runtime; 684 } 685 686 if (dl_se->dl_yielded && dl_se->runtime > 0) 687 dl_se->runtime = 0; 688 689 /* 690 * We keep moving the deadline away until we get some 691 * available runtime for the entity. This ensures correct 692 * handling of situations where the runtime overrun is 693 * arbitrary large. 694 */ 695 while (dl_se->runtime <= 0) { 696 dl_se->deadline += pi_se->dl_period; 697 dl_se->runtime += pi_se->dl_runtime; 698 } 699 700 /* 701 * At this point, the deadline really should be "in 702 * the future" with respect to rq->clock. If it's 703 * not, we are, for some reason, lagging too much! 704 * Anyway, after having warn userspace abut that, 705 * we still try to keep the things running by 706 * resetting the deadline and the budget of the 707 * entity. 708 */ 709 if (dl_time_before(dl_se->deadline, rq_clock(rq))) { 710 printk_deferred_once("sched: DL replenish lagged too much\n"); 711 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; 712 dl_se->runtime = pi_se->dl_runtime; 713 } 714 715 if (dl_se->dl_yielded) 716 dl_se->dl_yielded = 0; 717 if (dl_se->dl_throttled) 718 dl_se->dl_throttled = 0; 719 } 720 721 /* 722 * Here we check if --at time t-- an entity (which is probably being 723 * [re]activated or, in general, enqueued) can use its remaining runtime 724 * and its current deadline _without_ exceeding the bandwidth it is 725 * assigned (function returns true if it can't). We are in fact applying 726 * one of the CBS rules: when a task wakes up, if the residual runtime 727 * over residual deadline fits within the allocated bandwidth, then we 728 * can keep the current (absolute) deadline and residual budget without 729 * disrupting the schedulability of the system. Otherwise, we should 730 * refill the runtime and set the deadline a period in the future, 731 * because keeping the current (absolute) deadline of the task would 732 * result in breaking guarantees promised to other tasks (refer to 733 * Documentation/scheduler/sched-deadline.txt for more informations). 734 * 735 * This function returns true if: 736 * 737 * runtime / (deadline - t) > dl_runtime / dl_deadline , 738 * 739 * IOW we can't recycle current parameters. 740 * 741 * Notice that the bandwidth check is done against the deadline. For 742 * task with deadline equal to period this is the same of using 743 * dl_period instead of dl_deadline in the equation above. 744 */ 745 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, 746 struct sched_dl_entity *pi_se, u64 t) 747 { 748 u64 left, right; 749 750 /* 751 * left and right are the two sides of the equation above, 752 * after a bit of shuffling to use multiplications instead 753 * of divisions. 
754 * 755 * Note that none of the time values involved in the two 756 * multiplications are absolute: dl_deadline and dl_runtime 757 * are the relative deadline and the maximum runtime of each 758 * instance, runtime is the runtime left for the last instance 759 * and (deadline - t), since t is rq->clock, is the time left 760 * to the (absolute) deadline. Even if overflowing the u64 type 761 * is very unlikely to occur in both cases, here we scale down 762 * as we want to avoid that risk at all. Scaling down by 10 763 * means that we reduce granularity to 1us. We are fine with it, 764 * since this is only a true/false check and, anyway, thinking 765 * of anything below microseconds resolution is actually fiction 766 * (but still we want to give the user that illusion >;). 767 */ 768 left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); 769 right = ((dl_se->deadline - t) >> DL_SCALE) * 770 (pi_se->dl_runtime >> DL_SCALE); 771 772 return dl_time_before(right, left); 773 } 774 775 /* 776 * Revised wakeup rule [1]: For self-suspending tasks, rather then 777 * re-initializing task's runtime and deadline, the revised wakeup 778 * rule adjusts the task's runtime to avoid the task to overrun its 779 * density. 780 * 781 * Reasoning: a task may overrun the density if: 782 * runtime / (deadline - t) > dl_runtime / dl_deadline 783 * 784 * Therefore, runtime can be adjusted to: 785 * runtime = (dl_runtime / dl_deadline) * (deadline - t) 786 * 787 * In such way that runtime will be equal to the maximum density 788 * the task can use without breaking any rule. 789 * 790 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant 791 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24. 792 */ 793 static void 794 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq) 795 { 796 u64 laxity = dl_se->deadline - rq_clock(rq); 797 798 /* 799 * If the task has deadline < period, and the deadline is in the past, 800 * it should already be throttled before this check. 801 * 802 * See update_dl_entity() comments for further details. 803 */ 804 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq))); 805 806 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT; 807 } 808 809 /* 810 * Regarding the deadline, a task with implicit deadline has a relative 811 * deadline == relative period. A task with constrained deadline has a 812 * relative deadline <= relative period. 813 * 814 * We support constrained deadline tasks. However, there are some restrictions 815 * applied only for tasks which do not have an implicit deadline. See 816 * update_dl_entity() to know more about such restrictions. 817 * 818 * The dl_is_implicit() returns true if the task has an implicit deadline. 819 */ 820 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se) 821 { 822 return dl_se->dl_deadline == dl_se->dl_period; 823 } 824 825 /* 826 * When a deadline entity is placed in the runqueue, its runtime and deadline 827 * might need to be updated. This is done by a CBS wake up rule. There are two 828 * different rules: 1) the original CBS; and 2) the Revisited CBS. 829 * 830 * When the task is starting a new period, the Original CBS is used. In this 831 * case, the runtime is replenished and a new absolute deadline is set. 832 * 833 * When a task is queued before the begin of the next period, using the 834 * remaining runtime and deadline could make the entity to overflow, see 835 * dl_entity_overflow() to find more about runtime overflow. 
When such case 836 * is detected, the runtime and deadline need to be updated. 837 * 838 * If the task has an implicit deadline, i.e., deadline == period, the Original 839 * CBS is applied. the runtime is replenished and a new absolute deadline is 840 * set, as in the previous cases. 841 * 842 * However, the Original CBS does not work properly for tasks with 843 * deadline < period, which are said to have a constrained deadline. By 844 * applying the Original CBS, a constrained deadline task would be able to run 845 * runtime/deadline in a period. With deadline < period, the task would 846 * overrun the runtime/period allowed bandwidth, breaking the admission test. 847 * 848 * In order to prevent this misbehave, the Revisited CBS is used for 849 * constrained deadline tasks when a runtime overflow is detected. In the 850 * Revisited CBS, rather than replenishing & setting a new absolute deadline, 851 * the remaining runtime of the task is reduced to avoid runtime overflow. 852 * Please refer to the comments update_dl_revised_wakeup() function to find 853 * more about the Revised CBS rule. 854 */ 855 static void update_dl_entity(struct sched_dl_entity *dl_se, 856 struct sched_dl_entity *pi_se) 857 { 858 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 859 struct rq *rq = rq_of_dl_rq(dl_rq); 860 861 if (dl_time_before(dl_se->deadline, rq_clock(rq)) || 862 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) { 863 864 if (unlikely(!dl_is_implicit(dl_se) && 865 !dl_time_before(dl_se->deadline, rq_clock(rq)) && 866 !dl_se->dl_boosted)){ 867 update_dl_revised_wakeup(dl_se, rq); 868 return; 869 } 870 871 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; 872 dl_se->runtime = pi_se->dl_runtime; 873 } 874 } 875 876 static inline u64 dl_next_period(struct sched_dl_entity *dl_se) 877 { 878 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period; 879 } 880 881 /* 882 * If the entity depleted all its runtime, and if we want it to sleep 883 * while waiting for some new execution time to become available, we 884 * set the bandwidth replenishment timer to the replenishment instant 885 * and try to activate it. 886 * 887 * Notice that it is important for the caller to know if the timer 888 * actually started or not (i.e., the replenishment instant is in 889 * the future or in the past). 890 */ 891 static int start_dl_timer(struct task_struct *p) 892 { 893 struct sched_dl_entity *dl_se = &p->dl; 894 struct hrtimer *timer = &dl_se->dl_timer; 895 struct rq *rq = task_rq(p); 896 ktime_t now, act; 897 s64 delta; 898 899 lockdep_assert_held(&rq->lock); 900 901 /* 902 * We want the timer to fire at the deadline, but considering 903 * that it is actually coming from rq->clock and not from 904 * hrtimer's time base reading. 905 */ 906 act = ns_to_ktime(dl_next_period(dl_se)); 907 now = hrtimer_cb_get_time(timer); 908 delta = ktime_to_ns(now) - rq_clock(rq); 909 act = ktime_add_ns(act, delta); 910 911 /* 912 * If the expiry time already passed, e.g., because the value 913 * chosen as the deadline is too small, don't even try to 914 * start the timer in the past! 915 */ 916 if (ktime_us_delta(act, now) < 0) 917 return 0; 918 919 /* 920 * !enqueued will guarantee another callback; even if one is already in 921 * progress. This ensures a balanced {get,put}_task_struct(). 922 * 923 * The race against __run_timer() clearing the enqueued state is 924 * harmless because we're holding task_rq()->lock, therefore the timer 925 * expiring after we've done the check will wait on its task_rq_lock() 926 * and observe our state. 
927 */ 928 if (!hrtimer_is_queued(timer)) { 929 get_task_struct(p); 930 hrtimer_start(timer, act, HRTIMER_MODE_ABS); 931 } 932 933 return 1; 934 } 935 936 /* 937 * This is the bandwidth enforcement timer callback. If here, we know 938 * a task is not on its dl_rq, since the fact that the timer was running 939 * means the task is throttled and needs a runtime replenishment. 940 * 941 * However, what we actually do depends on the fact the task is active, 942 * (it is on its rq) or has been removed from there by a call to 943 * dequeue_task_dl(). In the former case we must issue the runtime 944 * replenishment and add the task back to the dl_rq; in the latter, we just 945 * do nothing but clearing dl_throttled, so that runtime and deadline 946 * updating (and the queueing back to dl_rq) will be done by the 947 * next call to enqueue_task_dl(). 948 */ 949 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) 950 { 951 struct sched_dl_entity *dl_se = container_of(timer, 952 struct sched_dl_entity, 953 dl_timer); 954 struct task_struct *p = dl_task_of(dl_se); 955 struct rq_flags rf; 956 struct rq *rq; 957 958 rq = task_rq_lock(p, &rf); 959 960 /* 961 * The task might have changed its scheduling policy to something 962 * different than SCHED_DEADLINE (through switched_from_dl()). 963 */ 964 if (!dl_task(p)) 965 goto unlock; 966 967 /* 968 * The task might have been boosted by someone else and might be in the 969 * boosting/deboosting path, its not throttled. 970 */ 971 if (dl_se->dl_boosted) 972 goto unlock; 973 974 /* 975 * Spurious timer due to start_dl_timer() race; or we already received 976 * a replenishment from rt_mutex_setprio(). 977 */ 978 if (!dl_se->dl_throttled) 979 goto unlock; 980 981 sched_clock_tick(); 982 update_rq_clock(rq); 983 984 /* 985 * If the throttle happened during sched-out; like: 986 * 987 * schedule() 988 * deactivate_task() 989 * dequeue_task_dl() 990 * update_curr_dl() 991 * start_dl_timer() 992 * __dequeue_task_dl() 993 * prev->on_rq = 0; 994 * 995 * We can be both throttled and !queued. Replenish the counter 996 * but do not enqueue -- wait for our wakeup to do that. 997 */ 998 if (!task_on_rq_queued(p)) { 999 replenish_dl_entity(dl_se, dl_se); 1000 goto unlock; 1001 } 1002 1003 #ifdef CONFIG_SMP 1004 if (unlikely(!rq->online)) { 1005 /* 1006 * If the runqueue is no longer available, migrate the 1007 * task elsewhere. This necessarily changes rq. 1008 */ 1009 lockdep_unpin_lock(&rq->lock, rf.cookie); 1010 rq = dl_task_offline_migration(rq, p); 1011 rf.cookie = lockdep_pin_lock(&rq->lock); 1012 update_rq_clock(rq); 1013 1014 /* 1015 * Now that the task has been migrated to the new RQ and we 1016 * have that locked, proceed as normal and enqueue the task 1017 * there. 1018 */ 1019 } 1020 #endif 1021 1022 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); 1023 if (dl_task(rq->curr)) 1024 check_preempt_curr_dl(rq, p, 0); 1025 else 1026 resched_curr(rq); 1027 1028 #ifdef CONFIG_SMP 1029 /* 1030 * Queueing this task back might have overloaded rq, check if we need 1031 * to kick someone away. 1032 */ 1033 if (has_pushable_dl_tasks(rq)) { 1034 /* 1035 * Nothing relies on rq->lock after this, so its safe to drop 1036 * rq->lock. 1037 */ 1038 rq_unpin_lock(rq, &rf); 1039 push_dl_task(rq); 1040 rq_repin_lock(rq, &rf); 1041 } 1042 #endif 1043 1044 unlock: 1045 task_rq_unlock(rq, p, &rf); 1046 1047 /* 1048 * This can free the task_struct, including this hrtimer, do not touch 1049 * anything related to that after this. 
1050 */ 1051 put_task_struct(p); 1052 1053 return HRTIMER_NORESTART; 1054 } 1055 1056 void init_dl_task_timer(struct sched_dl_entity *dl_se) 1057 { 1058 struct hrtimer *timer = &dl_se->dl_timer; 1059 1060 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1061 timer->function = dl_task_timer; 1062 } 1063 1064 /* 1065 * During the activation, CBS checks if it can reuse the current task's 1066 * runtime and period. If the deadline of the task is in the past, CBS 1067 * cannot use the runtime, and so it replenishes the task. This rule 1068 * works fine for implicit deadline tasks (deadline == period), and the 1069 * CBS was designed for implicit deadline tasks. However, a task with 1070 * constrained deadline (deadine < period) might be awakened after the 1071 * deadline, but before the next period. In this case, replenishing the 1072 * task would allow it to run for runtime / deadline. As in this case 1073 * deadline < period, CBS enables a task to run for more than the 1074 * runtime / period. In a very loaded system, this can cause a domino 1075 * effect, making other tasks miss their deadlines. 1076 * 1077 * To avoid this problem, in the activation of a constrained deadline 1078 * task after the deadline but before the next period, throttle the 1079 * task and set the replenishing timer to the begin of the next period, 1080 * unless it is boosted. 1081 */ 1082 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se) 1083 { 1084 struct task_struct *p = dl_task_of(dl_se); 1085 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se)); 1086 1087 if (dl_time_before(dl_se->deadline, rq_clock(rq)) && 1088 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) { 1089 if (unlikely(dl_se->dl_boosted || !start_dl_timer(p))) 1090 return; 1091 dl_se->dl_throttled = 1; 1092 if (dl_se->runtime > 0) 1093 dl_se->runtime = 0; 1094 } 1095 } 1096 1097 static 1098 int dl_runtime_exceeded(struct sched_dl_entity *dl_se) 1099 { 1100 return (dl_se->runtime <= 0); 1101 } 1102 1103 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); 1104 1105 /* 1106 * This function implements the GRUB accounting rule: 1107 * according to the GRUB reclaiming algorithm, the runtime is 1108 * not decreased as "dq = -dt", but as 1109 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt", 1110 * where u is the utilization of the task, Umax is the maximum reclaimable 1111 * utilization, Uinact is the (per-runqueue) inactive utilization, computed 1112 * as the difference between the "total runqueue utilization" and the 1113 * runqueue active utilization, and Uextra is the (per runqueue) extra 1114 * reclaimable utilization. 1115 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations 1116 * multiplied by 2^BW_SHIFT, the result has to be shifted right by 1117 * BW_SHIFT. 1118 * Since rq->dl.bw_ratio contains 1 / Umax multipled by 2^RATIO_SHIFT, 1119 * dl_bw is multiped by rq->dl.bw_ratio and shifted right by RATIO_SHIFT. 1120 * Since delta is a 64 bit variable, to have an overflow its value 1121 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds. 1122 * So, overflow is not an issue here. 
1123 */ 1124 u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) 1125 { 1126 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */ 1127 u64 u_act; 1128 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT; 1129 1130 /* 1131 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)}, 1132 * we compare u_inact + rq->dl.extra_bw with 1133 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because 1134 * u_inact + rq->dl.extra_bw can be larger than 1135 * 1 * (so, 1 - u_inact - rq->dl.extra_bw would be negative 1136 * leading to wrong results) 1137 */ 1138 if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min) 1139 u_act = u_act_min; 1140 else 1141 u_act = BW_UNIT - u_inact - rq->dl.extra_bw; 1142 1143 return (delta * u_act) >> BW_SHIFT; 1144 } 1145 1146 /* 1147 * Update the current task's runtime statistics (provided it is still 1148 * a -deadline task and has not been removed from the dl_rq). 1149 */ 1150 static void update_curr_dl(struct rq *rq) 1151 { 1152 struct task_struct *curr = rq->curr; 1153 struct sched_dl_entity *dl_se = &curr->dl; 1154 u64 delta_exec, scaled_delta_exec; 1155 int cpu = cpu_of(rq); 1156 u64 now; 1157 1158 if (!dl_task(curr) || !on_dl_rq(dl_se)) 1159 return; 1160 1161 /* 1162 * Consumed budget is computed considering the time as 1163 * observed by schedulable tasks (excluding time spent 1164 * in hardirq context, etc.). Deadlines are instead 1165 * computed using hard walltime. This seems to be the more 1166 * natural solution, but the full ramifications of this 1167 * approach need further study. 1168 */ 1169 now = rq_clock_task(rq); 1170 delta_exec = now - curr->se.exec_start; 1171 if (unlikely((s64)delta_exec <= 0)) { 1172 if (unlikely(dl_se->dl_yielded)) 1173 goto throttle; 1174 return; 1175 } 1176 1177 schedstat_set(curr->se.statistics.exec_max, 1178 max(curr->se.statistics.exec_max, delta_exec)); 1179 1180 curr->se.sum_exec_runtime += delta_exec; 1181 account_group_exec_runtime(curr, delta_exec); 1182 1183 curr->se.exec_start = now; 1184 cgroup_account_cputime(curr, delta_exec); 1185 1186 sched_rt_avg_update(rq, delta_exec); 1187 1188 if (dl_entity_is_special(dl_se)) 1189 return; 1190 1191 /* 1192 * For tasks that participate in GRUB, we implement GRUB-PA: the 1193 * spare reclaimed bandwidth is used to clock down frequency. 1194 * 1195 * For the others, we still need to scale reservation parameters 1196 * according to current frequency and CPU maximum capacity. 1197 */ 1198 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) { 1199 scaled_delta_exec = grub_reclaim(delta_exec, 1200 rq, 1201 &curr->dl); 1202 } else { 1203 unsigned long scale_freq = arch_scale_freq_capacity(cpu); 1204 unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu); 1205 1206 scaled_delta_exec = cap_scale(delta_exec, scale_freq); 1207 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu); 1208 } 1209 1210 dl_se->runtime -= scaled_delta_exec; 1211 1212 throttle: 1213 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) { 1214 dl_se->dl_throttled = 1; 1215 1216 /* If requested, inform the user about runtime overruns. 
*/ 1217 if (dl_runtime_exceeded(dl_se) && 1218 (dl_se->flags & SCHED_FLAG_DL_OVERRUN)) 1219 dl_se->dl_overrun = 1; 1220 1221 __dequeue_task_dl(rq, curr, 0); 1222 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr))) 1223 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); 1224 1225 if (!is_leftmost(curr, &rq->dl)) 1226 resched_curr(rq); 1227 } 1228 1229 /* 1230 * Because -- for now -- we share the rt bandwidth, we need to 1231 * account our runtime there too, otherwise actual rt tasks 1232 * would be able to exceed the shared quota. 1233 * 1234 * Account to the root rt group for now. 1235 * 1236 * The solution we're working towards is having the RT groups scheduled 1237 * using deadline servers -- however there's a few nasties to figure 1238 * out before that can happen. 1239 */ 1240 if (rt_bandwidth_enabled()) { 1241 struct rt_rq *rt_rq = &rq->rt; 1242 1243 raw_spin_lock(&rt_rq->rt_runtime_lock); 1244 /* 1245 * We'll let actual RT tasks worry about the overflow here, we 1246 * have our own CBS to keep us inline; only account when RT 1247 * bandwidth is relevant. 1248 */ 1249 if (sched_rt_bandwidth_account(rt_rq)) 1250 rt_rq->rt_time += delta_exec; 1251 raw_spin_unlock(&rt_rq->rt_runtime_lock); 1252 } 1253 } 1254 1255 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer) 1256 { 1257 struct sched_dl_entity *dl_se = container_of(timer, 1258 struct sched_dl_entity, 1259 inactive_timer); 1260 struct task_struct *p = dl_task_of(dl_se); 1261 struct rq_flags rf; 1262 struct rq *rq; 1263 1264 rq = task_rq_lock(p, &rf); 1265 1266 if (!dl_task(p) || p->state == TASK_DEAD) { 1267 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 1268 1269 if (p->state == TASK_DEAD && dl_se->dl_non_contending) { 1270 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl)); 1271 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl)); 1272 dl_se->dl_non_contending = 0; 1273 } 1274 1275 raw_spin_lock(&dl_b->lock); 1276 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); 1277 raw_spin_unlock(&dl_b->lock); 1278 __dl_clear_params(p); 1279 1280 goto unlock; 1281 } 1282 if (dl_se->dl_non_contending == 0) 1283 goto unlock; 1284 1285 sched_clock_tick(); 1286 update_rq_clock(rq); 1287 1288 sub_running_bw(dl_se, &rq->dl); 1289 dl_se->dl_non_contending = 0; 1290 unlock: 1291 task_rq_unlock(rq, p, &rf); 1292 put_task_struct(p); 1293 1294 return HRTIMER_NORESTART; 1295 } 1296 1297 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se) 1298 { 1299 struct hrtimer *timer = &dl_se->inactive_timer; 1300 1301 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1302 timer->function = inactive_task_timer; 1303 } 1304 1305 #ifdef CONFIG_SMP 1306 1307 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) 1308 { 1309 struct rq *rq = rq_of_dl_rq(dl_rq); 1310 1311 if (dl_rq->earliest_dl.curr == 0 || 1312 dl_time_before(deadline, dl_rq->earliest_dl.curr)) { 1313 dl_rq->earliest_dl.curr = deadline; 1314 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline); 1315 } 1316 } 1317 1318 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) 1319 { 1320 struct rq *rq = rq_of_dl_rq(dl_rq); 1321 1322 /* 1323 * Since we may have removed our earliest (and/or next earliest) 1324 * task we must recompute them. 
1325 */ 1326 if (!dl_rq->dl_nr_running) { 1327 dl_rq->earliest_dl.curr = 0; 1328 dl_rq->earliest_dl.next = 0; 1329 cpudl_clear(&rq->rd->cpudl, rq->cpu); 1330 } else { 1331 struct rb_node *leftmost = dl_rq->root.rb_leftmost; 1332 struct sched_dl_entity *entry; 1333 1334 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node); 1335 dl_rq->earliest_dl.curr = entry->deadline; 1336 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline); 1337 } 1338 } 1339 1340 #else 1341 1342 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} 1343 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} 1344 1345 #endif /* CONFIG_SMP */ 1346 1347 static inline 1348 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 1349 { 1350 int prio = dl_task_of(dl_se)->prio; 1351 u64 deadline = dl_se->deadline; 1352 1353 WARN_ON(!dl_prio(prio)); 1354 dl_rq->dl_nr_running++; 1355 add_nr_running(rq_of_dl_rq(dl_rq), 1); 1356 1357 inc_dl_deadline(dl_rq, deadline); 1358 inc_dl_migration(dl_se, dl_rq); 1359 } 1360 1361 static inline 1362 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 1363 { 1364 int prio = dl_task_of(dl_se)->prio; 1365 1366 WARN_ON(!dl_prio(prio)); 1367 WARN_ON(!dl_rq->dl_nr_running); 1368 dl_rq->dl_nr_running--; 1369 sub_nr_running(rq_of_dl_rq(dl_rq), 1); 1370 1371 dec_dl_deadline(dl_rq, dl_se->deadline); 1372 dec_dl_migration(dl_se, dl_rq); 1373 } 1374 1375 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se) 1376 { 1377 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 1378 struct rb_node **link = &dl_rq->root.rb_root.rb_node; 1379 struct rb_node *parent = NULL; 1380 struct sched_dl_entity *entry; 1381 int leftmost = 1; 1382 1383 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node)); 1384 1385 while (*link) { 1386 parent = *link; 1387 entry = rb_entry(parent, struct sched_dl_entity, rb_node); 1388 if (dl_time_before(dl_se->deadline, entry->deadline)) 1389 link = &parent->rb_left; 1390 else { 1391 link = &parent->rb_right; 1392 leftmost = 0; 1393 } 1394 } 1395 1396 rb_link_node(&dl_se->rb_node, parent, link); 1397 rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost); 1398 1399 inc_dl_tasks(dl_se, dl_rq); 1400 } 1401 1402 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se) 1403 { 1404 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 1405 1406 if (RB_EMPTY_NODE(&dl_se->rb_node)) 1407 return; 1408 1409 rb_erase_cached(&dl_se->rb_node, &dl_rq->root); 1410 RB_CLEAR_NODE(&dl_se->rb_node); 1411 1412 dec_dl_tasks(dl_se, dl_rq); 1413 } 1414 1415 static void 1416 enqueue_dl_entity(struct sched_dl_entity *dl_se, 1417 struct sched_dl_entity *pi_se, int flags) 1418 { 1419 BUG_ON(on_dl_rq(dl_se)); 1420 1421 /* 1422 * If this is a wakeup or a new instance, the scheduling 1423 * parameters of the task might need updating. Otherwise, 1424 * we want a replenishment of its runtime. 
1425 */ 1426 if (flags & ENQUEUE_WAKEUP) { 1427 task_contending(dl_se, flags); 1428 update_dl_entity(dl_se, pi_se); 1429 } else if (flags & ENQUEUE_REPLENISH) { 1430 replenish_dl_entity(dl_se, pi_se); 1431 } else if ((flags & ENQUEUE_RESTORE) && 1432 dl_time_before(dl_se->deadline, 1433 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) { 1434 setup_new_dl_entity(dl_se); 1435 } 1436 1437 __enqueue_dl_entity(dl_se); 1438 } 1439 1440 static void dequeue_dl_entity(struct sched_dl_entity *dl_se) 1441 { 1442 __dequeue_dl_entity(dl_se); 1443 } 1444 1445 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) 1446 { 1447 struct task_struct *pi_task = rt_mutex_get_top_task(p); 1448 struct sched_dl_entity *pi_se = &p->dl; 1449 1450 /* 1451 * Use the scheduling parameters of the top pi-waiter task if: 1452 * - we have a top pi-waiter which is a SCHED_DEADLINE task AND 1453 * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is 1454 * smaller than our deadline OR we are a !SCHED_DEADLINE task getting 1455 * boosted due to a SCHED_DEADLINE pi-waiter). 1456 * Otherwise we keep our runtime and deadline. 1457 */ 1458 if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) { 1459 pi_se = &pi_task->dl; 1460 } else if (!dl_prio(p->normal_prio)) { 1461 /* 1462 * Special case in which we have a !SCHED_DEADLINE task 1463 * that is going to be deboosted, but exceeds its 1464 * runtime while doing so. No point in replenishing 1465 * it, as it's going to return back to its original 1466 * scheduling class after this. 1467 */ 1468 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH); 1469 return; 1470 } 1471 1472 /* 1473 * Check if a constrained deadline task was activated 1474 * after the deadline but before the next period. 1475 * If that is the case, the task will be throttled and 1476 * the replenishment timer will be set to the next period. 1477 */ 1478 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl)) 1479 dl_check_constrained_dl(&p->dl); 1480 1481 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) { 1482 add_rq_bw(&p->dl, &rq->dl); 1483 add_running_bw(&p->dl, &rq->dl); 1484 } 1485 1486 /* 1487 * If p is throttled, we do not enqueue it. In fact, if it exhausted 1488 * its budget it needs a replenishment and, since it now is on 1489 * its rq, the bandwidth timer callback (which clearly has not 1490 * run yet) will take care of this. 1491 * However, the active utilization does not depend on the fact 1492 * that the task is on the runqueue or not (but depends on the 1493 * task's state - in GRUB parlance, "inactive" vs "active contending"). 1494 * In other words, even if a task is throttled its utilization must 1495 * be counted in the active utilization; hence, we need to call 1496 * add_running_bw(). 
1497 */ 1498 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) { 1499 if (flags & ENQUEUE_WAKEUP) 1500 task_contending(&p->dl, flags); 1501 1502 return; 1503 } 1504 1505 enqueue_dl_entity(&p->dl, pi_se, flags); 1506 1507 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 1508 enqueue_pushable_dl_task(rq, p); 1509 } 1510 1511 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) 1512 { 1513 dequeue_dl_entity(&p->dl); 1514 dequeue_pushable_dl_task(rq, p); 1515 } 1516 1517 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) 1518 { 1519 update_curr_dl(rq); 1520 __dequeue_task_dl(rq, p, flags); 1521 1522 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) { 1523 sub_running_bw(&p->dl, &rq->dl); 1524 sub_rq_bw(&p->dl, &rq->dl); 1525 } 1526 1527 /* 1528 * This check allows to start the inactive timer (or to immediately 1529 * decrease the active utilization, if needed) in two cases: 1530 * when the task blocks and when it is terminating 1531 * (p->state == TASK_DEAD). We can handle the two cases in the same 1532 * way, because from GRUB's point of view the same thing is happening 1533 * (the task moves from "active contending" to "active non contending" 1534 * or "inactive") 1535 */ 1536 if (flags & DEQUEUE_SLEEP) 1537 task_non_contending(p); 1538 } 1539 1540 /* 1541 * Yield task semantic for -deadline tasks is: 1542 * 1543 * get off from the CPU until our next instance, with 1544 * a new runtime. This is of little use now, since we 1545 * don't have a bandwidth reclaiming mechanism. Anyway, 1546 * bandwidth reclaiming is planned for the future, and 1547 * yield_task_dl will indicate that some spare budget 1548 * is available for other task instances to use it. 1549 */ 1550 static void yield_task_dl(struct rq *rq) 1551 { 1552 /* 1553 * We make the task go to sleep until its current deadline by 1554 * forcing its runtime to zero. This way, update_curr_dl() stops 1555 * it and the bandwidth timer will wake it up and will give it 1556 * new scheduling parameters (thanks to dl_yielded=1). 1557 */ 1558 rq->curr->dl.dl_yielded = 1; 1559 1560 update_rq_clock(rq); 1561 update_curr_dl(rq); 1562 /* 1563 * Tell update_rq_clock() that we've just updated, 1564 * so we don't do microscopic update in schedule() 1565 * and double the fastpath cost. 1566 */ 1567 rq_clock_skip_update(rq, true); 1568 } 1569 1570 #ifdef CONFIG_SMP 1571 1572 static int find_later_rq(struct task_struct *task); 1573 1574 static int 1575 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags) 1576 { 1577 struct task_struct *curr; 1578 struct rq *rq; 1579 1580 if (sd_flag != SD_BALANCE_WAKE) 1581 goto out; 1582 1583 rq = cpu_rq(cpu); 1584 1585 rcu_read_lock(); 1586 curr = READ_ONCE(rq->curr); /* unlocked access */ 1587 1588 /* 1589 * If we are dealing with a -deadline task, we must 1590 * decide where to wake it up. 1591 * If it has a later deadline and the current task 1592 * on this rq can't move (provided the waking task 1593 * can!) we prefer to send it somewhere else. On the 1594 * other hand, if it has a shorter deadline, we 1595 * try to make it stay here, it might be important. 
1596 */ 1597 if (unlikely(dl_task(curr)) && 1598 (curr->nr_cpus_allowed < 2 || 1599 !dl_entity_preempt(&p->dl, &curr->dl)) && 1600 (p->nr_cpus_allowed > 1)) { 1601 int target = find_later_rq(p); 1602 1603 if (target != -1 && 1604 (dl_time_before(p->dl.deadline, 1605 cpu_rq(target)->dl.earliest_dl.curr) || 1606 (cpu_rq(target)->dl.dl_nr_running == 0))) 1607 cpu = target; 1608 } 1609 rcu_read_unlock(); 1610 1611 out: 1612 return cpu; 1613 } 1614 1615 static void migrate_task_rq_dl(struct task_struct *p) 1616 { 1617 struct rq *rq; 1618 1619 if (p->state != TASK_WAKING) 1620 return; 1621 1622 rq = task_rq(p); 1623 /* 1624 * Since p->state == TASK_WAKING, set_task_cpu() has been called 1625 * from try_to_wake_up(). Hence, p->pi_lock is locked, but 1626 * rq->lock is not... So, lock it 1627 */ 1628 raw_spin_lock(&rq->lock); 1629 if (p->dl.dl_non_contending) { 1630 sub_running_bw(&p->dl, &rq->dl); 1631 p->dl.dl_non_contending = 0; 1632 /* 1633 * If the timer handler is currently running and the 1634 * timer cannot be cancelled, inactive_task_timer() 1635 * will see that dl_not_contending is not set, and 1636 * will not touch the rq's active utilization, 1637 * so we are still safe. 1638 */ 1639 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) 1640 put_task_struct(p); 1641 } 1642 sub_rq_bw(&p->dl, &rq->dl); 1643 raw_spin_unlock(&rq->lock); 1644 } 1645 1646 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) 1647 { 1648 /* 1649 * Current can't be migrated, useless to reschedule, 1650 * let's hope p can move out. 1651 */ 1652 if (rq->curr->nr_cpus_allowed == 1 || 1653 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL)) 1654 return; 1655 1656 /* 1657 * p is migratable, so let's not schedule it and 1658 * see if it is pushed or pulled somewhere else. 1659 */ 1660 if (p->nr_cpus_allowed != 1 && 1661 cpudl_find(&rq->rd->cpudl, p, NULL)) 1662 return; 1663 1664 resched_curr(rq); 1665 } 1666 1667 #endif /* CONFIG_SMP */ 1668 1669 /* 1670 * Only called when both the current and waking task are -deadline 1671 * tasks. 1672 */ 1673 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, 1674 int flags) 1675 { 1676 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { 1677 resched_curr(rq); 1678 return; 1679 } 1680 1681 #ifdef CONFIG_SMP 1682 /* 1683 * In the unlikely case current and p have the same deadline 1684 * let us try to decide what's the best thing to do... 
1685 */ 1686 if ((p->dl.deadline == rq->curr->dl.deadline) && 1687 !test_tsk_need_resched(rq->curr)) 1688 check_preempt_equal_dl(rq, p); 1689 #endif /* CONFIG_SMP */ 1690 } 1691 1692 #ifdef CONFIG_SCHED_HRTICK 1693 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) 1694 { 1695 hrtick_start(rq, p->dl.runtime); 1696 } 1697 #else /* !CONFIG_SCHED_HRTICK */ 1698 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) 1699 { 1700 } 1701 #endif 1702 1703 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, 1704 struct dl_rq *dl_rq) 1705 { 1706 struct rb_node *left = rb_first_cached(&dl_rq->root); 1707 1708 if (!left) 1709 return NULL; 1710 1711 return rb_entry(left, struct sched_dl_entity, rb_node); 1712 } 1713 1714 static struct task_struct * 1715 pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 1716 { 1717 struct sched_dl_entity *dl_se; 1718 struct task_struct *p; 1719 struct dl_rq *dl_rq; 1720 1721 dl_rq = &rq->dl; 1722 1723 if (need_pull_dl_task(rq, prev)) { 1724 /* 1725 * This is OK, because current is on_cpu, which avoids it being 1726 * picked for load-balance and preemption/IRQs are still 1727 * disabled avoiding further scheduler activity on it and we're 1728 * being very careful to re-start the picking loop. 1729 */ 1730 rq_unpin_lock(rq, rf); 1731 pull_dl_task(rq); 1732 rq_repin_lock(rq, rf); 1733 /* 1734 * pull_dl_task() can drop (and re-acquire) rq->lock; this 1735 * means a stop task can slip in, in which case we need to 1736 * re-start task selection. 1737 */ 1738 if (rq->stop && task_on_rq_queued(rq->stop)) 1739 return RETRY_TASK; 1740 } 1741 1742 /* 1743 * When prev is DL, we may throttle it in put_prev_task(). 1744 * So, we update time before we check for dl_nr_running. 1745 */ 1746 if (prev->sched_class == &dl_sched_class) 1747 update_curr_dl(rq); 1748 1749 if (unlikely(!dl_rq->dl_nr_running)) 1750 return NULL; 1751 1752 put_prev_task(rq, prev); 1753 1754 dl_se = pick_next_dl_entity(rq, dl_rq); 1755 BUG_ON(!dl_se); 1756 1757 p = dl_task_of(dl_se); 1758 p->se.exec_start = rq_clock_task(rq); 1759 1760 /* Running task will never be pushed. */ 1761 dequeue_pushable_dl_task(rq, p); 1762 1763 if (hrtick_enabled(rq)) 1764 start_hrtick_dl(rq, p); 1765 1766 queue_push_tasks(rq); 1767 1768 return p; 1769 } 1770 1771 static void put_prev_task_dl(struct rq *rq, struct task_struct *p) 1772 { 1773 update_curr_dl(rq); 1774 1775 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) 1776 enqueue_pushable_dl_task(rq, p); 1777 } 1778 1779 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) 1780 { 1781 update_curr_dl(rq); 1782 1783 /* 1784 * Even when we have runtime, update_curr_dl() might have resulted in us 1785 * not being the leftmost task anymore. In that case NEED_RESCHED will 1786 * be set and schedule() will start a new hrtick for the next task. 
1787 */ 1788 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 && 1789 is_leftmost(p, &rq->dl)) 1790 start_hrtick_dl(rq, p); 1791 } 1792 1793 static void task_fork_dl(struct task_struct *p) 1794 { 1795 /* 1796 * SCHED_DEADLINE tasks cannot fork and this is achieved through 1797 * sched_fork() 1798 */ 1799 } 1800 1801 static void set_curr_task_dl(struct rq *rq) 1802 { 1803 struct task_struct *p = rq->curr; 1804 1805 p->se.exec_start = rq_clock_task(rq); 1806 1807 /* You can't push away the running task */ 1808 dequeue_pushable_dl_task(rq, p); 1809 } 1810 1811 #ifdef CONFIG_SMP 1812 1813 /* Only try algorithms three times */ 1814 #define DL_MAX_TRIES 3 1815 1816 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) 1817 { 1818 if (!task_running(rq, p) && 1819 cpumask_test_cpu(cpu, &p->cpus_allowed)) 1820 return 1; 1821 return 0; 1822 } 1823 1824 /* 1825 * Return the earliest pushable rq's task, which is suitable to be executed 1826 * on the CPU, NULL otherwise: 1827 */ 1828 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu) 1829 { 1830 struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost; 1831 struct task_struct *p = NULL; 1832 1833 if (!has_pushable_dl_tasks(rq)) 1834 return NULL; 1835 1836 next_node: 1837 if (next_node) { 1838 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks); 1839 1840 if (pick_dl_task(rq, p, cpu)) 1841 return p; 1842 1843 next_node = rb_next(next_node); 1844 goto next_node; 1845 } 1846 1847 return NULL; 1848 } 1849 1850 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl); 1851 1852 static int find_later_rq(struct task_struct *task) 1853 { 1854 struct sched_domain *sd; 1855 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl); 1856 int this_cpu = smp_processor_id(); 1857 int cpu = task_cpu(task); 1858 1859 /* Make sure the mask is initialized first */ 1860 if (unlikely(!later_mask)) 1861 return -1; 1862 1863 if (task->nr_cpus_allowed == 1) 1864 return -1; 1865 1866 /* 1867 * We have to consider system topology and task affinity 1868 * first, then we can look for a suitable cpu. 1869 */ 1870 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) 1871 return -1; 1872 1873 /* 1874 * If we are here, some targets have been found, including 1875 * the most suitable which is, among the runqueues where the 1876 * current tasks have later deadlines than the task's one, the 1877 * rq with the latest possible one. 1878 * 1879 * Now we check how well this matches with task's 1880 * affinity and system topology. 1881 * 1882 * The last cpu where the task run is our first 1883 * guess, since it is most likely cache-hot there. 1884 */ 1885 if (cpumask_test_cpu(cpu, later_mask)) 1886 return cpu; 1887 /* 1888 * Check if this_cpu is to be skipped (i.e., it is 1889 * not in the mask) or not. 1890 */ 1891 if (!cpumask_test_cpu(this_cpu, later_mask)) 1892 this_cpu = -1; 1893 1894 rcu_read_lock(); 1895 for_each_domain(cpu, sd) { 1896 if (sd->flags & SD_WAKE_AFFINE) { 1897 int best_cpu; 1898 1899 /* 1900 * If possible, preempting this_cpu is 1901 * cheaper than migrating. 1902 */ 1903 if (this_cpu != -1 && 1904 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { 1905 rcu_read_unlock(); 1906 return this_cpu; 1907 } 1908 1909 best_cpu = cpumask_first_and(later_mask, 1910 sched_domain_span(sd)); 1911 /* 1912 * Last chance: if a cpu being in both later_mask 1913 * and current sd span is valid, that becomes our 1914 * choice. 
Of course, the latest possible cpu is 1915 * already under consideration through later_mask. 1916 */ 1917 if (best_cpu < nr_cpu_ids) { 1918 rcu_read_unlock(); 1919 return best_cpu; 1920 } 1921 } 1922 } 1923 rcu_read_unlock(); 1924 1925 /* 1926 * At this point, all our guesses failed, we just return 1927 * 'something', and let the caller sort the things out. 1928 */ 1929 if (this_cpu != -1) 1930 return this_cpu; 1931 1932 cpu = cpumask_any(later_mask); 1933 if (cpu < nr_cpu_ids) 1934 return cpu; 1935 1936 return -1; 1937 } 1938 1939 /* Locks the rq it finds */ 1940 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) 1941 { 1942 struct rq *later_rq = NULL; 1943 int tries; 1944 int cpu; 1945 1946 for (tries = 0; tries < DL_MAX_TRIES; tries++) { 1947 cpu = find_later_rq(task); 1948 1949 if ((cpu == -1) || (cpu == rq->cpu)) 1950 break; 1951 1952 later_rq = cpu_rq(cpu); 1953 1954 if (later_rq->dl.dl_nr_running && 1955 !dl_time_before(task->dl.deadline, 1956 later_rq->dl.earliest_dl.curr)) { 1957 /* 1958 * Target rq has tasks of equal or earlier deadline, 1959 * retrying does not release any lock and is unlikely 1960 * to yield a different result. 1961 */ 1962 later_rq = NULL; 1963 break; 1964 } 1965 1966 /* Retry if something changed. */ 1967 if (double_lock_balance(rq, later_rq)) { 1968 if (unlikely(task_rq(task) != rq || 1969 !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) || 1970 task_running(rq, task) || 1971 !dl_task(task) || 1972 !task_on_rq_queued(task))) { 1973 double_unlock_balance(rq, later_rq); 1974 later_rq = NULL; 1975 break; 1976 } 1977 } 1978 1979 /* 1980 * If the rq we found has no -deadline task, or 1981 * its earliest one has a later deadline than our 1982 * task, the rq is a good one. 1983 */ 1984 if (!later_rq->dl.dl_nr_running || 1985 dl_time_before(task->dl.deadline, 1986 later_rq->dl.earliest_dl.curr)) 1987 break; 1988 1989 /* Otherwise we try again. */ 1990 double_unlock_balance(rq, later_rq); 1991 later_rq = NULL; 1992 } 1993 1994 return later_rq; 1995 } 1996 1997 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) 1998 { 1999 struct task_struct *p; 2000 2001 if (!has_pushable_dl_tasks(rq)) 2002 return NULL; 2003 2004 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost, 2005 struct task_struct, pushable_dl_tasks); 2006 2007 BUG_ON(rq->cpu != task_cpu(p)); 2008 BUG_ON(task_current(rq, p)); 2009 BUG_ON(p->nr_cpus_allowed <= 1); 2010 2011 BUG_ON(!task_on_rq_queued(p)); 2012 BUG_ON(!dl_task(p)); 2013 2014 return p; 2015 } 2016 2017 /* 2018 * See if the non running -deadline tasks on this rq 2019 * can be sent to some other CPU where they can preempt 2020 * and start executing. 2021 */ 2022 static int push_dl_task(struct rq *rq) 2023 { 2024 struct task_struct *next_task; 2025 struct rq *later_rq; 2026 int ret = 0; 2027 2028 if (!rq->dl.overloaded) 2029 return 0; 2030 2031 next_task = pick_next_pushable_dl_task(rq); 2032 if (!next_task) 2033 return 0; 2034 2035 retry: 2036 if (unlikely(next_task == rq->curr)) { 2037 WARN_ON(1); 2038 return 0; 2039 } 2040 2041 /* 2042 * If next_task preempts rq->curr, and rq->curr 2043 * can move away, it makes sense to just reschedule 2044 * without going further in pushing next_task. 
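 * Illustrative numbers: if next_task's absolute deadline is at t + 10ms while
 * rq->curr's is at t + 15ms and curr is allowed on more than one CPU, we
 * reschedule here; next_task starts running locally and curr becomes the
 * candidate to be pushed or pulled elsewhere.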
 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	sub_running_bw(&next_task->dl, &rq->dl);
	sub_rq_bw(&next_task->dl, &rq->dl);
	set_task_cpu(next_task, later_rq->cpu);
	add_rq_bw(&next_task->dl, &later_rq->dl);
	add_running_bw(&next_task->dl, &later_rq->dl);
	activate_task(later_rq, next_task, 0);
	ret = 1;

	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_dl_tasks(struct rq *rq)
{
	/* push_dl_task() will return true if it moved a -deadline task */
	while (push_dl_task(rq))
		;
}

static void pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	struct task_struct *p;
	bool resched = false;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return;

	/*
	 * Match the barrier from dl_set_overloaded; this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			resched = true;

			deactivate_task(src_rq, p, 0);
			sub_running_bw(&p->dl, &src_rq->dl);
			sub_rq_bw(&p->dl, &src_rq->dl);
			set_task_cpu(p, this_cpu);
			add_rq_bw(&p->dl, &this_rq->dl);
			add_running_bw(&p->dl, &this_rq->dl);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	if (resched)
		resched_curr(this_rq);
}

/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
		push_dl_tasks(rq);
	}
}

static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct root_domain *src_rd;
	struct rq *rq;

	BUG_ON(!dl_task(p));

	rq = task_rq(p);
	src_rd = rq->rd;
	/*
	 * Migrating a SCHED_DEADLINE task between exclusive
	 * cpusets (different root_domains) entails a bandwidth
	 * update. We already made space for us in the destination
	 * domain (see cpuset_can_attach()).
	 */
	if (!cpumask_intersects(src_rd->span, new_mask)) {
		struct dl_bw *src_dl_b;

		src_dl_b = dl_bw_of(cpu_of(rq));
		/*
		 * We now free resources of the root_domain we are migrating
		 * off. In the worst case, sched_setattr() may temporarily fail
		 * until we complete the update.
		 */
		raw_spin_lock(&src_dl_b->lock);
		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&src_dl_b->lock);
	}

	set_cpus_allowed_common(p, new_mask);
}

/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_clear(&rq->rd->cpudl, rq->cpu);
	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}

void __init init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * task_non_contending() can start the "inactive timer" (if the 0-lag
	 * time is in the future). If the task switches back to dl before
	 * the "inactive timer" fires, it can continue to consume its current
	 * runtime using its current deadline. If it stays outside of
	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
	 * will reset the task parameters.
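	 * As an illustrative example: a task with dl_runtime = 10ms and
	 * dl_period = 100ms (utilization 1/10) that leaves SCHED_DEADLINE
	 * with 2ms of runtime left reaches its 0-lag time 2ms / (1/10) = 20ms
	 * before its current absolute deadline; only once that instant passes
	 * may its utilization be dropped from running_bw and its parameters
	 * be reset.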
	 */
	if (task_on_rq_queued(p) && p->dl.dl_runtime)
		task_non_contending(p);

	if (!task_on_rq_queued(p))
		sub_rq_bw(&p->dl, &rq->dl);

	/*
	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
	 * at the 0-lag time, because the task could have been migrated
	 * while SCHED_OTHER in the meanwhile.
	 */
	if (p->dl.dl_non_contending)
		p->dl.dl_non_contending = 0;

	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
		return;

	queue_pull_task(rq);
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
		put_task_struct(p);

	/* If p is not queued we will update its parameters at next wakeup. */
	if (!task_on_rq_queued(p)) {
		add_rq_bw(&p->dl, &rq->dl);

		return;
	}

	if (rq->curr != p) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
			queue_push_tasks(rq);
#endif
		if (dl_task(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_curr(rq);
	}
}

/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			queue_pull_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
			resched_curr(rq);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
2369 */ 2370 resched_curr(rq); 2371 #endif /* CONFIG_SMP */ 2372 } 2373 } 2374 2375 const struct sched_class dl_sched_class = { 2376 .next = &rt_sched_class, 2377 .enqueue_task = enqueue_task_dl, 2378 .dequeue_task = dequeue_task_dl, 2379 .yield_task = yield_task_dl, 2380 2381 .check_preempt_curr = check_preempt_curr_dl, 2382 2383 .pick_next_task = pick_next_task_dl, 2384 .put_prev_task = put_prev_task_dl, 2385 2386 #ifdef CONFIG_SMP 2387 .select_task_rq = select_task_rq_dl, 2388 .migrate_task_rq = migrate_task_rq_dl, 2389 .set_cpus_allowed = set_cpus_allowed_dl, 2390 .rq_online = rq_online_dl, 2391 .rq_offline = rq_offline_dl, 2392 .task_woken = task_woken_dl, 2393 #endif 2394 2395 .set_curr_task = set_curr_task_dl, 2396 .task_tick = task_tick_dl, 2397 .task_fork = task_fork_dl, 2398 2399 .prio_changed = prio_changed_dl, 2400 .switched_from = switched_from_dl, 2401 .switched_to = switched_to_dl, 2402 2403 .update_curr = update_curr_dl, 2404 }; 2405 2406 int sched_dl_global_validate(void) 2407 { 2408 u64 runtime = global_rt_runtime(); 2409 u64 period = global_rt_period(); 2410 u64 new_bw = to_ratio(period, runtime); 2411 struct dl_bw *dl_b; 2412 int cpu, ret = 0; 2413 unsigned long flags; 2414 2415 /* 2416 * Here we want to check the bandwidth not being set to some 2417 * value smaller than the currently allocated bandwidth in 2418 * any of the root_domains. 2419 * 2420 * FIXME: Cycling on all the CPUs is overdoing, but simpler than 2421 * cycling on root_domains... Discussion on different/better 2422 * solutions is welcome! 2423 */ 2424 for_each_possible_cpu(cpu) { 2425 rcu_read_lock_sched(); 2426 dl_b = dl_bw_of(cpu); 2427 2428 raw_spin_lock_irqsave(&dl_b->lock, flags); 2429 if (new_bw < dl_b->total_bw) 2430 ret = -EBUSY; 2431 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2432 2433 rcu_read_unlock_sched(); 2434 2435 if (ret) 2436 break; 2437 } 2438 2439 return ret; 2440 } 2441 2442 void init_dl_rq_bw_ratio(struct dl_rq *dl_rq) 2443 { 2444 if (global_rt_runtime() == RUNTIME_INF) { 2445 dl_rq->bw_ratio = 1 << RATIO_SHIFT; 2446 dl_rq->extra_bw = 1 << BW_SHIFT; 2447 } else { 2448 dl_rq->bw_ratio = to_ratio(global_rt_runtime(), 2449 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT); 2450 dl_rq->extra_bw = to_ratio(global_rt_period(), 2451 global_rt_runtime()); 2452 } 2453 } 2454 2455 void sched_dl_do_global(void) 2456 { 2457 u64 new_bw = -1; 2458 struct dl_bw *dl_b; 2459 int cpu; 2460 unsigned long flags; 2461 2462 def_dl_bandwidth.dl_period = global_rt_period(); 2463 def_dl_bandwidth.dl_runtime = global_rt_runtime(); 2464 2465 if (global_rt_runtime() != RUNTIME_INF) 2466 new_bw = to_ratio(global_rt_period(), global_rt_runtime()); 2467 2468 /* 2469 * FIXME: As above... 2470 */ 2471 for_each_possible_cpu(cpu) { 2472 rcu_read_lock_sched(); 2473 dl_b = dl_bw_of(cpu); 2474 2475 raw_spin_lock_irqsave(&dl_b->lock, flags); 2476 dl_b->bw = new_bw; 2477 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2478 2479 rcu_read_unlock_sched(); 2480 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl); 2481 } 2482 } 2483 2484 /* 2485 * We must be sure that accepting a new task (or allowing changing the 2486 * parameters of an existing one) is consistent with the bandwidth 2487 * constraints. If yes, this function also accordingly updates the currently 2488 * allocated bandwidth to reflect the new situation. 2489 * 2490 * This function is called while holding p's rq->lock. 
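 * Illustrative numbers: with the default 95% cap (sched_rt_runtime_us =
 * 950000 over sched_rt_period_us = 1000000), a 4-CPU root domain can admit
 * at most 4 * 0.95 = 3.8 CPUs' worth of deadline bandwidth in total; a task
 * asking for 10ms of runtime every 100ms contributes
 * new_bw = to_ratio(100ms, 10ms), i.e. one tenth of a CPU, and the request
 * is rejected if adding it would push total_bw past that cap.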
2491 */ 2492 int sched_dl_overflow(struct task_struct *p, int policy, 2493 const struct sched_attr *attr) 2494 { 2495 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 2496 u64 period = attr->sched_period ?: attr->sched_deadline; 2497 u64 runtime = attr->sched_runtime; 2498 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; 2499 int cpus, err = -1; 2500 2501 if (attr->sched_flags & SCHED_FLAG_SUGOV) 2502 return 0; 2503 2504 /* !deadline task may carry old deadline bandwidth */ 2505 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) 2506 return 0; 2507 2508 /* 2509 * Either if a task, enters, leave, or stays -deadline but changes 2510 * its parameters, we may need to update accordingly the total 2511 * allocated bandwidth of the container. 2512 */ 2513 raw_spin_lock(&dl_b->lock); 2514 cpus = dl_bw_cpus(task_cpu(p)); 2515 if (dl_policy(policy) && !task_has_dl_policy(p) && 2516 !__dl_overflow(dl_b, cpus, 0, new_bw)) { 2517 if (hrtimer_active(&p->dl.inactive_timer)) 2518 __dl_sub(dl_b, p->dl.dl_bw, cpus); 2519 __dl_add(dl_b, new_bw, cpus); 2520 err = 0; 2521 } else if (dl_policy(policy) && task_has_dl_policy(p) && 2522 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) { 2523 /* 2524 * XXX this is slightly incorrect: when the task 2525 * utilization decreases, we should delay the total 2526 * utilization change until the task's 0-lag point. 2527 * But this would require to set the task's "inactive 2528 * timer" when the task is not inactive. 2529 */ 2530 __dl_sub(dl_b, p->dl.dl_bw, cpus); 2531 __dl_add(dl_b, new_bw, cpus); 2532 dl_change_utilization(p, new_bw); 2533 err = 0; 2534 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { 2535 /* 2536 * Do not decrease the total deadline utilization here, 2537 * switched_from_dl() will take care to do it at the correct 2538 * (0-lag) time. 2539 */ 2540 err = 0; 2541 } 2542 raw_spin_unlock(&dl_b->lock); 2543 2544 return err; 2545 } 2546 2547 /* 2548 * This function initializes the sched_dl_entity of a newly becoming 2549 * SCHED_DEADLINE task. 2550 * 2551 * Only the static values are considered here, the actual runtime and the 2552 * absolute deadline will be properly calculated when the task is enqueued 2553 * for the first time with its new policy. 2554 */ 2555 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr) 2556 { 2557 struct sched_dl_entity *dl_se = &p->dl; 2558 2559 dl_se->dl_runtime = attr->sched_runtime; 2560 dl_se->dl_deadline = attr->sched_deadline; 2561 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 2562 dl_se->flags = attr->sched_flags; 2563 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 2564 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime); 2565 } 2566 2567 void __getparam_dl(struct task_struct *p, struct sched_attr *attr) 2568 { 2569 struct sched_dl_entity *dl_se = &p->dl; 2570 2571 attr->sched_priority = p->rt_priority; 2572 attr->sched_runtime = dl_se->dl_runtime; 2573 attr->sched_deadline = dl_se->dl_deadline; 2574 attr->sched_period = dl_se->dl_period; 2575 attr->sched_flags = dl_se->flags; 2576 } 2577 2578 /* 2579 * This function validates the new parameters of a -deadline task. 2580 * We ask for the deadline not being zero, and greater or equal 2581 * than the runtime, as well as the period of being zero or 2582 * greater than deadline. 
Furthermore, we have to be sure that 2583 * user parameters are above the internal resolution of 1us (we 2584 * check sched_runtime only since it is always the smaller one) and 2585 * below 2^63 ns (we have to check both sched_deadline and 2586 * sched_period, as the latter can be zero). 2587 */ 2588 bool __checkparam_dl(const struct sched_attr *attr) 2589 { 2590 /* special dl tasks don't actually use any parameter */ 2591 if (attr->sched_flags & SCHED_FLAG_SUGOV) 2592 return true; 2593 2594 /* deadline != 0 */ 2595 if (attr->sched_deadline == 0) 2596 return false; 2597 2598 /* 2599 * Since we truncate DL_SCALE bits, make sure we're at least 2600 * that big. 2601 */ 2602 if (attr->sched_runtime < (1ULL << DL_SCALE)) 2603 return false; 2604 2605 /* 2606 * Since we use the MSB for wrap-around and sign issues, make 2607 * sure it's not set (mind that period can be equal to zero). 2608 */ 2609 if (attr->sched_deadline & (1ULL << 63) || 2610 attr->sched_period & (1ULL << 63)) 2611 return false; 2612 2613 /* runtime <= deadline <= period (if period != 0) */ 2614 if ((attr->sched_period != 0 && 2615 attr->sched_period < attr->sched_deadline) || 2616 attr->sched_deadline < attr->sched_runtime) 2617 return false; 2618 2619 return true; 2620 } 2621 2622 /* 2623 * This function clears the sched_dl_entity static params. 2624 */ 2625 void __dl_clear_params(struct task_struct *p) 2626 { 2627 struct sched_dl_entity *dl_se = &p->dl; 2628 2629 dl_se->dl_runtime = 0; 2630 dl_se->dl_deadline = 0; 2631 dl_se->dl_period = 0; 2632 dl_se->flags = 0; 2633 dl_se->dl_bw = 0; 2634 dl_se->dl_density = 0; 2635 2636 dl_se->dl_throttled = 0; 2637 dl_se->dl_yielded = 0; 2638 dl_se->dl_non_contending = 0; 2639 dl_se->dl_overrun = 0; 2640 } 2641 2642 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) 2643 { 2644 struct sched_dl_entity *dl_se = &p->dl; 2645 2646 if (dl_se->dl_runtime != attr->sched_runtime || 2647 dl_se->dl_deadline != attr->sched_deadline || 2648 dl_se->dl_period != attr->sched_period || 2649 dl_se->flags != attr->sched_flags) 2650 return true; 2651 2652 return false; 2653 } 2654 2655 #ifdef CONFIG_SMP 2656 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed) 2657 { 2658 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask, 2659 cs_cpus_allowed); 2660 struct dl_bw *dl_b; 2661 bool overflow; 2662 int cpus, ret; 2663 unsigned long flags; 2664 2665 rcu_read_lock_sched(); 2666 dl_b = dl_bw_of(dest_cpu); 2667 raw_spin_lock_irqsave(&dl_b->lock, flags); 2668 cpus = dl_bw_cpus(dest_cpu); 2669 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw); 2670 if (overflow) 2671 ret = -EBUSY; 2672 else { 2673 /* 2674 * We reserve space for this task in the destination 2675 * root_domain, as we can't fail after this point. 2676 * We will free resources in the source root_domain 2677 * later on (see set_cpus_allowed_dl()). 
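 * (This is the reserve-then-release half of the pattern: the bandwidth is
 * accounted to the destination root_domain here, while the matching
 * __dl_sub() on the source root_domain is done from set_cpus_allowed_dl()
 * above, once the move is actually performed.)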
2678 */ 2679 __dl_add(dl_b, p->dl.dl_bw, cpus); 2680 ret = 0; 2681 } 2682 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2683 rcu_read_unlock_sched(); 2684 return ret; 2685 } 2686 2687 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, 2688 const struct cpumask *trial) 2689 { 2690 int ret = 1, trial_cpus; 2691 struct dl_bw *cur_dl_b; 2692 unsigned long flags; 2693 2694 rcu_read_lock_sched(); 2695 cur_dl_b = dl_bw_of(cpumask_any(cur)); 2696 trial_cpus = cpumask_weight(trial); 2697 2698 raw_spin_lock_irqsave(&cur_dl_b->lock, flags); 2699 if (cur_dl_b->bw != -1 && 2700 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw) 2701 ret = 0; 2702 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); 2703 rcu_read_unlock_sched(); 2704 return ret; 2705 } 2706 2707 bool dl_cpu_busy(unsigned int cpu) 2708 { 2709 unsigned long flags; 2710 struct dl_bw *dl_b; 2711 bool overflow; 2712 int cpus; 2713 2714 rcu_read_lock_sched(); 2715 dl_b = dl_bw_of(cpu); 2716 raw_spin_lock_irqsave(&dl_b->lock, flags); 2717 cpus = dl_bw_cpus(cpu); 2718 overflow = __dl_overflow(dl_b, cpus, 0, 0); 2719 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2720 rcu_read_unlock_sched(); 2721 return overflow; 2722 } 2723 #endif 2724 2725 #ifdef CONFIG_SCHED_DEBUG 2726 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2727 2728 void print_dl_stats(struct seq_file *m, int cpu) 2729 { 2730 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); 2731 } 2732 #endif /* CONFIG_SCHED_DEBUG */ 2733
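/*
 * Illustrative only, not part of the scheduler itself: a minimal userspace
 * sketch of parameters that satisfy __checkparam_dl() above, assuming struct
 * sched_attr and SCHED_DEADLINE from the uapi headers and a libc without a
 * sched_setattr() wrapper (hence the raw syscall). The numbers are arbitrary
 * examples, not recommendations.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	// 10ms, >= 1 << DL_SCALE
 *		.sched_deadline	=  30 * 1000 * 1000,	// 30ms, >= runtime
 *		.sched_period	= 100 * 1000 * 1000,	// 100ms, >= deadline
 *	};
 *
 *	// __setparam_dl() then derives dl_bw = runtime/period and
 *	// dl_density = runtime/deadline for admission control and the CBS.
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))	// 0 == this task
 *		perror("sched_setattr");
 */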