1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Deadline Scheduling Class (SCHED_DEADLINE) 4 * 5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS). 6 * 7 * Tasks that periodically executes their instances for less than their 8 * runtime won't miss any of their deadlines. 9 * Tasks that are not periodic or sporadic or that tries to execute more 10 * than their reserved bandwidth will be slowed down (and may potentially 11 * miss some of their deadlines), and won't affect any other task. 12 * 13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>, 14 * Juri Lelli <juri.lelli@gmail.com>, 15 * Michael Trimarchi <michael@amarulasolutions.com>, 16 * Fabio Checconi <fchecconi@gmail.com> 17 */ 18 #include "sched.h" 19 #include "pelt.h" 20 21 struct dl_bandwidth def_dl_bandwidth; 22 23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se) 24 { 25 return container_of(dl_se, struct task_struct, dl); 26 } 27 28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) 29 { 30 return container_of(dl_rq, struct rq, dl); 31 } 32 33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se) 34 { 35 struct task_struct *p = dl_task_of(dl_se); 36 struct rq *rq = task_rq(p); 37 38 return &rq->dl; 39 } 40 41 static inline int on_dl_rq(struct sched_dl_entity *dl_se) 42 { 43 return !RB_EMPTY_NODE(&dl_se->rb_node); 44 } 45 46 #ifdef CONFIG_SMP 47 static inline struct dl_bw *dl_bw_of(int i) 48 { 49 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 50 "sched RCU must be held"); 51 return &cpu_rq(i)->rd->dl_bw; 52 } 53 54 static inline int dl_bw_cpus(int i) 55 { 56 struct root_domain *rd = cpu_rq(i)->rd; 57 int cpus = 0; 58 59 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 60 "sched RCU must be held"); 61 for_each_cpu_and(i, rd->span, cpu_active_mask) 62 cpus++; 63 64 return cpus; 65 } 66 #else 67 static inline struct dl_bw *dl_bw_of(int i) 68 { 69 return &cpu_rq(i)->dl.dl_bw; 70 } 71 72 static inline int dl_bw_cpus(int i) 73 { 74 return 1; 75 } 76 #endif 77 78 static inline 79 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq) 80 { 81 u64 old = dl_rq->running_bw; 82 83 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); 84 dl_rq->running_bw += dl_bw; 85 SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */ 86 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); 87 /* kick cpufreq (see the comment in kernel/sched/sched.h). */ 88 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); 89 } 90 91 static inline 92 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq) 93 { 94 u64 old = dl_rq->running_bw; 95 96 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); 97 dl_rq->running_bw -= dl_bw; 98 SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */ 99 if (dl_rq->running_bw > old) 100 dl_rq->running_bw = 0; 101 /* kick cpufreq (see the comment in kernel/sched/sched.h). */ 102 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); 103 } 104 105 static inline 106 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) 107 { 108 u64 old = dl_rq->this_bw; 109 110 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); 111 dl_rq->this_bw += dl_bw; 112 SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */ 113 } 114 115 static inline 116 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) 117 { 118 u64 old = dl_rq->this_bw; 119 120 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); 121 dl_rq->this_bw -= dl_bw; 122 SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */ 123 if (dl_rq->this_bw > old) 124 dl_rq->this_bw = 0; 125 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); 126 } 127 128 static inline 129 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 130 { 131 if (!dl_entity_is_special(dl_se)) 132 __add_rq_bw(dl_se->dl_bw, dl_rq); 133 } 134 135 static inline 136 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 137 { 138 if (!dl_entity_is_special(dl_se)) 139 __sub_rq_bw(dl_se->dl_bw, dl_rq); 140 } 141 142 static inline 143 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 144 { 145 if (!dl_entity_is_special(dl_se)) 146 __add_running_bw(dl_se->dl_bw, dl_rq); 147 } 148 149 static inline 150 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 151 { 152 if (!dl_entity_is_special(dl_se)) 153 __sub_running_bw(dl_se->dl_bw, dl_rq); 154 } 155 156 static void dl_change_utilization(struct task_struct *p, u64 new_bw) 157 { 158 struct rq *rq; 159 160 BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV); 161 162 if (task_on_rq_queued(p)) 163 return; 164 165 rq = task_rq(p); 166 if (p->dl.dl_non_contending) { 167 sub_running_bw(&p->dl, &rq->dl); 168 p->dl.dl_non_contending = 0; 169 /* 170 * If the timer handler is currently running and the 171 * timer cannot be cancelled, inactive_task_timer() 172 * will see that dl_not_contending is not set, and 173 * will not touch the rq's active utilization, 174 * so we are still safe. 175 */ 176 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) 177 put_task_struct(p); 178 } 179 __sub_rq_bw(p->dl.dl_bw, &rq->dl); 180 __add_rq_bw(new_bw, &rq->dl); 181 } 182 183 /* 184 * The utilization of a task cannot be immediately removed from 185 * the rq active utilization (running_bw) when the task blocks. 186 * Instead, we have to wait for the so called "0-lag time". 187 * 188 * If a task blocks before the "0-lag time", a timer (the inactive 189 * timer) is armed, and running_bw is decreased when the timer 190 * fires. 191 * 192 * If the task wakes up again before the inactive timer fires, 193 * the timer is cancelled, whereas if the task wakes up after the 194 * inactive timer fired (and running_bw has been decreased) the 195 * task's utilization has to be added to running_bw again. 196 * A flag in the deadline scheduling entity (dl_non_contending) 197 * is used to avoid race conditions between the inactive timer handler 198 * and task wakeups. 199 * 200 * The following diagram shows how running_bw is updated. A task is 201 * "ACTIVE" when its utilization contributes to running_bw; an 202 * "ACTIVE contending" task is in the TASK_RUNNING state, while an 203 * "ACTIVE non contending" task is a blocked task for which the "0-lag time" 204 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag" 205 * time already passed, which does not contribute to running_bw anymore. 206 * +------------------+ 207 * wakeup | ACTIVE | 208 * +------------------>+ contending | 209 * | add_running_bw | | 210 * | +----+------+------+ 211 * | | ^ 212 * | dequeue | | 213 * +--------+-------+ | | 214 * | | t >= 0-lag | | wakeup 215 * | INACTIVE |<---------------+ | 216 * | | sub_running_bw | | 217 * +--------+-------+ | | 218 * ^ | | 219 * | t < 0-lag | | 220 * | | | 221 * | V | 222 * | +----+------+------+ 223 * | sub_running_bw | ACTIVE | 224 * +-------------------+ | 225 * inactive timer | non contending | 226 * fired +------------------+ 227 * 228 * The task_non_contending() function is invoked when a task 229 * blocks, and checks if the 0-lag time already passed or 230 * not (in the first case, it directly updates running_bw; 231 * in the second case, it arms the inactive timer). 232 * 233 * The task_contending() function is invoked when a task wakes 234 * up, and checks if the task is still in the "ACTIVE non contending" 235 * state or not (in the second case, it updates running_bw). 236 */ 237 static void task_non_contending(struct task_struct *p) 238 { 239 struct sched_dl_entity *dl_se = &p->dl; 240 struct hrtimer *timer = &dl_se->inactive_timer; 241 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 242 struct rq *rq = rq_of_dl_rq(dl_rq); 243 s64 zerolag_time; 244 245 /* 246 * If this is a non-deadline task that has been boosted, 247 * do nothing 248 */ 249 if (dl_se->dl_runtime == 0) 250 return; 251 252 if (dl_entity_is_special(dl_se)) 253 return; 254 255 WARN_ON(dl_se->dl_non_contending); 256 257 zerolag_time = dl_se->deadline - 258 div64_long((dl_se->runtime * dl_se->dl_period), 259 dl_se->dl_runtime); 260 261 /* 262 * Using relative times instead of the absolute "0-lag time" 263 * allows to simplify the code 264 */ 265 zerolag_time -= rq_clock(rq); 266 267 /* 268 * If the "0-lag time" already passed, decrease the active 269 * utilization now, instead of starting a timer 270 */ 271 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) { 272 if (dl_task(p)) 273 sub_running_bw(dl_se, dl_rq); 274 if (!dl_task(p) || p->state == TASK_DEAD) { 275 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 276 277 if (p->state == TASK_DEAD) 278 sub_rq_bw(&p->dl, &rq->dl); 279 raw_spin_lock(&dl_b->lock); 280 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); 281 __dl_clear_params(p); 282 raw_spin_unlock(&dl_b->lock); 283 } 284 285 return; 286 } 287 288 dl_se->dl_non_contending = 1; 289 get_task_struct(p); 290 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD); 291 } 292 293 static void task_contending(struct sched_dl_entity *dl_se, int flags) 294 { 295 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 296 297 /* 298 * If this is a non-deadline task that has been boosted, 299 * do nothing 300 */ 301 if (dl_se->dl_runtime == 0) 302 return; 303 304 if (flags & ENQUEUE_MIGRATED) 305 add_rq_bw(dl_se, dl_rq); 306 307 if (dl_se->dl_non_contending) { 308 dl_se->dl_non_contending = 0; 309 /* 310 * If the timer handler is currently running and the 311 * timer cannot be cancelled, inactive_task_timer() 312 * will see that dl_not_contending is not set, and 313 * will not touch the rq's active utilization, 314 * so we are still safe. 315 */ 316 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) 317 put_task_struct(dl_task_of(dl_se)); 318 } else { 319 /* 320 * Since "dl_non_contending" is not set, the 321 * task's utilization has already been removed from 322 * active utilization (either when the task blocked, 323 * when the "inactive timer" fired). 324 * So, add it back. 325 */ 326 add_running_bw(dl_se, dl_rq); 327 } 328 } 329 330 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq) 331 { 332 struct sched_dl_entity *dl_se = &p->dl; 333 334 return dl_rq->root.rb_leftmost == &dl_se->rb_node; 335 } 336 337 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); 338 339 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime) 340 { 341 raw_spin_lock_init(&dl_b->dl_runtime_lock); 342 dl_b->dl_period = period; 343 dl_b->dl_runtime = runtime; 344 } 345 346 void init_dl_bw(struct dl_bw *dl_b) 347 { 348 raw_spin_lock_init(&dl_b->lock); 349 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock); 350 if (global_rt_runtime() == RUNTIME_INF) 351 dl_b->bw = -1; 352 else 353 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime()); 354 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock); 355 dl_b->total_bw = 0; 356 } 357 358 void init_dl_rq(struct dl_rq *dl_rq) 359 { 360 dl_rq->root = RB_ROOT_CACHED; 361 362 #ifdef CONFIG_SMP 363 /* zero means no -deadline tasks */ 364 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; 365 366 dl_rq->dl_nr_migratory = 0; 367 dl_rq->overloaded = 0; 368 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED; 369 #else 370 init_dl_bw(&dl_rq->dl_bw); 371 #endif 372 373 dl_rq->running_bw = 0; 374 dl_rq->this_bw = 0; 375 init_dl_rq_bw_ratio(dl_rq); 376 } 377 378 #ifdef CONFIG_SMP 379 380 static inline int dl_overloaded(struct rq *rq) 381 { 382 return atomic_read(&rq->rd->dlo_count); 383 } 384 385 static inline void dl_set_overload(struct rq *rq) 386 { 387 if (!rq->online) 388 return; 389 390 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask); 391 /* 392 * Must be visible before the overload count is 393 * set (as in sched_rt.c). 394 * 395 * Matched by the barrier in pull_dl_task(). 396 */ 397 smp_wmb(); 398 atomic_inc(&rq->rd->dlo_count); 399 } 400 401 static inline void dl_clear_overload(struct rq *rq) 402 { 403 if (!rq->online) 404 return; 405 406 atomic_dec(&rq->rd->dlo_count); 407 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask); 408 } 409 410 static void update_dl_migration(struct dl_rq *dl_rq) 411 { 412 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) { 413 if (!dl_rq->overloaded) { 414 dl_set_overload(rq_of_dl_rq(dl_rq)); 415 dl_rq->overloaded = 1; 416 } 417 } else if (dl_rq->overloaded) { 418 dl_clear_overload(rq_of_dl_rq(dl_rq)); 419 dl_rq->overloaded = 0; 420 } 421 } 422 423 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 424 { 425 struct task_struct *p = dl_task_of(dl_se); 426 427 if (p->nr_cpus_allowed > 1) 428 dl_rq->dl_nr_migratory++; 429 430 update_dl_migration(dl_rq); 431 } 432 433 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 434 { 435 struct task_struct *p = dl_task_of(dl_se); 436 437 if (p->nr_cpus_allowed > 1) 438 dl_rq->dl_nr_migratory--; 439 440 update_dl_migration(dl_rq); 441 } 442 443 /* 444 * The list of pushable -deadline task is not a plist, like in 445 * sched_rt.c, it is an rb-tree with tasks ordered by deadline. 446 */ 447 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) 448 { 449 struct dl_rq *dl_rq = &rq->dl; 450 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node; 451 struct rb_node *parent = NULL; 452 struct task_struct *entry; 453 bool leftmost = true; 454 455 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks)); 456 457 while (*link) { 458 parent = *link; 459 entry = rb_entry(parent, struct task_struct, 460 pushable_dl_tasks); 461 if (dl_entity_preempt(&p->dl, &entry->dl)) 462 link = &parent->rb_left; 463 else { 464 link = &parent->rb_right; 465 leftmost = false; 466 } 467 } 468 469 if (leftmost) 470 dl_rq->earliest_dl.next = p->dl.deadline; 471 472 rb_link_node(&p->pushable_dl_tasks, parent, link); 473 rb_insert_color_cached(&p->pushable_dl_tasks, 474 &dl_rq->pushable_dl_tasks_root, leftmost); 475 } 476 477 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) 478 { 479 struct dl_rq *dl_rq = &rq->dl; 480 481 if (RB_EMPTY_NODE(&p->pushable_dl_tasks)) 482 return; 483 484 if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) { 485 struct rb_node *next_node; 486 487 next_node = rb_next(&p->pushable_dl_tasks); 488 if (next_node) { 489 dl_rq->earliest_dl.next = rb_entry(next_node, 490 struct task_struct, pushable_dl_tasks)->dl.deadline; 491 } 492 } 493 494 rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); 495 RB_CLEAR_NODE(&p->pushable_dl_tasks); 496 } 497 498 static inline int has_pushable_dl_tasks(struct rq *rq) 499 { 500 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root); 501 } 502 503 static int push_dl_task(struct rq *rq); 504 505 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) 506 { 507 return dl_task(prev); 508 } 509 510 static DEFINE_PER_CPU(struct callback_head, dl_push_head); 511 static DEFINE_PER_CPU(struct callback_head, dl_pull_head); 512 513 static void push_dl_tasks(struct rq *); 514 static void pull_dl_task(struct rq *); 515 516 static inline void deadline_queue_push_tasks(struct rq *rq) 517 { 518 if (!has_pushable_dl_tasks(rq)) 519 return; 520 521 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks); 522 } 523 524 static inline void deadline_queue_pull_task(struct rq *rq) 525 { 526 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task); 527 } 528 529 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq); 530 531 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p) 532 { 533 struct rq *later_rq = NULL; 534 struct dl_bw *dl_b; 535 536 later_rq = find_lock_later_rq(p, rq); 537 if (!later_rq) { 538 int cpu; 539 540 /* 541 * If we cannot preempt any rq, fall back to pick any 542 * online CPU: 543 */ 544 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr); 545 if (cpu >= nr_cpu_ids) { 546 /* 547 * Failed to find any suitable CPU. 548 * The task will never come back! 549 */ 550 BUG_ON(dl_bandwidth_enabled()); 551 552 /* 553 * If admission control is disabled we 554 * try a little harder to let the task 555 * run. 556 */ 557 cpu = cpumask_any(cpu_active_mask); 558 } 559 later_rq = cpu_rq(cpu); 560 double_lock_balance(rq, later_rq); 561 } 562 563 if (p->dl.dl_non_contending || p->dl.dl_throttled) { 564 /* 565 * Inactive timer is armed (or callback is running, but 566 * waiting for us to release rq locks). In any case, when it 567 * will fire (or continue), it will see running_bw of this 568 * task migrated to later_rq (and correctly handle it). 569 */ 570 sub_running_bw(&p->dl, &rq->dl); 571 sub_rq_bw(&p->dl, &rq->dl); 572 573 add_rq_bw(&p->dl, &later_rq->dl); 574 add_running_bw(&p->dl, &later_rq->dl); 575 } else { 576 sub_rq_bw(&p->dl, &rq->dl); 577 add_rq_bw(&p->dl, &later_rq->dl); 578 } 579 580 /* 581 * And we finally need to fixup root_domain(s) bandwidth accounting, 582 * since p is still hanging out in the old (now moved to default) root 583 * domain. 584 */ 585 dl_b = &rq->rd->dl_bw; 586 raw_spin_lock(&dl_b->lock); 587 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); 588 raw_spin_unlock(&dl_b->lock); 589 590 dl_b = &later_rq->rd->dl_bw; 591 raw_spin_lock(&dl_b->lock); 592 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); 593 raw_spin_unlock(&dl_b->lock); 594 595 set_task_cpu(p, later_rq->cpu); 596 double_unlock_balance(later_rq, rq); 597 598 return later_rq; 599 } 600 601 #else 602 603 static inline 604 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) 605 { 606 } 607 608 static inline 609 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) 610 { 611 } 612 613 static inline 614 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 615 { 616 } 617 618 static inline 619 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 620 { 621 } 622 623 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) 624 { 625 return false; 626 } 627 628 static inline void pull_dl_task(struct rq *rq) 629 { 630 } 631 632 static inline void deadline_queue_push_tasks(struct rq *rq) 633 { 634 } 635 636 static inline void deadline_queue_pull_task(struct rq *rq) 637 { 638 } 639 #endif /* CONFIG_SMP */ 640 641 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags); 642 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags); 643 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags); 644 645 /* 646 * We are being explicitly informed that a new instance is starting, 647 * and this means that: 648 * - the absolute deadline of the entity has to be placed at 649 * current time + relative deadline; 650 * - the runtime of the entity has to be set to the maximum value. 651 * 652 * The capability of specifying such event is useful whenever a -deadline 653 * entity wants to (try to!) synchronize its behaviour with the scheduler's 654 * one, and to (try to!) reconcile itself with its own scheduling 655 * parameters. 656 */ 657 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se) 658 { 659 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 660 struct rq *rq = rq_of_dl_rq(dl_rq); 661 662 WARN_ON(dl_se->dl_boosted); 663 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline)); 664 665 /* 666 * We are racing with the deadline timer. So, do nothing because 667 * the deadline timer handler will take care of properly recharging 668 * the runtime and postponing the deadline 669 */ 670 if (dl_se->dl_throttled) 671 return; 672 673 /* 674 * We use the regular wall clock time to set deadlines in the 675 * future; in fact, we must consider execution overheads (time 676 * spent on hardirq context, etc.). 677 */ 678 dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline; 679 dl_se->runtime = dl_se->dl_runtime; 680 } 681 682 /* 683 * Pure Earliest Deadline First (EDF) scheduling does not deal with the 684 * possibility of a entity lasting more than what it declared, and thus 685 * exhausting its runtime. 686 * 687 * Here we are interested in making runtime overrun possible, but we do 688 * not want a entity which is misbehaving to affect the scheduling of all 689 * other entities. 690 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS) 691 * is used, in order to confine each entity within its own bandwidth. 692 * 693 * This function deals exactly with that, and ensures that when the runtime 694 * of a entity is replenished, its deadline is also postponed. That ensures 695 * the overrunning entity can't interfere with other entity in the system and 696 * can't make them miss their deadlines. Reasons why this kind of overruns 697 * could happen are, typically, a entity voluntarily trying to overcome its 698 * runtime, or it just underestimated it during sched_setattr(). 699 */ 700 static void replenish_dl_entity(struct sched_dl_entity *dl_se, 701 struct sched_dl_entity *pi_se) 702 { 703 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 704 struct rq *rq = rq_of_dl_rq(dl_rq); 705 706 BUG_ON(pi_se->dl_runtime <= 0); 707 708 /* 709 * This could be the case for a !-dl task that is boosted. 710 * Just go with full inherited parameters. 711 */ 712 if (dl_se->dl_deadline == 0) { 713 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; 714 dl_se->runtime = pi_se->dl_runtime; 715 } 716 717 if (dl_se->dl_yielded && dl_se->runtime > 0) 718 dl_se->runtime = 0; 719 720 /* 721 * We keep moving the deadline away until we get some 722 * available runtime for the entity. This ensures correct 723 * handling of situations where the runtime overrun is 724 * arbitrary large. 725 */ 726 while (dl_se->runtime <= 0) { 727 dl_se->deadline += pi_se->dl_period; 728 dl_se->runtime += pi_se->dl_runtime; 729 } 730 731 /* 732 * At this point, the deadline really should be "in 733 * the future" with respect to rq->clock. If it's 734 * not, we are, for some reason, lagging too much! 735 * Anyway, after having warn userspace abut that, 736 * we still try to keep the things running by 737 * resetting the deadline and the budget of the 738 * entity. 739 */ 740 if (dl_time_before(dl_se->deadline, rq_clock(rq))) { 741 printk_deferred_once("sched: DL replenish lagged too much\n"); 742 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; 743 dl_se->runtime = pi_se->dl_runtime; 744 } 745 746 if (dl_se->dl_yielded) 747 dl_se->dl_yielded = 0; 748 if (dl_se->dl_throttled) 749 dl_se->dl_throttled = 0; 750 } 751 752 /* 753 * Here we check if --at time t-- an entity (which is probably being 754 * [re]activated or, in general, enqueued) can use its remaining runtime 755 * and its current deadline _without_ exceeding the bandwidth it is 756 * assigned (function returns true if it can't). We are in fact applying 757 * one of the CBS rules: when a task wakes up, if the residual runtime 758 * over residual deadline fits within the allocated bandwidth, then we 759 * can keep the current (absolute) deadline and residual budget without 760 * disrupting the schedulability of the system. Otherwise, we should 761 * refill the runtime and set the deadline a period in the future, 762 * because keeping the current (absolute) deadline of the task would 763 * result in breaking guarantees promised to other tasks (refer to 764 * Documentation/scheduler/sched-deadline.rst for more information). 765 * 766 * This function returns true if: 767 * 768 * runtime / (deadline - t) > dl_runtime / dl_deadline , 769 * 770 * IOW we can't recycle current parameters. 771 * 772 * Notice that the bandwidth check is done against the deadline. For 773 * task with deadline equal to period this is the same of using 774 * dl_period instead of dl_deadline in the equation above. 775 */ 776 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, 777 struct sched_dl_entity *pi_se, u64 t) 778 { 779 u64 left, right; 780 781 /* 782 * left and right are the two sides of the equation above, 783 * after a bit of shuffling to use multiplications instead 784 * of divisions. 785 * 786 * Note that none of the time values involved in the two 787 * multiplications are absolute: dl_deadline and dl_runtime 788 * are the relative deadline and the maximum runtime of each 789 * instance, runtime is the runtime left for the last instance 790 * and (deadline - t), since t is rq->clock, is the time left 791 * to the (absolute) deadline. Even if overflowing the u64 type 792 * is very unlikely to occur in both cases, here we scale down 793 * as we want to avoid that risk at all. Scaling down by 10 794 * means that we reduce granularity to 1us. We are fine with it, 795 * since this is only a true/false check and, anyway, thinking 796 * of anything below microseconds resolution is actually fiction 797 * (but still we want to give the user that illusion >;). 798 */ 799 left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); 800 right = ((dl_se->deadline - t) >> DL_SCALE) * 801 (pi_se->dl_runtime >> DL_SCALE); 802 803 return dl_time_before(right, left); 804 } 805 806 /* 807 * Revised wakeup rule [1]: For self-suspending tasks, rather then 808 * re-initializing task's runtime and deadline, the revised wakeup 809 * rule adjusts the task's runtime to avoid the task to overrun its 810 * density. 811 * 812 * Reasoning: a task may overrun the density if: 813 * runtime / (deadline - t) > dl_runtime / dl_deadline 814 * 815 * Therefore, runtime can be adjusted to: 816 * runtime = (dl_runtime / dl_deadline) * (deadline - t) 817 * 818 * In such way that runtime will be equal to the maximum density 819 * the task can use without breaking any rule. 820 * 821 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant 822 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24. 823 */ 824 static void 825 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq) 826 { 827 u64 laxity = dl_se->deadline - rq_clock(rq); 828 829 /* 830 * If the task has deadline < period, and the deadline is in the past, 831 * it should already be throttled before this check. 832 * 833 * See update_dl_entity() comments for further details. 834 */ 835 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq))); 836 837 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT; 838 } 839 840 /* 841 * Regarding the deadline, a task with implicit deadline has a relative 842 * deadline == relative period. A task with constrained deadline has a 843 * relative deadline <= relative period. 844 * 845 * We support constrained deadline tasks. However, there are some restrictions 846 * applied only for tasks which do not have an implicit deadline. See 847 * update_dl_entity() to know more about such restrictions. 848 * 849 * The dl_is_implicit() returns true if the task has an implicit deadline. 850 */ 851 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se) 852 { 853 return dl_se->dl_deadline == dl_se->dl_period; 854 } 855 856 /* 857 * When a deadline entity is placed in the runqueue, its runtime and deadline 858 * might need to be updated. This is done by a CBS wake up rule. There are two 859 * different rules: 1) the original CBS; and 2) the Revisited CBS. 860 * 861 * When the task is starting a new period, the Original CBS is used. In this 862 * case, the runtime is replenished and a new absolute deadline is set. 863 * 864 * When a task is queued before the begin of the next period, using the 865 * remaining runtime and deadline could make the entity to overflow, see 866 * dl_entity_overflow() to find more about runtime overflow. When such case 867 * is detected, the runtime and deadline need to be updated. 868 * 869 * If the task has an implicit deadline, i.e., deadline == period, the Original 870 * CBS is applied. the runtime is replenished and a new absolute deadline is 871 * set, as in the previous cases. 872 * 873 * However, the Original CBS does not work properly for tasks with 874 * deadline < period, which are said to have a constrained deadline. By 875 * applying the Original CBS, a constrained deadline task would be able to run 876 * runtime/deadline in a period. With deadline < period, the task would 877 * overrun the runtime/period allowed bandwidth, breaking the admission test. 878 * 879 * In order to prevent this misbehave, the Revisited CBS is used for 880 * constrained deadline tasks when a runtime overflow is detected. In the 881 * Revisited CBS, rather than replenishing & setting a new absolute deadline, 882 * the remaining runtime of the task is reduced to avoid runtime overflow. 883 * Please refer to the comments update_dl_revised_wakeup() function to find 884 * more about the Revised CBS rule. 885 */ 886 static void update_dl_entity(struct sched_dl_entity *dl_se, 887 struct sched_dl_entity *pi_se) 888 { 889 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 890 struct rq *rq = rq_of_dl_rq(dl_rq); 891 892 if (dl_time_before(dl_se->deadline, rq_clock(rq)) || 893 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) { 894 895 if (unlikely(!dl_is_implicit(dl_se) && 896 !dl_time_before(dl_se->deadline, rq_clock(rq)) && 897 !dl_se->dl_boosted)){ 898 update_dl_revised_wakeup(dl_se, rq); 899 return; 900 } 901 902 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; 903 dl_se->runtime = pi_se->dl_runtime; 904 } 905 } 906 907 static inline u64 dl_next_period(struct sched_dl_entity *dl_se) 908 { 909 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period; 910 } 911 912 /* 913 * If the entity depleted all its runtime, and if we want it to sleep 914 * while waiting for some new execution time to become available, we 915 * set the bandwidth replenishment timer to the replenishment instant 916 * and try to activate it. 917 * 918 * Notice that it is important for the caller to know if the timer 919 * actually started or not (i.e., the replenishment instant is in 920 * the future or in the past). 921 */ 922 static int start_dl_timer(struct task_struct *p) 923 { 924 struct sched_dl_entity *dl_se = &p->dl; 925 struct hrtimer *timer = &dl_se->dl_timer; 926 struct rq *rq = task_rq(p); 927 ktime_t now, act; 928 s64 delta; 929 930 lockdep_assert_held(&rq->lock); 931 932 /* 933 * We want the timer to fire at the deadline, but considering 934 * that it is actually coming from rq->clock and not from 935 * hrtimer's time base reading. 936 */ 937 act = ns_to_ktime(dl_next_period(dl_se)); 938 now = hrtimer_cb_get_time(timer); 939 delta = ktime_to_ns(now) - rq_clock(rq); 940 act = ktime_add_ns(act, delta); 941 942 /* 943 * If the expiry time already passed, e.g., because the value 944 * chosen as the deadline is too small, don't even try to 945 * start the timer in the past! 946 */ 947 if (ktime_us_delta(act, now) < 0) 948 return 0; 949 950 /* 951 * !enqueued will guarantee another callback; even if one is already in 952 * progress. This ensures a balanced {get,put}_task_struct(). 953 * 954 * The race against __run_timer() clearing the enqueued state is 955 * harmless because we're holding task_rq()->lock, therefore the timer 956 * expiring after we've done the check will wait on its task_rq_lock() 957 * and observe our state. 958 */ 959 if (!hrtimer_is_queued(timer)) { 960 get_task_struct(p); 961 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD); 962 } 963 964 return 1; 965 } 966 967 /* 968 * This is the bandwidth enforcement timer callback. If here, we know 969 * a task is not on its dl_rq, since the fact that the timer was running 970 * means the task is throttled and needs a runtime replenishment. 971 * 972 * However, what we actually do depends on the fact the task is active, 973 * (it is on its rq) or has been removed from there by a call to 974 * dequeue_task_dl(). In the former case we must issue the runtime 975 * replenishment and add the task back to the dl_rq; in the latter, we just 976 * do nothing but clearing dl_throttled, so that runtime and deadline 977 * updating (and the queueing back to dl_rq) will be done by the 978 * next call to enqueue_task_dl(). 979 */ 980 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) 981 { 982 struct sched_dl_entity *dl_se = container_of(timer, 983 struct sched_dl_entity, 984 dl_timer); 985 struct task_struct *p = dl_task_of(dl_se); 986 struct rq_flags rf; 987 struct rq *rq; 988 989 rq = task_rq_lock(p, &rf); 990 991 /* 992 * The task might have changed its scheduling policy to something 993 * different than SCHED_DEADLINE (through switched_from_dl()). 994 */ 995 if (!dl_task(p)) 996 goto unlock; 997 998 /* 999 * The task might have been boosted by someone else and might be in the 1000 * boosting/deboosting path, its not throttled. 1001 */ 1002 if (dl_se->dl_boosted) 1003 goto unlock; 1004 1005 /* 1006 * Spurious timer due to start_dl_timer() race; or we already received 1007 * a replenishment from rt_mutex_setprio(). 1008 */ 1009 if (!dl_se->dl_throttled) 1010 goto unlock; 1011 1012 sched_clock_tick(); 1013 update_rq_clock(rq); 1014 1015 /* 1016 * If the throttle happened during sched-out; like: 1017 * 1018 * schedule() 1019 * deactivate_task() 1020 * dequeue_task_dl() 1021 * update_curr_dl() 1022 * start_dl_timer() 1023 * __dequeue_task_dl() 1024 * prev->on_rq = 0; 1025 * 1026 * We can be both throttled and !queued. Replenish the counter 1027 * but do not enqueue -- wait for our wakeup to do that. 1028 */ 1029 if (!task_on_rq_queued(p)) { 1030 replenish_dl_entity(dl_se, dl_se); 1031 goto unlock; 1032 } 1033 1034 #ifdef CONFIG_SMP 1035 if (unlikely(!rq->online)) { 1036 /* 1037 * If the runqueue is no longer available, migrate the 1038 * task elsewhere. This necessarily changes rq. 1039 */ 1040 lockdep_unpin_lock(&rq->lock, rf.cookie); 1041 rq = dl_task_offline_migration(rq, p); 1042 rf.cookie = lockdep_pin_lock(&rq->lock); 1043 update_rq_clock(rq); 1044 1045 /* 1046 * Now that the task has been migrated to the new RQ and we 1047 * have that locked, proceed as normal and enqueue the task 1048 * there. 1049 */ 1050 } 1051 #endif 1052 1053 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); 1054 if (dl_task(rq->curr)) 1055 check_preempt_curr_dl(rq, p, 0); 1056 else 1057 resched_curr(rq); 1058 1059 #ifdef CONFIG_SMP 1060 /* 1061 * Queueing this task back might have overloaded rq, check if we need 1062 * to kick someone away. 1063 */ 1064 if (has_pushable_dl_tasks(rq)) { 1065 /* 1066 * Nothing relies on rq->lock after this, so its safe to drop 1067 * rq->lock. 1068 */ 1069 rq_unpin_lock(rq, &rf); 1070 push_dl_task(rq); 1071 rq_repin_lock(rq, &rf); 1072 } 1073 #endif 1074 1075 unlock: 1076 task_rq_unlock(rq, p, &rf); 1077 1078 /* 1079 * This can free the task_struct, including this hrtimer, do not touch 1080 * anything related to that after this. 1081 */ 1082 put_task_struct(p); 1083 1084 return HRTIMER_NORESTART; 1085 } 1086 1087 void init_dl_task_timer(struct sched_dl_entity *dl_se) 1088 { 1089 struct hrtimer *timer = &dl_se->dl_timer; 1090 1091 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); 1092 timer->function = dl_task_timer; 1093 } 1094 1095 /* 1096 * During the activation, CBS checks if it can reuse the current task's 1097 * runtime and period. If the deadline of the task is in the past, CBS 1098 * cannot use the runtime, and so it replenishes the task. This rule 1099 * works fine for implicit deadline tasks (deadline == period), and the 1100 * CBS was designed for implicit deadline tasks. However, a task with 1101 * constrained deadline (deadine < period) might be awakened after the 1102 * deadline, but before the next period. In this case, replenishing the 1103 * task would allow it to run for runtime / deadline. As in this case 1104 * deadline < period, CBS enables a task to run for more than the 1105 * runtime / period. In a very loaded system, this can cause a domino 1106 * effect, making other tasks miss their deadlines. 1107 * 1108 * To avoid this problem, in the activation of a constrained deadline 1109 * task after the deadline but before the next period, throttle the 1110 * task and set the replenishing timer to the begin of the next period, 1111 * unless it is boosted. 1112 */ 1113 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se) 1114 { 1115 struct task_struct *p = dl_task_of(dl_se); 1116 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se)); 1117 1118 if (dl_time_before(dl_se->deadline, rq_clock(rq)) && 1119 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) { 1120 if (unlikely(dl_se->dl_boosted || !start_dl_timer(p))) 1121 return; 1122 dl_se->dl_throttled = 1; 1123 if (dl_se->runtime > 0) 1124 dl_se->runtime = 0; 1125 } 1126 } 1127 1128 static 1129 int dl_runtime_exceeded(struct sched_dl_entity *dl_se) 1130 { 1131 return (dl_se->runtime <= 0); 1132 } 1133 1134 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); 1135 1136 /* 1137 * This function implements the GRUB accounting rule: 1138 * according to the GRUB reclaiming algorithm, the runtime is 1139 * not decreased as "dq = -dt", but as 1140 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt", 1141 * where u is the utilization of the task, Umax is the maximum reclaimable 1142 * utilization, Uinact is the (per-runqueue) inactive utilization, computed 1143 * as the difference between the "total runqueue utilization" and the 1144 * runqueue active utilization, and Uextra is the (per runqueue) extra 1145 * reclaimable utilization. 1146 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations 1147 * multiplied by 2^BW_SHIFT, the result has to be shifted right by 1148 * BW_SHIFT. 1149 * Since rq->dl.bw_ratio contains 1 / Umax multipled by 2^RATIO_SHIFT, 1150 * dl_bw is multiped by rq->dl.bw_ratio and shifted right by RATIO_SHIFT. 1151 * Since delta is a 64 bit variable, to have an overflow its value 1152 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds. 1153 * So, overflow is not an issue here. 1154 */ 1155 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) 1156 { 1157 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */ 1158 u64 u_act; 1159 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT; 1160 1161 /* 1162 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)}, 1163 * we compare u_inact + rq->dl.extra_bw with 1164 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because 1165 * u_inact + rq->dl.extra_bw can be larger than 1166 * 1 * (so, 1 - u_inact - rq->dl.extra_bw would be negative 1167 * leading to wrong results) 1168 */ 1169 if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min) 1170 u_act = u_act_min; 1171 else 1172 u_act = BW_UNIT - u_inact - rq->dl.extra_bw; 1173 1174 return (delta * u_act) >> BW_SHIFT; 1175 } 1176 1177 /* 1178 * Update the current task's runtime statistics (provided it is still 1179 * a -deadline task and has not been removed from the dl_rq). 1180 */ 1181 static void update_curr_dl(struct rq *rq) 1182 { 1183 struct task_struct *curr = rq->curr; 1184 struct sched_dl_entity *dl_se = &curr->dl; 1185 u64 delta_exec, scaled_delta_exec; 1186 int cpu = cpu_of(rq); 1187 u64 now; 1188 1189 if (!dl_task(curr) || !on_dl_rq(dl_se)) 1190 return; 1191 1192 /* 1193 * Consumed budget is computed considering the time as 1194 * observed by schedulable tasks (excluding time spent 1195 * in hardirq context, etc.). Deadlines are instead 1196 * computed using hard walltime. This seems to be the more 1197 * natural solution, but the full ramifications of this 1198 * approach need further study. 1199 */ 1200 now = rq_clock_task(rq); 1201 delta_exec = now - curr->se.exec_start; 1202 if (unlikely((s64)delta_exec <= 0)) { 1203 if (unlikely(dl_se->dl_yielded)) 1204 goto throttle; 1205 return; 1206 } 1207 1208 schedstat_set(curr->se.statistics.exec_max, 1209 max(curr->se.statistics.exec_max, delta_exec)); 1210 1211 curr->se.sum_exec_runtime += delta_exec; 1212 account_group_exec_runtime(curr, delta_exec); 1213 1214 curr->se.exec_start = now; 1215 cgroup_account_cputime(curr, delta_exec); 1216 1217 if (dl_entity_is_special(dl_se)) 1218 return; 1219 1220 /* 1221 * For tasks that participate in GRUB, we implement GRUB-PA: the 1222 * spare reclaimed bandwidth is used to clock down frequency. 1223 * 1224 * For the others, we still need to scale reservation parameters 1225 * according to current frequency and CPU maximum capacity. 1226 */ 1227 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) { 1228 scaled_delta_exec = grub_reclaim(delta_exec, 1229 rq, 1230 &curr->dl); 1231 } else { 1232 unsigned long scale_freq = arch_scale_freq_capacity(cpu); 1233 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu); 1234 1235 scaled_delta_exec = cap_scale(delta_exec, scale_freq); 1236 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu); 1237 } 1238 1239 dl_se->runtime -= scaled_delta_exec; 1240 1241 throttle: 1242 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) { 1243 dl_se->dl_throttled = 1; 1244 1245 /* If requested, inform the user about runtime overruns. */ 1246 if (dl_runtime_exceeded(dl_se) && 1247 (dl_se->flags & SCHED_FLAG_DL_OVERRUN)) 1248 dl_se->dl_overrun = 1; 1249 1250 __dequeue_task_dl(rq, curr, 0); 1251 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr))) 1252 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); 1253 1254 if (!is_leftmost(curr, &rq->dl)) 1255 resched_curr(rq); 1256 } 1257 1258 /* 1259 * Because -- for now -- we share the rt bandwidth, we need to 1260 * account our runtime there too, otherwise actual rt tasks 1261 * would be able to exceed the shared quota. 1262 * 1263 * Account to the root rt group for now. 1264 * 1265 * The solution we're working towards is having the RT groups scheduled 1266 * using deadline servers -- however there's a few nasties to figure 1267 * out before that can happen. 1268 */ 1269 if (rt_bandwidth_enabled()) { 1270 struct rt_rq *rt_rq = &rq->rt; 1271 1272 raw_spin_lock(&rt_rq->rt_runtime_lock); 1273 /* 1274 * We'll let actual RT tasks worry about the overflow here, we 1275 * have our own CBS to keep us inline; only account when RT 1276 * bandwidth is relevant. 1277 */ 1278 if (sched_rt_bandwidth_account(rt_rq)) 1279 rt_rq->rt_time += delta_exec; 1280 raw_spin_unlock(&rt_rq->rt_runtime_lock); 1281 } 1282 } 1283 1284 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer) 1285 { 1286 struct sched_dl_entity *dl_se = container_of(timer, 1287 struct sched_dl_entity, 1288 inactive_timer); 1289 struct task_struct *p = dl_task_of(dl_se); 1290 struct rq_flags rf; 1291 struct rq *rq; 1292 1293 rq = task_rq_lock(p, &rf); 1294 1295 sched_clock_tick(); 1296 update_rq_clock(rq); 1297 1298 if (!dl_task(p) || p->state == TASK_DEAD) { 1299 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 1300 1301 if (p->state == TASK_DEAD && dl_se->dl_non_contending) { 1302 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl)); 1303 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl)); 1304 dl_se->dl_non_contending = 0; 1305 } 1306 1307 raw_spin_lock(&dl_b->lock); 1308 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); 1309 raw_spin_unlock(&dl_b->lock); 1310 __dl_clear_params(p); 1311 1312 goto unlock; 1313 } 1314 if (dl_se->dl_non_contending == 0) 1315 goto unlock; 1316 1317 sub_running_bw(dl_se, &rq->dl); 1318 dl_se->dl_non_contending = 0; 1319 unlock: 1320 task_rq_unlock(rq, p, &rf); 1321 put_task_struct(p); 1322 1323 return HRTIMER_NORESTART; 1324 } 1325 1326 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se) 1327 { 1328 struct hrtimer *timer = &dl_se->inactive_timer; 1329 1330 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); 1331 timer->function = inactive_task_timer; 1332 } 1333 1334 #ifdef CONFIG_SMP 1335 1336 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) 1337 { 1338 struct rq *rq = rq_of_dl_rq(dl_rq); 1339 1340 if (dl_rq->earliest_dl.curr == 0 || 1341 dl_time_before(deadline, dl_rq->earliest_dl.curr)) { 1342 dl_rq->earliest_dl.curr = deadline; 1343 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline); 1344 } 1345 } 1346 1347 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) 1348 { 1349 struct rq *rq = rq_of_dl_rq(dl_rq); 1350 1351 /* 1352 * Since we may have removed our earliest (and/or next earliest) 1353 * task we must recompute them. 1354 */ 1355 if (!dl_rq->dl_nr_running) { 1356 dl_rq->earliest_dl.curr = 0; 1357 dl_rq->earliest_dl.next = 0; 1358 cpudl_clear(&rq->rd->cpudl, rq->cpu); 1359 } else { 1360 struct rb_node *leftmost = dl_rq->root.rb_leftmost; 1361 struct sched_dl_entity *entry; 1362 1363 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node); 1364 dl_rq->earliest_dl.curr = entry->deadline; 1365 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline); 1366 } 1367 } 1368 1369 #else 1370 1371 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} 1372 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} 1373 1374 #endif /* CONFIG_SMP */ 1375 1376 static inline 1377 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 1378 { 1379 int prio = dl_task_of(dl_se)->prio; 1380 u64 deadline = dl_se->deadline; 1381 1382 WARN_ON(!dl_prio(prio)); 1383 dl_rq->dl_nr_running++; 1384 add_nr_running(rq_of_dl_rq(dl_rq), 1); 1385 1386 inc_dl_deadline(dl_rq, deadline); 1387 inc_dl_migration(dl_se, dl_rq); 1388 } 1389 1390 static inline 1391 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 1392 { 1393 int prio = dl_task_of(dl_se)->prio; 1394 1395 WARN_ON(!dl_prio(prio)); 1396 WARN_ON(!dl_rq->dl_nr_running); 1397 dl_rq->dl_nr_running--; 1398 sub_nr_running(rq_of_dl_rq(dl_rq), 1); 1399 1400 dec_dl_deadline(dl_rq, dl_se->deadline); 1401 dec_dl_migration(dl_se, dl_rq); 1402 } 1403 1404 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se) 1405 { 1406 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 1407 struct rb_node **link = &dl_rq->root.rb_root.rb_node; 1408 struct rb_node *parent = NULL; 1409 struct sched_dl_entity *entry; 1410 int leftmost = 1; 1411 1412 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node)); 1413 1414 while (*link) { 1415 parent = *link; 1416 entry = rb_entry(parent, struct sched_dl_entity, rb_node); 1417 if (dl_time_before(dl_se->deadline, entry->deadline)) 1418 link = &parent->rb_left; 1419 else { 1420 link = &parent->rb_right; 1421 leftmost = 0; 1422 } 1423 } 1424 1425 rb_link_node(&dl_se->rb_node, parent, link); 1426 rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost); 1427 1428 inc_dl_tasks(dl_se, dl_rq); 1429 } 1430 1431 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se) 1432 { 1433 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 1434 1435 if (RB_EMPTY_NODE(&dl_se->rb_node)) 1436 return; 1437 1438 rb_erase_cached(&dl_se->rb_node, &dl_rq->root); 1439 RB_CLEAR_NODE(&dl_se->rb_node); 1440 1441 dec_dl_tasks(dl_se, dl_rq); 1442 } 1443 1444 static void 1445 enqueue_dl_entity(struct sched_dl_entity *dl_se, 1446 struct sched_dl_entity *pi_se, int flags) 1447 { 1448 BUG_ON(on_dl_rq(dl_se)); 1449 1450 /* 1451 * If this is a wakeup or a new instance, the scheduling 1452 * parameters of the task might need updating. Otherwise, 1453 * we want a replenishment of its runtime. 1454 */ 1455 if (flags & ENQUEUE_WAKEUP) { 1456 task_contending(dl_se, flags); 1457 update_dl_entity(dl_se, pi_se); 1458 } else if (flags & ENQUEUE_REPLENISH) { 1459 replenish_dl_entity(dl_se, pi_se); 1460 } else if ((flags & ENQUEUE_RESTORE) && 1461 dl_time_before(dl_se->deadline, 1462 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) { 1463 setup_new_dl_entity(dl_se); 1464 } 1465 1466 __enqueue_dl_entity(dl_se); 1467 } 1468 1469 static void dequeue_dl_entity(struct sched_dl_entity *dl_se) 1470 { 1471 __dequeue_dl_entity(dl_se); 1472 } 1473 1474 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) 1475 { 1476 struct task_struct *pi_task = rt_mutex_get_top_task(p); 1477 struct sched_dl_entity *pi_se = &p->dl; 1478 1479 /* 1480 * Use the scheduling parameters of the top pi-waiter task if: 1481 * - we have a top pi-waiter which is a SCHED_DEADLINE task AND 1482 * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is 1483 * smaller than our deadline OR we are a !SCHED_DEADLINE task getting 1484 * boosted due to a SCHED_DEADLINE pi-waiter). 1485 * Otherwise we keep our runtime and deadline. 1486 */ 1487 if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) { 1488 pi_se = &pi_task->dl; 1489 } else if (!dl_prio(p->normal_prio)) { 1490 /* 1491 * Special case in which we have a !SCHED_DEADLINE task 1492 * that is going to be deboosted, but exceeds its 1493 * runtime while doing so. No point in replenishing 1494 * it, as it's going to return back to its original 1495 * scheduling class after this. 1496 */ 1497 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH); 1498 return; 1499 } 1500 1501 /* 1502 * Check if a constrained deadline task was activated 1503 * after the deadline but before the next period. 1504 * If that is the case, the task will be throttled and 1505 * the replenishment timer will be set to the next period. 1506 */ 1507 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl)) 1508 dl_check_constrained_dl(&p->dl); 1509 1510 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) { 1511 add_rq_bw(&p->dl, &rq->dl); 1512 add_running_bw(&p->dl, &rq->dl); 1513 } 1514 1515 /* 1516 * If p is throttled, we do not enqueue it. In fact, if it exhausted 1517 * its budget it needs a replenishment and, since it now is on 1518 * its rq, the bandwidth timer callback (which clearly has not 1519 * run yet) will take care of this. 1520 * However, the active utilization does not depend on the fact 1521 * that the task is on the runqueue or not (but depends on the 1522 * task's state - in GRUB parlance, "inactive" vs "active contending"). 1523 * In other words, even if a task is throttled its utilization must 1524 * be counted in the active utilization; hence, we need to call 1525 * add_running_bw(). 1526 */ 1527 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) { 1528 if (flags & ENQUEUE_WAKEUP) 1529 task_contending(&p->dl, flags); 1530 1531 return; 1532 } 1533 1534 enqueue_dl_entity(&p->dl, pi_se, flags); 1535 1536 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 1537 enqueue_pushable_dl_task(rq, p); 1538 } 1539 1540 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) 1541 { 1542 dequeue_dl_entity(&p->dl); 1543 dequeue_pushable_dl_task(rq, p); 1544 } 1545 1546 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) 1547 { 1548 update_curr_dl(rq); 1549 __dequeue_task_dl(rq, p, flags); 1550 1551 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) { 1552 sub_running_bw(&p->dl, &rq->dl); 1553 sub_rq_bw(&p->dl, &rq->dl); 1554 } 1555 1556 /* 1557 * This check allows to start the inactive timer (or to immediately 1558 * decrease the active utilization, if needed) in two cases: 1559 * when the task blocks and when it is terminating 1560 * (p->state == TASK_DEAD). We can handle the two cases in the same 1561 * way, because from GRUB's point of view the same thing is happening 1562 * (the task moves from "active contending" to "active non contending" 1563 * or "inactive") 1564 */ 1565 if (flags & DEQUEUE_SLEEP) 1566 task_non_contending(p); 1567 } 1568 1569 /* 1570 * Yield task semantic for -deadline tasks is: 1571 * 1572 * get off from the CPU until our next instance, with 1573 * a new runtime. This is of little use now, since we 1574 * don't have a bandwidth reclaiming mechanism. Anyway, 1575 * bandwidth reclaiming is planned for the future, and 1576 * yield_task_dl will indicate that some spare budget 1577 * is available for other task instances to use it. 1578 */ 1579 static void yield_task_dl(struct rq *rq) 1580 { 1581 /* 1582 * We make the task go to sleep until its current deadline by 1583 * forcing its runtime to zero. This way, update_curr_dl() stops 1584 * it and the bandwidth timer will wake it up and will give it 1585 * new scheduling parameters (thanks to dl_yielded=1). 1586 */ 1587 rq->curr->dl.dl_yielded = 1; 1588 1589 update_rq_clock(rq); 1590 update_curr_dl(rq); 1591 /* 1592 * Tell update_rq_clock() that we've just updated, 1593 * so we don't do microscopic update in schedule() 1594 * and double the fastpath cost. 1595 */ 1596 rq_clock_skip_update(rq); 1597 } 1598 1599 #ifdef CONFIG_SMP 1600 1601 static int find_later_rq(struct task_struct *task); 1602 1603 static int 1604 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags) 1605 { 1606 struct task_struct *curr; 1607 struct rq *rq; 1608 1609 if (sd_flag != SD_BALANCE_WAKE) 1610 goto out; 1611 1612 rq = cpu_rq(cpu); 1613 1614 rcu_read_lock(); 1615 curr = READ_ONCE(rq->curr); /* unlocked access */ 1616 1617 /* 1618 * If we are dealing with a -deadline task, we must 1619 * decide where to wake it up. 1620 * If it has a later deadline and the current task 1621 * on this rq can't move (provided the waking task 1622 * can!) we prefer to send it somewhere else. On the 1623 * other hand, if it has a shorter deadline, we 1624 * try to make it stay here, it might be important. 1625 */ 1626 if (unlikely(dl_task(curr)) && 1627 (curr->nr_cpus_allowed < 2 || 1628 !dl_entity_preempt(&p->dl, &curr->dl)) && 1629 (p->nr_cpus_allowed > 1)) { 1630 int target = find_later_rq(p); 1631 1632 if (target != -1 && 1633 (dl_time_before(p->dl.deadline, 1634 cpu_rq(target)->dl.earliest_dl.curr) || 1635 (cpu_rq(target)->dl.dl_nr_running == 0))) 1636 cpu = target; 1637 } 1638 rcu_read_unlock(); 1639 1640 out: 1641 return cpu; 1642 } 1643 1644 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused) 1645 { 1646 struct rq *rq; 1647 1648 if (p->state != TASK_WAKING) 1649 return; 1650 1651 rq = task_rq(p); 1652 /* 1653 * Since p->state == TASK_WAKING, set_task_cpu() has been called 1654 * from try_to_wake_up(). Hence, p->pi_lock is locked, but 1655 * rq->lock is not... So, lock it 1656 */ 1657 raw_spin_lock(&rq->lock); 1658 if (p->dl.dl_non_contending) { 1659 sub_running_bw(&p->dl, &rq->dl); 1660 p->dl.dl_non_contending = 0; 1661 /* 1662 * If the timer handler is currently running and the 1663 * timer cannot be cancelled, inactive_task_timer() 1664 * will see that dl_not_contending is not set, and 1665 * will not touch the rq's active utilization, 1666 * so we are still safe. 1667 */ 1668 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) 1669 put_task_struct(p); 1670 } 1671 sub_rq_bw(&p->dl, &rq->dl); 1672 raw_spin_unlock(&rq->lock); 1673 } 1674 1675 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) 1676 { 1677 /* 1678 * Current can't be migrated, useless to reschedule, 1679 * let's hope p can move out. 1680 */ 1681 if (rq->curr->nr_cpus_allowed == 1 || 1682 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL)) 1683 return; 1684 1685 /* 1686 * p is migratable, so let's not schedule it and 1687 * see if it is pushed or pulled somewhere else. 1688 */ 1689 if (p->nr_cpus_allowed != 1 && 1690 cpudl_find(&rq->rd->cpudl, p, NULL)) 1691 return; 1692 1693 resched_curr(rq); 1694 } 1695 1696 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 1697 { 1698 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) { 1699 /* 1700 * This is OK, because current is on_cpu, which avoids it being 1701 * picked for load-balance and preemption/IRQs are still 1702 * disabled avoiding further scheduler activity on it and we've 1703 * not yet started the picking loop. 1704 */ 1705 rq_unpin_lock(rq, rf); 1706 pull_dl_task(rq); 1707 rq_repin_lock(rq, rf); 1708 } 1709 1710 return sched_stop_runnable(rq) || sched_dl_runnable(rq); 1711 } 1712 #endif /* CONFIG_SMP */ 1713 1714 /* 1715 * Only called when both the current and waking task are -deadline 1716 * tasks. 1717 */ 1718 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, 1719 int flags) 1720 { 1721 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { 1722 resched_curr(rq); 1723 return; 1724 } 1725 1726 #ifdef CONFIG_SMP 1727 /* 1728 * In the unlikely case current and p have the same deadline 1729 * let us try to decide what's the best thing to do... 1730 */ 1731 if ((p->dl.deadline == rq->curr->dl.deadline) && 1732 !test_tsk_need_resched(rq->curr)) 1733 check_preempt_equal_dl(rq, p); 1734 #endif /* CONFIG_SMP */ 1735 } 1736 1737 #ifdef CONFIG_SCHED_HRTICK 1738 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) 1739 { 1740 hrtick_start(rq, p->dl.runtime); 1741 } 1742 #else /* !CONFIG_SCHED_HRTICK */ 1743 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) 1744 { 1745 } 1746 #endif 1747 1748 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) 1749 { 1750 p->se.exec_start = rq_clock_task(rq); 1751 1752 /* You can't push away the running task */ 1753 dequeue_pushable_dl_task(rq, p); 1754 1755 if (!first) 1756 return; 1757 1758 if (hrtick_enabled(rq)) 1759 start_hrtick_dl(rq, p); 1760 1761 if (rq->curr->sched_class != &dl_sched_class) 1762 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); 1763 1764 deadline_queue_push_tasks(rq); 1765 } 1766 1767 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, 1768 struct dl_rq *dl_rq) 1769 { 1770 struct rb_node *left = rb_first_cached(&dl_rq->root); 1771 1772 if (!left) 1773 return NULL; 1774 1775 return rb_entry(left, struct sched_dl_entity, rb_node); 1776 } 1777 1778 static struct task_struct *pick_next_task_dl(struct rq *rq) 1779 { 1780 struct sched_dl_entity *dl_se; 1781 struct dl_rq *dl_rq = &rq->dl; 1782 struct task_struct *p; 1783 1784 if (!sched_dl_runnable(rq)) 1785 return NULL; 1786 1787 dl_se = pick_next_dl_entity(rq, dl_rq); 1788 BUG_ON(!dl_se); 1789 p = dl_task_of(dl_se); 1790 set_next_task_dl(rq, p, true); 1791 return p; 1792 } 1793 1794 static void put_prev_task_dl(struct rq *rq, struct task_struct *p) 1795 { 1796 update_curr_dl(rq); 1797 1798 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); 1799 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) 1800 enqueue_pushable_dl_task(rq, p); 1801 } 1802 1803 /* 1804 * scheduler tick hitting a task of our scheduling class. 1805 * 1806 * NOTE: This function can be called remotely by the tick offload that 1807 * goes along full dynticks. Therefore no local assumption can be made 1808 * and everything must be accessed through the @rq and @curr passed in 1809 * parameters. 1810 */ 1811 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) 1812 { 1813 update_curr_dl(rq); 1814 1815 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); 1816 /* 1817 * Even when we have runtime, update_curr_dl() might have resulted in us 1818 * not being the leftmost task anymore. In that case NEED_RESCHED will 1819 * be set and schedule() will start a new hrtick for the next task. 1820 */ 1821 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 && 1822 is_leftmost(p, &rq->dl)) 1823 start_hrtick_dl(rq, p); 1824 } 1825 1826 static void task_fork_dl(struct task_struct *p) 1827 { 1828 /* 1829 * SCHED_DEADLINE tasks cannot fork and this is achieved through 1830 * sched_fork() 1831 */ 1832 } 1833 1834 #ifdef CONFIG_SMP 1835 1836 /* Only try algorithms three times */ 1837 #define DL_MAX_TRIES 3 1838 1839 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) 1840 { 1841 if (!task_running(rq, p) && 1842 cpumask_test_cpu(cpu, p->cpus_ptr)) 1843 return 1; 1844 return 0; 1845 } 1846 1847 /* 1848 * Return the earliest pushable rq's task, which is suitable to be executed 1849 * on the CPU, NULL otherwise: 1850 */ 1851 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu) 1852 { 1853 struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost; 1854 struct task_struct *p = NULL; 1855 1856 if (!has_pushable_dl_tasks(rq)) 1857 return NULL; 1858 1859 next_node: 1860 if (next_node) { 1861 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks); 1862 1863 if (pick_dl_task(rq, p, cpu)) 1864 return p; 1865 1866 next_node = rb_next(next_node); 1867 goto next_node; 1868 } 1869 1870 return NULL; 1871 } 1872 1873 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl); 1874 1875 static int find_later_rq(struct task_struct *task) 1876 { 1877 struct sched_domain *sd; 1878 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl); 1879 int this_cpu = smp_processor_id(); 1880 int cpu = task_cpu(task); 1881 1882 /* Make sure the mask is initialized first */ 1883 if (unlikely(!later_mask)) 1884 return -1; 1885 1886 if (task->nr_cpus_allowed == 1) 1887 return -1; 1888 1889 /* 1890 * We have to consider system topology and task affinity 1891 * first, then we can look for a suitable CPU. 1892 */ 1893 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) 1894 return -1; 1895 1896 /* 1897 * If we are here, some targets have been found, including 1898 * the most suitable which is, among the runqueues where the 1899 * current tasks have later deadlines than the task's one, the 1900 * rq with the latest possible one. 1901 * 1902 * Now we check how well this matches with task's 1903 * affinity and system topology. 1904 * 1905 * The last CPU where the task run is our first 1906 * guess, since it is most likely cache-hot there. 1907 */ 1908 if (cpumask_test_cpu(cpu, later_mask)) 1909 return cpu; 1910 /* 1911 * Check if this_cpu is to be skipped (i.e., it is 1912 * not in the mask) or not. 1913 */ 1914 if (!cpumask_test_cpu(this_cpu, later_mask)) 1915 this_cpu = -1; 1916 1917 rcu_read_lock(); 1918 for_each_domain(cpu, sd) { 1919 if (sd->flags & SD_WAKE_AFFINE) { 1920 int best_cpu; 1921 1922 /* 1923 * If possible, preempting this_cpu is 1924 * cheaper than migrating. 1925 */ 1926 if (this_cpu != -1 && 1927 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { 1928 rcu_read_unlock(); 1929 return this_cpu; 1930 } 1931 1932 best_cpu = cpumask_first_and(later_mask, 1933 sched_domain_span(sd)); 1934 /* 1935 * Last chance: if a CPU being in both later_mask 1936 * and current sd span is valid, that becomes our 1937 * choice. Of course, the latest possible CPU is 1938 * already under consideration through later_mask. 1939 */ 1940 if (best_cpu < nr_cpu_ids) { 1941 rcu_read_unlock(); 1942 return best_cpu; 1943 } 1944 } 1945 } 1946 rcu_read_unlock(); 1947 1948 /* 1949 * At this point, all our guesses failed, we just return 1950 * 'something', and let the caller sort the things out. 1951 */ 1952 if (this_cpu != -1) 1953 return this_cpu; 1954 1955 cpu = cpumask_any(later_mask); 1956 if (cpu < nr_cpu_ids) 1957 return cpu; 1958 1959 return -1; 1960 } 1961 1962 /* Locks the rq it finds */ 1963 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) 1964 { 1965 struct rq *later_rq = NULL; 1966 int tries; 1967 int cpu; 1968 1969 for (tries = 0; tries < DL_MAX_TRIES; tries++) { 1970 cpu = find_later_rq(task); 1971 1972 if ((cpu == -1) || (cpu == rq->cpu)) 1973 break; 1974 1975 later_rq = cpu_rq(cpu); 1976 1977 if (later_rq->dl.dl_nr_running && 1978 !dl_time_before(task->dl.deadline, 1979 later_rq->dl.earliest_dl.curr)) { 1980 /* 1981 * Target rq has tasks of equal or earlier deadline, 1982 * retrying does not release any lock and is unlikely 1983 * to yield a different result. 1984 */ 1985 later_rq = NULL; 1986 break; 1987 } 1988 1989 /* Retry if something changed. */ 1990 if (double_lock_balance(rq, later_rq)) { 1991 if (unlikely(task_rq(task) != rq || 1992 !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) || 1993 task_running(rq, task) || 1994 !dl_task(task) || 1995 !task_on_rq_queued(task))) { 1996 double_unlock_balance(rq, later_rq); 1997 later_rq = NULL; 1998 break; 1999 } 2000 } 2001 2002 /* 2003 * If the rq we found has no -deadline task, or 2004 * its earliest one has a later deadline than our 2005 * task, the rq is a good one. 2006 */ 2007 if (!later_rq->dl.dl_nr_running || 2008 dl_time_before(task->dl.deadline, 2009 later_rq->dl.earliest_dl.curr)) 2010 break; 2011 2012 /* Otherwise we try again. */ 2013 double_unlock_balance(rq, later_rq); 2014 later_rq = NULL; 2015 } 2016 2017 return later_rq; 2018 } 2019 2020 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) 2021 { 2022 struct task_struct *p; 2023 2024 if (!has_pushable_dl_tasks(rq)) 2025 return NULL; 2026 2027 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost, 2028 struct task_struct, pushable_dl_tasks); 2029 2030 BUG_ON(rq->cpu != task_cpu(p)); 2031 BUG_ON(task_current(rq, p)); 2032 BUG_ON(p->nr_cpus_allowed <= 1); 2033 2034 BUG_ON(!task_on_rq_queued(p)); 2035 BUG_ON(!dl_task(p)); 2036 2037 return p; 2038 } 2039 2040 /* 2041 * See if the non running -deadline tasks on this rq 2042 * can be sent to some other CPU where they can preempt 2043 * and start executing. 2044 */ 2045 static int push_dl_task(struct rq *rq) 2046 { 2047 struct task_struct *next_task; 2048 struct rq *later_rq; 2049 int ret = 0; 2050 2051 if (!rq->dl.overloaded) 2052 return 0; 2053 2054 next_task = pick_next_pushable_dl_task(rq); 2055 if (!next_task) 2056 return 0; 2057 2058 retry: 2059 if (WARN_ON(next_task == rq->curr)) 2060 return 0; 2061 2062 /* 2063 * If next_task preempts rq->curr, and rq->curr 2064 * can move away, it makes sense to just reschedule 2065 * without going further in pushing next_task. 2066 */ 2067 if (dl_task(rq->curr) && 2068 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && 2069 rq->curr->nr_cpus_allowed > 1) { 2070 resched_curr(rq); 2071 return 0; 2072 } 2073 2074 /* We might release rq lock */ 2075 get_task_struct(next_task); 2076 2077 /* Will lock the rq it'll find */ 2078 later_rq = find_lock_later_rq(next_task, rq); 2079 if (!later_rq) { 2080 struct task_struct *task; 2081 2082 /* 2083 * We must check all this again, since 2084 * find_lock_later_rq releases rq->lock and it is 2085 * then possible that next_task has migrated. 2086 */ 2087 task = pick_next_pushable_dl_task(rq); 2088 if (task == next_task) { 2089 /* 2090 * The task is still there. We don't try 2091 * again, some other CPU will pull it when ready. 2092 */ 2093 goto out; 2094 } 2095 2096 if (!task) 2097 /* No more tasks */ 2098 goto out; 2099 2100 put_task_struct(next_task); 2101 next_task = task; 2102 goto retry; 2103 } 2104 2105 deactivate_task(rq, next_task, 0); 2106 set_task_cpu(next_task, later_rq->cpu); 2107 2108 /* 2109 * Update the later_rq clock here, because the clock is used 2110 * by the cpufreq_update_util() inside __add_running_bw(). 2111 */ 2112 update_rq_clock(later_rq); 2113 activate_task(later_rq, next_task, ENQUEUE_NOCLOCK); 2114 ret = 1; 2115 2116 resched_curr(later_rq); 2117 2118 double_unlock_balance(rq, later_rq); 2119 2120 out: 2121 put_task_struct(next_task); 2122 2123 return ret; 2124 } 2125 2126 static void push_dl_tasks(struct rq *rq) 2127 { 2128 /* push_dl_task() will return true if it moved a -deadline task */ 2129 while (push_dl_task(rq)) 2130 ; 2131 } 2132 2133 static void pull_dl_task(struct rq *this_rq) 2134 { 2135 int this_cpu = this_rq->cpu, cpu; 2136 struct task_struct *p; 2137 bool resched = false; 2138 struct rq *src_rq; 2139 u64 dmin = LONG_MAX; 2140 2141 if (likely(!dl_overloaded(this_rq))) 2142 return; 2143 2144 /* 2145 * Match the barrier from dl_set_overloaded; this guarantees that if we 2146 * see overloaded we must also see the dlo_mask bit. 2147 */ 2148 smp_rmb(); 2149 2150 for_each_cpu(cpu, this_rq->rd->dlo_mask) { 2151 if (this_cpu == cpu) 2152 continue; 2153 2154 src_rq = cpu_rq(cpu); 2155 2156 /* 2157 * It looks racy, abd it is! However, as in sched_rt.c, 2158 * we are fine with this. 2159 */ 2160 if (this_rq->dl.dl_nr_running && 2161 dl_time_before(this_rq->dl.earliest_dl.curr, 2162 src_rq->dl.earliest_dl.next)) 2163 continue; 2164 2165 /* Might drop this_rq->lock */ 2166 double_lock_balance(this_rq, src_rq); 2167 2168 /* 2169 * If there are no more pullable tasks on the 2170 * rq, we're done with it. 2171 */ 2172 if (src_rq->dl.dl_nr_running <= 1) 2173 goto skip; 2174 2175 p = pick_earliest_pushable_dl_task(src_rq, this_cpu); 2176 2177 /* 2178 * We found a task to be pulled if: 2179 * - it preempts our current (if there's one), 2180 * - it will preempt the last one we pulled (if any). 2181 */ 2182 if (p && dl_time_before(p->dl.deadline, dmin) && 2183 (!this_rq->dl.dl_nr_running || 2184 dl_time_before(p->dl.deadline, 2185 this_rq->dl.earliest_dl.curr))) { 2186 WARN_ON(p == src_rq->curr); 2187 WARN_ON(!task_on_rq_queued(p)); 2188 2189 /* 2190 * Then we pull iff p has actually an earlier 2191 * deadline than the current task of its runqueue. 2192 */ 2193 if (dl_time_before(p->dl.deadline, 2194 src_rq->curr->dl.deadline)) 2195 goto skip; 2196 2197 resched = true; 2198 2199 deactivate_task(src_rq, p, 0); 2200 set_task_cpu(p, this_cpu); 2201 activate_task(this_rq, p, 0); 2202 dmin = p->dl.deadline; 2203 2204 /* Is there any other task even earlier? */ 2205 } 2206 skip: 2207 double_unlock_balance(this_rq, src_rq); 2208 } 2209 2210 if (resched) 2211 resched_curr(this_rq); 2212 } 2213 2214 /* 2215 * Since the task is not running and a reschedule is not going to happen 2216 * anytime soon on its runqueue, we try pushing it away now. 2217 */ 2218 static void task_woken_dl(struct rq *rq, struct task_struct *p) 2219 { 2220 if (!task_running(rq, p) && 2221 !test_tsk_need_resched(rq->curr) && 2222 p->nr_cpus_allowed > 1 && 2223 dl_task(rq->curr) && 2224 (rq->curr->nr_cpus_allowed < 2 || 2225 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { 2226 push_dl_tasks(rq); 2227 } 2228 } 2229 2230 static void set_cpus_allowed_dl(struct task_struct *p, 2231 const struct cpumask *new_mask) 2232 { 2233 struct root_domain *src_rd; 2234 struct rq *rq; 2235 2236 BUG_ON(!dl_task(p)); 2237 2238 rq = task_rq(p); 2239 src_rd = rq->rd; 2240 /* 2241 * Migrating a SCHED_DEADLINE task between exclusive 2242 * cpusets (different root_domains) entails a bandwidth 2243 * update. We already made space for us in the destination 2244 * domain (see cpuset_can_attach()). 2245 */ 2246 if (!cpumask_intersects(src_rd->span, new_mask)) { 2247 struct dl_bw *src_dl_b; 2248 2249 src_dl_b = dl_bw_of(cpu_of(rq)); 2250 /* 2251 * We now free resources of the root_domain we are migrating 2252 * off. In the worst case, sched_setattr() may temporary fail 2253 * until we complete the update. 2254 */ 2255 raw_spin_lock(&src_dl_b->lock); 2256 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); 2257 raw_spin_unlock(&src_dl_b->lock); 2258 } 2259 2260 set_cpus_allowed_common(p, new_mask); 2261 } 2262 2263 /* Assumes rq->lock is held */ 2264 static void rq_online_dl(struct rq *rq) 2265 { 2266 if (rq->dl.overloaded) 2267 dl_set_overload(rq); 2268 2269 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu); 2270 if (rq->dl.dl_nr_running > 0) 2271 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr); 2272 } 2273 2274 /* Assumes rq->lock is held */ 2275 static void rq_offline_dl(struct rq *rq) 2276 { 2277 if (rq->dl.overloaded) 2278 dl_clear_overload(rq); 2279 2280 cpudl_clear(&rq->rd->cpudl, rq->cpu); 2281 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu); 2282 } 2283 2284 void __init init_sched_dl_class(void) 2285 { 2286 unsigned int i; 2287 2288 for_each_possible_cpu(i) 2289 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i), 2290 GFP_KERNEL, cpu_to_node(i)); 2291 } 2292 2293 void dl_add_task_root_domain(struct task_struct *p) 2294 { 2295 struct rq_flags rf; 2296 struct rq *rq; 2297 struct dl_bw *dl_b; 2298 2299 rq = task_rq_lock(p, &rf); 2300 if (!dl_task(p)) 2301 goto unlock; 2302 2303 dl_b = &rq->rd->dl_bw; 2304 raw_spin_lock(&dl_b->lock); 2305 2306 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); 2307 2308 raw_spin_unlock(&dl_b->lock); 2309 2310 unlock: 2311 task_rq_unlock(rq, p, &rf); 2312 } 2313 2314 void dl_clear_root_domain(struct root_domain *rd) 2315 { 2316 unsigned long flags; 2317 2318 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags); 2319 rd->dl_bw.total_bw = 0; 2320 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags); 2321 } 2322 2323 #endif /* CONFIG_SMP */ 2324 2325 static void switched_from_dl(struct rq *rq, struct task_struct *p) 2326 { 2327 /* 2328 * task_non_contending() can start the "inactive timer" (if the 0-lag 2329 * time is in the future). If the task switches back to dl before 2330 * the "inactive timer" fires, it can continue to consume its current 2331 * runtime using its current deadline. If it stays outside of 2332 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer() 2333 * will reset the task parameters. 2334 */ 2335 if (task_on_rq_queued(p) && p->dl.dl_runtime) 2336 task_non_contending(p); 2337 2338 if (!task_on_rq_queued(p)) { 2339 /* 2340 * Inactive timer is armed. However, p is leaving DEADLINE and 2341 * might migrate away from this rq while continuing to run on 2342 * some other class. We need to remove its contribution from 2343 * this rq running_bw now, or sub_rq_bw (below) will complain. 2344 */ 2345 if (p->dl.dl_non_contending) 2346 sub_running_bw(&p->dl, &rq->dl); 2347 sub_rq_bw(&p->dl, &rq->dl); 2348 } 2349 2350 /* 2351 * We cannot use inactive_task_timer() to invoke sub_running_bw() 2352 * at the 0-lag time, because the task could have been migrated 2353 * while SCHED_OTHER in the meanwhile. 2354 */ 2355 if (p->dl.dl_non_contending) 2356 p->dl.dl_non_contending = 0; 2357 2358 /* 2359 * Since this might be the only -deadline task on the rq, 2360 * this is the right place to try to pull some other one 2361 * from an overloaded CPU, if any. 2362 */ 2363 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) 2364 return; 2365 2366 deadline_queue_pull_task(rq); 2367 } 2368 2369 /* 2370 * When switching to -deadline, we may overload the rq, then 2371 * we try to push someone off, if possible. 2372 */ 2373 static void switched_to_dl(struct rq *rq, struct task_struct *p) 2374 { 2375 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) 2376 put_task_struct(p); 2377 2378 /* If p is not queued we will update its parameters at next wakeup. */ 2379 if (!task_on_rq_queued(p)) { 2380 add_rq_bw(&p->dl, &rq->dl); 2381 2382 return; 2383 } 2384 2385 if (rq->curr != p) { 2386 #ifdef CONFIG_SMP 2387 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) 2388 deadline_queue_push_tasks(rq); 2389 #endif 2390 if (dl_task(rq->curr)) 2391 check_preempt_curr_dl(rq, p, 0); 2392 else 2393 resched_curr(rq); 2394 } 2395 } 2396 2397 /* 2398 * If the scheduling parameters of a -deadline task changed, 2399 * a push or pull operation might be needed. 2400 */ 2401 static void prio_changed_dl(struct rq *rq, struct task_struct *p, 2402 int oldprio) 2403 { 2404 if (task_on_rq_queued(p) || rq->curr == p) { 2405 #ifdef CONFIG_SMP 2406 /* 2407 * This might be too much, but unfortunately 2408 * we don't have the old deadline value, and 2409 * we can't argue if the task is increasing 2410 * or lowering its prio, so... 2411 */ 2412 if (!rq->dl.overloaded) 2413 deadline_queue_pull_task(rq); 2414 2415 /* 2416 * If we now have a earlier deadline task than p, 2417 * then reschedule, provided p is still on this 2418 * runqueue. 2419 */ 2420 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) 2421 resched_curr(rq); 2422 #else 2423 /* 2424 * Again, we don't know if p has a earlier 2425 * or later deadline, so let's blindly set a 2426 * (maybe not needed) rescheduling point. 2427 */ 2428 resched_curr(rq); 2429 #endif /* CONFIG_SMP */ 2430 } 2431 } 2432 2433 const struct sched_class dl_sched_class = { 2434 .next = &rt_sched_class, 2435 .enqueue_task = enqueue_task_dl, 2436 .dequeue_task = dequeue_task_dl, 2437 .yield_task = yield_task_dl, 2438 2439 .check_preempt_curr = check_preempt_curr_dl, 2440 2441 .pick_next_task = pick_next_task_dl, 2442 .put_prev_task = put_prev_task_dl, 2443 .set_next_task = set_next_task_dl, 2444 2445 #ifdef CONFIG_SMP 2446 .balance = balance_dl, 2447 .select_task_rq = select_task_rq_dl, 2448 .migrate_task_rq = migrate_task_rq_dl, 2449 .set_cpus_allowed = set_cpus_allowed_dl, 2450 .rq_online = rq_online_dl, 2451 .rq_offline = rq_offline_dl, 2452 .task_woken = task_woken_dl, 2453 #endif 2454 2455 .task_tick = task_tick_dl, 2456 .task_fork = task_fork_dl, 2457 2458 .prio_changed = prio_changed_dl, 2459 .switched_from = switched_from_dl, 2460 .switched_to = switched_to_dl, 2461 2462 .update_curr = update_curr_dl, 2463 }; 2464 2465 int sched_dl_global_validate(void) 2466 { 2467 u64 runtime = global_rt_runtime(); 2468 u64 period = global_rt_period(); 2469 u64 new_bw = to_ratio(period, runtime); 2470 struct dl_bw *dl_b; 2471 int cpu, ret = 0; 2472 unsigned long flags; 2473 2474 /* 2475 * Here we want to check the bandwidth not being set to some 2476 * value smaller than the currently allocated bandwidth in 2477 * any of the root_domains. 2478 * 2479 * FIXME: Cycling on all the CPUs is overdoing, but simpler than 2480 * cycling on root_domains... Discussion on different/better 2481 * solutions is welcome! 2482 */ 2483 for_each_possible_cpu(cpu) { 2484 rcu_read_lock_sched(); 2485 dl_b = dl_bw_of(cpu); 2486 2487 raw_spin_lock_irqsave(&dl_b->lock, flags); 2488 if (new_bw < dl_b->total_bw) 2489 ret = -EBUSY; 2490 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2491 2492 rcu_read_unlock_sched(); 2493 2494 if (ret) 2495 break; 2496 } 2497 2498 return ret; 2499 } 2500 2501 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq) 2502 { 2503 if (global_rt_runtime() == RUNTIME_INF) { 2504 dl_rq->bw_ratio = 1 << RATIO_SHIFT; 2505 dl_rq->extra_bw = 1 << BW_SHIFT; 2506 } else { 2507 dl_rq->bw_ratio = to_ratio(global_rt_runtime(), 2508 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT); 2509 dl_rq->extra_bw = to_ratio(global_rt_period(), 2510 global_rt_runtime()); 2511 } 2512 } 2513 2514 void sched_dl_do_global(void) 2515 { 2516 u64 new_bw = -1; 2517 struct dl_bw *dl_b; 2518 int cpu; 2519 unsigned long flags; 2520 2521 def_dl_bandwidth.dl_period = global_rt_period(); 2522 def_dl_bandwidth.dl_runtime = global_rt_runtime(); 2523 2524 if (global_rt_runtime() != RUNTIME_INF) 2525 new_bw = to_ratio(global_rt_period(), global_rt_runtime()); 2526 2527 /* 2528 * FIXME: As above... 2529 */ 2530 for_each_possible_cpu(cpu) { 2531 rcu_read_lock_sched(); 2532 dl_b = dl_bw_of(cpu); 2533 2534 raw_spin_lock_irqsave(&dl_b->lock, flags); 2535 dl_b->bw = new_bw; 2536 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2537 2538 rcu_read_unlock_sched(); 2539 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl); 2540 } 2541 } 2542 2543 /* 2544 * We must be sure that accepting a new task (or allowing changing the 2545 * parameters of an existing one) is consistent with the bandwidth 2546 * constraints. If yes, this function also accordingly updates the currently 2547 * allocated bandwidth to reflect the new situation. 2548 * 2549 * This function is called while holding p's rq->lock. 2550 */ 2551 int sched_dl_overflow(struct task_struct *p, int policy, 2552 const struct sched_attr *attr) 2553 { 2554 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 2555 u64 period = attr->sched_period ?: attr->sched_deadline; 2556 u64 runtime = attr->sched_runtime; 2557 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; 2558 int cpus, err = -1; 2559 2560 if (attr->sched_flags & SCHED_FLAG_SUGOV) 2561 return 0; 2562 2563 /* !deadline task may carry old deadline bandwidth */ 2564 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) 2565 return 0; 2566 2567 /* 2568 * Either if a task, enters, leave, or stays -deadline but changes 2569 * its parameters, we may need to update accordingly the total 2570 * allocated bandwidth of the container. 2571 */ 2572 raw_spin_lock(&dl_b->lock); 2573 cpus = dl_bw_cpus(task_cpu(p)); 2574 if (dl_policy(policy) && !task_has_dl_policy(p) && 2575 !__dl_overflow(dl_b, cpus, 0, new_bw)) { 2576 if (hrtimer_active(&p->dl.inactive_timer)) 2577 __dl_sub(dl_b, p->dl.dl_bw, cpus); 2578 __dl_add(dl_b, new_bw, cpus); 2579 err = 0; 2580 } else if (dl_policy(policy) && task_has_dl_policy(p) && 2581 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) { 2582 /* 2583 * XXX this is slightly incorrect: when the task 2584 * utilization decreases, we should delay the total 2585 * utilization change until the task's 0-lag point. 2586 * But this would require to set the task's "inactive 2587 * timer" when the task is not inactive. 2588 */ 2589 __dl_sub(dl_b, p->dl.dl_bw, cpus); 2590 __dl_add(dl_b, new_bw, cpus); 2591 dl_change_utilization(p, new_bw); 2592 err = 0; 2593 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { 2594 /* 2595 * Do not decrease the total deadline utilization here, 2596 * switched_from_dl() will take care to do it at the correct 2597 * (0-lag) time. 2598 */ 2599 err = 0; 2600 } 2601 raw_spin_unlock(&dl_b->lock); 2602 2603 return err; 2604 } 2605 2606 /* 2607 * This function initializes the sched_dl_entity of a newly becoming 2608 * SCHED_DEADLINE task. 2609 * 2610 * Only the static values are considered here, the actual runtime and the 2611 * absolute deadline will be properly calculated when the task is enqueued 2612 * for the first time with its new policy. 2613 */ 2614 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr) 2615 { 2616 struct sched_dl_entity *dl_se = &p->dl; 2617 2618 dl_se->dl_runtime = attr->sched_runtime; 2619 dl_se->dl_deadline = attr->sched_deadline; 2620 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 2621 dl_se->flags = attr->sched_flags; 2622 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 2623 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime); 2624 } 2625 2626 void __getparam_dl(struct task_struct *p, struct sched_attr *attr) 2627 { 2628 struct sched_dl_entity *dl_se = &p->dl; 2629 2630 attr->sched_priority = p->rt_priority; 2631 attr->sched_runtime = dl_se->dl_runtime; 2632 attr->sched_deadline = dl_se->dl_deadline; 2633 attr->sched_period = dl_se->dl_period; 2634 attr->sched_flags = dl_se->flags; 2635 } 2636 2637 /* 2638 * This function validates the new parameters of a -deadline task. 2639 * We ask for the deadline not being zero, and greater or equal 2640 * than the runtime, as well as the period of being zero or 2641 * greater than deadline. Furthermore, we have to be sure that 2642 * user parameters are above the internal resolution of 1us (we 2643 * check sched_runtime only since it is always the smaller one) and 2644 * below 2^63 ns (we have to check both sched_deadline and 2645 * sched_period, as the latter can be zero). 2646 */ 2647 bool __checkparam_dl(const struct sched_attr *attr) 2648 { 2649 /* special dl tasks don't actually use any parameter */ 2650 if (attr->sched_flags & SCHED_FLAG_SUGOV) 2651 return true; 2652 2653 /* deadline != 0 */ 2654 if (attr->sched_deadline == 0) 2655 return false; 2656 2657 /* 2658 * Since we truncate DL_SCALE bits, make sure we're at least 2659 * that big. 2660 */ 2661 if (attr->sched_runtime < (1ULL << DL_SCALE)) 2662 return false; 2663 2664 /* 2665 * Since we use the MSB for wrap-around and sign issues, make 2666 * sure it's not set (mind that period can be equal to zero). 2667 */ 2668 if (attr->sched_deadline & (1ULL << 63) || 2669 attr->sched_period & (1ULL << 63)) 2670 return false; 2671 2672 /* runtime <= deadline <= period (if period != 0) */ 2673 if ((attr->sched_period != 0 && 2674 attr->sched_period < attr->sched_deadline) || 2675 attr->sched_deadline < attr->sched_runtime) 2676 return false; 2677 2678 return true; 2679 } 2680 2681 /* 2682 * This function clears the sched_dl_entity static params. 2683 */ 2684 void __dl_clear_params(struct task_struct *p) 2685 { 2686 struct sched_dl_entity *dl_se = &p->dl; 2687 2688 dl_se->dl_runtime = 0; 2689 dl_se->dl_deadline = 0; 2690 dl_se->dl_period = 0; 2691 dl_se->flags = 0; 2692 dl_se->dl_bw = 0; 2693 dl_se->dl_density = 0; 2694 2695 dl_se->dl_throttled = 0; 2696 dl_se->dl_yielded = 0; 2697 dl_se->dl_non_contending = 0; 2698 dl_se->dl_overrun = 0; 2699 } 2700 2701 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) 2702 { 2703 struct sched_dl_entity *dl_se = &p->dl; 2704 2705 if (dl_se->dl_runtime != attr->sched_runtime || 2706 dl_se->dl_deadline != attr->sched_deadline || 2707 dl_se->dl_period != attr->sched_period || 2708 dl_se->flags != attr->sched_flags) 2709 return true; 2710 2711 return false; 2712 } 2713 2714 #ifdef CONFIG_SMP 2715 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed) 2716 { 2717 unsigned int dest_cpu; 2718 struct dl_bw *dl_b; 2719 bool overflow; 2720 int cpus, ret; 2721 unsigned long flags; 2722 2723 dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed); 2724 2725 rcu_read_lock_sched(); 2726 dl_b = dl_bw_of(dest_cpu); 2727 raw_spin_lock_irqsave(&dl_b->lock, flags); 2728 cpus = dl_bw_cpus(dest_cpu); 2729 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw); 2730 if (overflow) { 2731 ret = -EBUSY; 2732 } else { 2733 /* 2734 * We reserve space for this task in the destination 2735 * root_domain, as we can't fail after this point. 2736 * We will free resources in the source root_domain 2737 * later on (see set_cpus_allowed_dl()). 2738 */ 2739 __dl_add(dl_b, p->dl.dl_bw, cpus); 2740 ret = 0; 2741 } 2742 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2743 rcu_read_unlock_sched(); 2744 2745 return ret; 2746 } 2747 2748 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, 2749 const struct cpumask *trial) 2750 { 2751 int ret = 1, trial_cpus; 2752 struct dl_bw *cur_dl_b; 2753 unsigned long flags; 2754 2755 rcu_read_lock_sched(); 2756 cur_dl_b = dl_bw_of(cpumask_any(cur)); 2757 trial_cpus = cpumask_weight(trial); 2758 2759 raw_spin_lock_irqsave(&cur_dl_b->lock, flags); 2760 if (cur_dl_b->bw != -1 && 2761 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw) 2762 ret = 0; 2763 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); 2764 rcu_read_unlock_sched(); 2765 2766 return ret; 2767 } 2768 2769 bool dl_cpu_busy(unsigned int cpu) 2770 { 2771 unsigned long flags; 2772 struct dl_bw *dl_b; 2773 bool overflow; 2774 int cpus; 2775 2776 rcu_read_lock_sched(); 2777 dl_b = dl_bw_of(cpu); 2778 raw_spin_lock_irqsave(&dl_b->lock, flags); 2779 cpus = dl_bw_cpus(cpu); 2780 overflow = __dl_overflow(dl_b, cpus, 0, 0); 2781 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 2782 rcu_read_unlock_sched(); 2783 2784 return overflow; 2785 } 2786 #endif 2787 2788 #ifdef CONFIG_SCHED_DEBUG 2789 void print_dl_stats(struct seq_file *m, int cpu) 2790 { 2791 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); 2792 } 2793 #endif /* CONFIG_SCHED_DEBUG */ 2794