// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel scheduler code and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>

#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>

#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_ENTRY
#  include <linux/entry-common.h>
# endif
#endif

#include <uapi/linux/sched/types.h>

#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"
#include "stats.h"
#include "autogroup.h"

#include "autogroup.h"
#include "pelt.h"
#include "smp.h"
#include "stats.h"

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
#ifdef CONFIG_PREEMPT_RT
const_debug unsigned int sysctl_sched_nr_migrate = 8;
#else
const_debug unsigned int sysctl_sched_nr_migrate = 32;
#endif

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(struct task_struct *p)
{
	if (p->sched_class == &stop_sched_class) /* trumps deadline */
		return -2;

	if (rt_prio(p->prio)) /* includes deadline */
		return p->prio; /* [-1, 99] */

	if (p->sched_class == &idle_sched_class)
		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

	return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
}

/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  := l(b,a)
 * ge(a,b) := !l(a,b)
 */

/* real prio, less is less */
static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
{

	int pa = __task_prio(a), pb = __task_prio(b);

	if (-pa < -pb)
		return true;

	if (-pb < -pa)
		return false;

	if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
		return !dl_time_before(a->dl.deadline, b->dl.deadline);

	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
		return cfs_prio_less(a, b, in_fi);

	return false;
}
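
/*
 * Worked example (illustrative, not part of the original source): a
 * SCHED_FIFO task with rt_priority 10 has p->prio == MAX_RT_PRIO - 1 - 10
 * == 89, so __task_prio() returns 89, while every fair task collapses to
 * the single value MAX_RT_PRIO + MAX_NICE and the idle task to
 * MAX_RT_PRIO + NICE_WIDTH; prio_less() then treats the numerically
 * smaller value as the more important task.
 */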

static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
{
	if (a->core_cookie < b->core_cookie)
		return true;

	if (a->core_cookie > b->core_cookie)
		return false;

	/* flip prio, so high prio is leftmost */
	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
		return true;

	return false;
}

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
	const struct task_struct *p = __node_2_sc(node);
	unsigned long cookie = (unsigned long)key;

	if (cookie < p->core_cookie)
		return -1;

	if (cookie > p->core_cookie)
		return 1;

	return 0;
}

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->core->core_task_seq++;

	if (!p->core_cookie)
		return;

	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
	rq->core->core_task_seq++;

	if (sched_core_enqueued(p)) {
		rb_erase(&p->core_node, &rq->core_tree);
		RB_CLEAR_NODE(&p->core_node);
	}

	/*
	 * Migrating the last task off the cpu, with the cpu in forced idle
	 * state. Reschedule to create an accounting edge for forced idle,
	 * and re-examine whether the core is still in forced idle state.
	 */
	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
	    rq->core->core_forceidle_count && rq->curr == rq->idle)
		resched_curr(rq);
}

/*
 * Find left-most (aka, highest priority) task matching @cookie.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
	struct rb_node *node;

	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
	/*
	 * The idle task always matches any cookie!
	 */
	if (!node)
		return idle_sched_class.pick_task(rq);

	return __node_2_sc(node);
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
	struct rb_node *node = &p->core_node;

	node = rb_next(node);
	if (!node)
		return NULL;

	p = container_of(node, struct task_struct, core_node);
	if (p->core_cookie != cookie)
		return NULL;

	return p;
}

/*
 * Magic required such that:
 *
 *	raw_spin_rq_lock(rq);
 *	...
 *	raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t;

	for_each_cpu(t, smt_mask)
		raw_spin_unlock(&cpu_rq(t)->__lock);
	local_irq_restore(*flags);
}

static void __sched_core_flip(bool enabled)
{
	unsigned long flags;
	int cpu, t;

	cpus_read_lock();

	/*
	 * Toggle the online cores, one by one.
	 */
	cpumask_copy(&sched_core_mask, cpu_online_mask);
	for_each_cpu(cpu, &sched_core_mask) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu);

		sched_core_lock(cpu, &flags);

		for_each_cpu(t, smt_mask)
			cpu_rq(t)->core_enabled = enabled;

		cpu_rq(cpu)->core->core_forceidle_start = 0;

		sched_core_unlock(cpu, &flags);

		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
	}

	/*
	 * Toggle the offline CPUs.
	 */
	cpumask_copy(&sched_core_mask, cpu_possible_mask);
	cpumask_andnot(&sched_core_mask, &sched_core_mask, cpu_online_mask);

	for_each_cpu(cpu, &sched_core_mask)
		cpu_rq(cpu)->core_enabled = enabled;

	cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
	static_branch_enable(&__sched_core_enabled);
	/*
	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
	 * and future ones will observe !sched_core_disabled().
	 */
	synchronize_rcu();
	__sched_core_flip(true);
	sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
	sched_core_assert_empty();
	__sched_core_flip(false);
	static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
	if (atomic_inc_not_zero(&sched_core_count))
		return;

	mutex_lock(&sched_core_mutex);
	if (!atomic_read(&sched_core_count))
		__sched_core_enable();

	smp_mb__before_atomic();
	atomic_inc(&sched_core_count);
	mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
		__sched_core_disable();
		mutex_unlock(&sched_core_mutex);
	}
}

void sched_core_put(void)
{
	static DECLARE_WORK(_work, __sched_core_put);

	/*
	 * "There can be only one"
	 *
	 * Either this is the last one, or we don't actually need to do any
	 * 'work'. If it is the last *again*, we rely on
	 * WORK_STRUCT_PENDING_BIT.
	 */
	if (!atomic_add_unless(&sched_core_count, -1, 1))
		schedule_work(&_work);
}

#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }

#endif /* CONFIG_SCHED_CORE */

/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task()/
 *    cpu_cgroup_fork():	p->sched_task_group
 *  - uclamp_update_active()	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */
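
/*
 * Illustrative sketch (not part of the original source) of the pattern the
 * "Special state" rules above lead to for external callers:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	// takes p->pi_lock and rq->lock
 *	// p->policy, p->cpus_ptr, p->sched_class etc. are stable here
 *	task_rq_unlock(rq, p, &rf);
 */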

void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		/* preempt_count *MUST* be > 1 */
		preempt_enable_no_resched();
		return;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		if (likely(lock == __rq_lockp(rq))) {
			/* preempt_count *MUST* be > 1 */
			preempt_enable_no_resched();
			return;
		}
		raw_spin_unlock(lock);
	}
}

bool raw_spin_rq_trylock(struct rq *rq)
{
	raw_spinlock_t *lock;
	bool ret;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		ret = raw_spin_trylock(&rq->__lock);
		preempt_enable();
		return ret;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		ret = raw_spin_trylock(lock);
		if (!ret || (likely(lock == __rq_lockp(rq)))) {
			preempt_enable();
			return ret;
		}
		raw_spin_unlock(lock);
	}
}

void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}

#ifdef CONFIG_SMP
/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);
}
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight mis-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_rq_held(rq);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}
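
/*
 * Illustrative pattern (not part of the original source): paths that need a
 * fresh rq clock refresh it once under the rq lock before reading it, e.g.:
 *
 *	rq_lock(rq, &rf);
 *	update_rq_clock(rq);
 *	... rq_clock_task(rq) is now current ...
 *	rq_unlock(rq, &rf);
 *
 * hrtick() below follows exactly this sequence.
 */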

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _val = *_ptr;				\
									\
		do {							\
		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
	_val;								\
})
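
/*
 * Illustrative usage (not part of the original source):
 *
 *	typeof(ti->flags) old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
 *
 * 'old' holds the value observed before the OR was applied, which is how
 * set_nr_and_not_polling() below tests _TIF_POLLING_NRFLAG.
 */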

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
			break;
	}
	return true;
}

#else
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static inline bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending whether or not the @task is already
 * queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}
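
/*
 * Illustrative usage sketch (not part of the original source): a typical
 * caller batches wakeups under a lock and issues them after dropping it:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	spin_lock(&some_lock);
 *	...
 *	wake_q_add(&wake_q, task);	// grabs a task reference
 *	spin_unlock(&some_lock);
 *	wake_up_q(&wake_q);		// wakes and drops the references
 *
 * 'some_lock' stands in for whatever lock the caller already holds.
 */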

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_rq_held(rq);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;
	const struct cpumask *hk_mask;

	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
	cpu = default_cpu;
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU.  If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
	 * forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks
	 * left; if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
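
/*
 * Illustrative call (not part of the original source): visiting every group
 * below the root with a 'down' visitor only, using tg_nop() for 'up':
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, my_down_visitor, tg_nop, NULL);
 *	rcu_read_unlock();
 *
 * 'my_down_visitor' is a hypothetical tg_visitor; a non-zero return value
 * from either visitor aborts the walk.
 */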

static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
static unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less or equal than these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since there are actual users making use of uclamp functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
}
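
/*
 * Worked example (illustrative, assuming the default of 5 buckets): with
 * SCHED_CAPACITY_SCALE == 1024 and UCLAMP_BUCKETS == 5, UCLAMP_BUCKET_DELTA
 * is DIV_ROUND_CLOSEST(1024, 5) == 205, so a clamp value of 300 maps to
 * bucket 300 / 205 == 1 and a value of 1024 maps to
 * min(1024 / 205, UCLAMP_BUCKETS - 1) == 4.
 */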

static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;
	return SCHED_CAPACITY_SCALE;
}

static inline void uclamp_se_set(struct uclamp_se *uc_se,
				 unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->bucket_id = uclamp_bucket_id(value);
	uc_se->user_defined = user_defined;
}

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * top most bucket with tasks in.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
	unsigned int default_util_min;
	struct uclamp_se *uc_se;

	lockdep_assert_held(&p->pi_lock);

	uc_se = &p->uclamp_req[UCLAMP_MIN];

	/* Only sync if user didn't override the default */
	if (uc_se->user_defined)
		return;

	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
	uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	if (!rt_task(p))
		return;

	/* Protect updates to p->uclamp_* */
	rq = task_rq_lock(p, &rf);
	__uclamp_update_util_min_rt_default(p);
	task_rq_unlock(rq, p, &rf);
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	/* Copy by value as we could modify it */
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	unsigned int tg_min, tg_max, value;

	/*
	 * Tasks in autogroups or root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
	value = uc_req.value;
	value = clamp(value, tg_min, tg_max);
	uclamp_se_set(&uc_req, value, false);
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space, track
 * within each bucket the maximum value for tasks refcounted in it.
 * This "local max aggregation" allows to track the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_rq_held(rq);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > READ_ONCE(uc_rq->value))
		WRITE_ONCE(uc_rq->value, uc_se->value);
}

/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_rq_held(rq);

	/*
	 * If sched_uclamp_used was enabled after task @p was enqueued,
	 * we could end up with unbalanced call to uclamp_rq_dec_id().
	 *
	 * In this case the uc_se->active flag should be false since no uclamp
	 * accounting was performed at enqueue time and we can just return
	 * here.
	 *
	 * Need to be careful of the following enqueue/dequeue ordering
	 * problem too
	 *
	 *	enqueue(taskA)
	 *	// sched_uclamp_used gets enabled
	 *	enqueue(taskB)
	 *	dequeue(taskA)
	 *	// Must not decrement bucket->tasks here
	 *	dequeue(taskB)
	 *
	 * where we could end up with stale data in uc_se and
	 * bucket[uc_se->bucket_id].
	 *
	 * The following check here eliminates the possibility of such race.
	 */
	if (unlikely(!uc_se->active))
		return;

	bucket = &uc_rq->bucket[uc_se->bucket_id];

	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;

	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = READ_ONCE(uc_rq->value);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fixup the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		WRITE_ONCE(uc_rq->value, bkt_clamp);
	}
}

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
				      enum uclamp_id clamp_id)
{
	if (!p->uclamp[clamp_id].active)
		return;

	uclamp_rq_dec_id(rq, p, clamp_id);
	uclamp_rq_inc_id(rq, p, clamp_id);

	/*
	 * Make sure to clear the idle flag if we've transiently reached 0
	 * active tasks on rq.
	 */
	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void
uclamp_update_active(struct task_struct *p)
{
	enum uclamp_id clamp_id;
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	for_each_clamp_id(clamp_id)
		uclamp_rq_reinc_id(rq, p, clamp_id);

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it)))
		uclamp_update_active(p);
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
#endif

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_UCLAMP_TASK
#ifdef CONFIG_UCLAMP_TASK_GROUP
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	rcu_read_lock();
	cpu_util_update_eff(&root_task_group.css);
	rcu_read_unlock();
}
#else
static void uclamp_update_root_tg(void) { }
#endif

static void uclamp_sync_util_min_rt_default(void)
{
	struct task_struct *g, *p;

	/*
	 * copy_process()			sysctl_uclamp
	 *					  uclamp_min_rt = X;
	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
	 *   // link thread			  smp_mb__after_spinlock()
	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
	 *   sched_post_fork()			  for_each_process_thread()
	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
	 *
	 * Ensures that either sched_post_fork() will observe the new
	 * uclamp_min_rt or for_each_process_thread() will observe the new
	 * task.
	 */
	read_lock(&tasklist_lock);
	smp_mb__after_spinlock();
	read_unlock(&tasklist_lock);

	rcu_read_lock();
	for_each_process_thread(g, p)
		uclamp_update_util_min_rt_default(p);
	rcu_read_unlock();
}

static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max, old_min_rt;
	int result;

	mutex_lock(&uclamp_mutex);
	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;
	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		goto done;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {

		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_update_root_tg();
	}

	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_sync_util_min_rt_default();
	}

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */

	goto done;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
done:
	mutex_unlock(&uclamp_mutex);

	return result;
}
#endif
#endif
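
/*
 * Illustrative use of the knobs handled above (not part of the original
 * source), capping all clamp requests to half of the capacity scale:
 *
 *	# echo 512 > /proc/sys/kernel/sched_util_clamp_min
 *	# echo 512 > /proc/sys/kernel/sched_util_clamp_max
 *
 * The first write that actually changes a value also enables the
 * sched_uclamp_used static key.
 */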

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT by default have a 100% boost value that could be modified
		 * at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);

	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}
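
/*
 * Illustrative userspace request validated and applied by the functions
 * above (not part of the original source): boosting the calling thread's
 * minimum utilization via sched_setattr(), typically issued through
 * syscall(SYS_sched_setattr, ...):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_flags	= SCHED_FLAG_UTIL_CLAMP_MIN |
 *				  SCHED_FLAG_KEEP_ALL,
 *		.sched_util_min	= 256,
 *	};
 *	sched_setattr(0, &attr, 0);	// pid 0 == calling thread
 */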
1960 */ 1961 for_each_clamp_id(clamp_id) 1962 p->uclamp[clamp_id].active = false; 1963 1964 if (likely(!p->sched_reset_on_fork)) 1965 return; 1966 1967 for_each_clamp_id(clamp_id) { 1968 uclamp_se_set(&p->uclamp_req[clamp_id], 1969 uclamp_none(clamp_id), false); 1970 } 1971 } 1972 1973 static void uclamp_post_fork(struct task_struct *p) 1974 { 1975 uclamp_update_util_min_rt_default(p); 1976 } 1977 1978 static void __init init_uclamp_rq(struct rq *rq) 1979 { 1980 enum uclamp_id clamp_id; 1981 struct uclamp_rq *uc_rq = rq->uclamp; 1982 1983 for_each_clamp_id(clamp_id) { 1984 uc_rq[clamp_id] = (struct uclamp_rq) { 1985 .value = uclamp_none(clamp_id) 1986 }; 1987 } 1988 1989 rq->uclamp_flags = UCLAMP_FLAG_IDLE; 1990 } 1991 1992 static void __init init_uclamp(void) 1993 { 1994 struct uclamp_se uc_max = {}; 1995 enum uclamp_id clamp_id; 1996 int cpu; 1997 1998 for_each_possible_cpu(cpu) 1999 init_uclamp_rq(cpu_rq(cpu)); 2000 2001 for_each_clamp_id(clamp_id) { 2002 uclamp_se_set(&init_task.uclamp_req[clamp_id], 2003 uclamp_none(clamp_id), false); 2004 } 2005 2006 /* System defaults allow max clamp values for both indexes */ 2007 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false); 2008 for_each_clamp_id(clamp_id) { 2009 uclamp_default[clamp_id] = uc_max; 2010 #ifdef CONFIG_UCLAMP_TASK_GROUP 2011 root_task_group.uclamp_req[clamp_id] = uc_max; 2012 root_task_group.uclamp[clamp_id] = uc_max; 2013 #endif 2014 } 2015 } 2016 2017 #else /* CONFIG_UCLAMP_TASK */ 2018 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } 2019 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } 2020 static inline int uclamp_validate(struct task_struct *p, 2021 const struct sched_attr *attr) 2022 { 2023 return -EOPNOTSUPP; 2024 } 2025 static void __setscheduler_uclamp(struct task_struct *p, 2026 const struct sched_attr *attr) { } 2027 static inline void uclamp_fork(struct task_struct *p) { } 2028 static inline void uclamp_post_fork(struct task_struct *p) { } 2029 static inline void init_uclamp(void) { } 2030 #endif /* CONFIG_UCLAMP_TASK */ 2031 2032 bool sched_task_on_rq(struct task_struct *p) 2033 { 2034 return task_on_rq_queued(p); 2035 } 2036 2037 unsigned long get_wchan(struct task_struct *p) 2038 { 2039 unsigned long ip = 0; 2040 unsigned int state; 2041 2042 if (!p || p == current) 2043 return 0; 2044 2045 /* Only get wchan if task is blocked and we can keep it that way. 
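Holding p->pi_lock keeps try_to_wake_up() out, so the task cannot start running while __get_wchan() examines its stack.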
*/ 2046 raw_spin_lock_irq(&p->pi_lock); 2047 state = READ_ONCE(p->__state); 2048 smp_rmb(); /* see try_to_wake_up() */ 2049 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) 2050 ip = __get_wchan(p); 2051 raw_spin_unlock_irq(&p->pi_lock); 2052 2053 return ip; 2054 } 2055 2056 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 2057 { 2058 if (!(flags & ENQUEUE_NOCLOCK)) 2059 update_rq_clock(rq); 2060 2061 if (!(flags & ENQUEUE_RESTORE)) { 2062 sched_info_enqueue(rq, p); 2063 psi_enqueue(p, flags & ENQUEUE_WAKEUP); 2064 } 2065 2066 uclamp_rq_inc(rq, p); 2067 p->sched_class->enqueue_task(rq, p, flags); 2068 2069 if (sched_core_enabled(rq)) 2070 sched_core_enqueue(rq, p); 2071 } 2072 2073 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 2074 { 2075 if (sched_core_enabled(rq)) 2076 sched_core_dequeue(rq, p, flags); 2077 2078 if (!(flags & DEQUEUE_NOCLOCK)) 2079 update_rq_clock(rq); 2080 2081 if (!(flags & DEQUEUE_SAVE)) { 2082 sched_info_dequeue(rq, p); 2083 psi_dequeue(p, flags & DEQUEUE_SLEEP); 2084 } 2085 2086 uclamp_rq_dec(rq, p); 2087 p->sched_class->dequeue_task(rq, p, flags); 2088 } 2089 2090 void activate_task(struct rq *rq, struct task_struct *p, int flags) 2091 { 2092 enqueue_task(rq, p, flags); 2093 2094 p->on_rq = TASK_ON_RQ_QUEUED; 2095 } 2096 2097 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 2098 { 2099 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING; 2100 2101 dequeue_task(rq, p, flags); 2102 } 2103 2104 static inline int __normal_prio(int policy, int rt_prio, int nice) 2105 { 2106 int prio; 2107 2108 if (dl_policy(policy)) 2109 prio = MAX_DL_PRIO - 1; 2110 else if (rt_policy(policy)) 2111 prio = MAX_RT_PRIO - 1 - rt_prio; 2112 else 2113 prio = NICE_TO_PRIO(nice); 2114 2115 return prio; 2116 } 2117 2118 /* 2119 * Calculate the expected normal priority: i.e. priority 2120 * without taking RT-inheritance into account. Might be 2121 * boosted by interactivity modifiers. Changes upon fork, 2122 * setprio syscalls, and whenever the interactivity 2123 * estimator recalculates. 2124 */ 2125 static inline int normal_prio(struct task_struct *p) 2126 { 2127 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio)); 2128 } 2129 2130 /* 2131 * Calculate the current priority, i.e. the priority 2132 * taken into account by the scheduler. This value might 2133 * be boosted by RT tasks, or might be boosted by 2134 * interactivity modifiers. Will be RT if the task got 2135 * RT-boosted. If not then it returns p->normal_prio. 2136 */ 2137 static int effective_prio(struct task_struct *p) 2138 { 2139 p->normal_prio = normal_prio(p); 2140 /* 2141 * If we are RT tasks or we were boosted to RT priority, 2142 * keep the priority unchanged. Otherwise, update priority 2143 * to the normal priority: 2144 */ 2145 if (!rt_prio(p->prio)) 2146 return p->normal_prio; 2147 return p->prio; 2148 } 2149 2150 /** 2151 * task_curr - is this task currently executing on a CPU? 2152 * @p: the task in question. 2153 * 2154 * Return: 1 if the task is currently executing. 0 otherwise. 2155 */ 2156 inline int task_curr(const struct task_struct *p) 2157 { 2158 return cpu_curr(task_cpu(p)) == p; 2159 } 2160 2161 /* 2162 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, 2163 * use the balance_callback list if you want balancing. 2164 * 2165 * this means any call to check_class_changed() must be followed by a call to 2166 * balance_callback(). 
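* The balance callbacks queued by switched_from()/switched_to() perform the actual push/pull work once the rq lock can be dropped.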
2167 */ 2168 static inline void check_class_changed(struct rq *rq, struct task_struct *p, 2169 const struct sched_class *prev_class, 2170 int oldprio) 2171 { 2172 if (prev_class != p->sched_class) { 2173 if (prev_class->switched_from) 2174 prev_class->switched_from(rq, p); 2175 2176 p->sched_class->switched_to(rq, p); 2177 } else if (oldprio != p->prio || dl_task(p)) 2178 p->sched_class->prio_changed(rq, p, oldprio); 2179 } 2180 2181 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) 2182 { 2183 if (p->sched_class == rq->curr->sched_class) 2184 rq->curr->sched_class->check_preempt_curr(rq, p, flags); 2185 else if (sched_class_above(p->sched_class, rq->curr->sched_class)) 2186 resched_curr(rq); 2187 2188 /* 2189 * A queue event has occurred, and we're going to schedule. In 2190 * this case, we can save a useless back to back clock update. 2191 */ 2192 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) 2193 rq_clock_skip_update(rq); 2194 } 2195 2196 #ifdef CONFIG_SMP 2197 2198 static void 2199 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags); 2200 2201 static int __set_cpus_allowed_ptr(struct task_struct *p, 2202 const struct cpumask *new_mask, 2203 u32 flags); 2204 2205 static void migrate_disable_switch(struct rq *rq, struct task_struct *p) 2206 { 2207 if (likely(!p->migration_disabled)) 2208 return; 2209 2210 if (p->cpus_ptr != &p->cpus_mask) 2211 return; 2212 2213 /* 2214 * Violates locking rules! see comment in __do_set_cpus_allowed(). 2215 */ 2216 __do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE); 2217 } 2218 2219 void migrate_disable(void) 2220 { 2221 struct task_struct *p = current; 2222 2223 if (p->migration_disabled) { 2224 p->migration_disabled++; 2225 return; 2226 } 2227 2228 preempt_disable(); 2229 this_rq()->nr_pinned++; 2230 p->migration_disabled = 1; 2231 preempt_enable(); 2232 } 2233 EXPORT_SYMBOL_GPL(migrate_disable); 2234 2235 void migrate_enable(void) 2236 { 2237 struct task_struct *p = current; 2238 2239 if (p->migration_disabled > 1) { 2240 p->migration_disabled--; 2241 return; 2242 } 2243 2244 if (WARN_ON_ONCE(!p->migration_disabled)) 2245 return; 2246 2247 /* 2248 * Ensure stop_task runs either before or after this, and that 2249 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule(). 2250 */ 2251 preempt_disable(); 2252 if (p->cpus_ptr != &p->cpus_mask) 2253 __set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE); 2254 /* 2255 * Mustn't clear migration_disabled() until cpus_ptr points back at the 2256 * regular cpus_mask, otherwise things that race (eg. 2257 * select_fallback_rq) get confused. 2258 */ 2259 barrier(); 2260 p->migration_disabled = 0; 2261 this_rq()->nr_pinned--; 2262 preempt_enable(); 2263 } 2264 EXPORT_SYMBOL_GPL(migrate_enable); 2265 2266 static inline bool rq_has_pinned_tasks(struct rq *rq) 2267 { 2268 return rq->nr_pinned; 2269 } 2270 2271 /* 2272 * Per-CPU kthreads are allowed to run on !active && online CPUs, see 2273 * __set_cpus_allowed_ptr() and select_fallback_rq(). 2274 */ 2275 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) 2276 { 2277 /* When not in the task's cpumask, no point in looking further. */ 2278 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 2279 return false; 2280 2281 /* migrate_disabled() must be allowed to finish. */ 2282 if (is_migration_disabled(p)) 2283 return cpu_online(cpu); 2284 2285 /* Non kernel threads are not allowed during either online or offline. 
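They need a fully active CPU which task_cpu_possible() permits for them.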
*/ 2286 if (!(p->flags & PF_KTHREAD)) 2287 return cpu_active(cpu) && task_cpu_possible(cpu, p); 2288 2289 /* KTHREAD_IS_PER_CPU is always allowed. */ 2290 if (kthread_is_per_cpu(p)) 2291 return cpu_online(cpu); 2292 2293 /* Regular kernel threads don't get to stay during offline. */ 2294 if (cpu_dying(cpu)) 2295 return false; 2296 2297 /* But are allowed during online. */ 2298 return cpu_online(cpu); 2299 } 2300 2301 /* 2302 * This is how migration works: 2303 * 2304 * 1) we invoke migration_cpu_stop() on the target CPU using 2305 * stop_one_cpu(). 2306 * 2) stopper starts to run (implicitly forcing the migrated thread 2307 * off the CPU) 2308 * 3) it checks whether the migrated task is still in the wrong runqueue. 2309 * 4) if it's in the wrong runqueue then the migration thread removes 2310 * it and puts it into the right queue. 2311 * 5) stopper completes and stop_one_cpu() returns and the migration 2312 * is done. 2313 */ 2314 2315 /* 2316 * move_queued_task - move a queued task to new rq. 2317 * 2318 * Returns (locked) new rq. Old rq's lock is released. 2319 */ 2320 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, 2321 struct task_struct *p, int new_cpu) 2322 { 2323 lockdep_assert_rq_held(rq); 2324 2325 deactivate_task(rq, p, DEQUEUE_NOCLOCK); 2326 set_task_cpu(p, new_cpu); 2327 rq_unlock(rq, rf); 2328 2329 rq = cpu_rq(new_cpu); 2330 2331 rq_lock(rq, rf); 2332 BUG_ON(task_cpu(p) != new_cpu); 2333 activate_task(rq, p, 0); 2334 check_preempt_curr(rq, p, 0); 2335 2336 return rq; 2337 } 2338 2339 struct migration_arg { 2340 struct task_struct *task; 2341 int dest_cpu; 2342 struct set_affinity_pending *pending; 2343 }; 2344 2345 /* 2346 * @refs: number of wait_for_completion() 2347 * @stop_pending: is @stop_work in use 2348 */ 2349 struct set_affinity_pending { 2350 refcount_t refs; 2351 unsigned int stop_pending; 2352 struct completion done; 2353 struct cpu_stop_work stop_work; 2354 struct migration_arg arg; 2355 }; 2356 2357 /* 2358 * Move (not current) task off this CPU, onto the destination CPU. We're doing 2359 * this because either it can't run here any more (set_cpus_allowed() 2360 * away from this CPU, or CPU going down), or because we're 2361 * attempting to rebalance this task on exec (sched_exec). 2362 * 2363 * So we race with normal scheduler movements, but that's OK, as long 2364 * as the task is no longer on this CPU. 2365 */ 2366 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, 2367 struct task_struct *p, int dest_cpu) 2368 { 2369 /* Affinity changed (again). */ 2370 if (!is_cpu_allowed(p, dest_cpu)) 2371 return rq; 2372 2373 update_rq_clock(rq); 2374 rq = move_queued_task(rq, rf, p, dest_cpu); 2375 2376 return rq; 2377 } 2378 2379 /* 2380 * migration_cpu_stop - this will be executed by a highprio stopper thread 2381 * and performs thread migration by bumping thread off CPU then 2382 * 'pushing' onto another runqueue. 2383 */ 2384 static int migration_cpu_stop(void *data) 2385 { 2386 struct migration_arg *arg = data; 2387 struct set_affinity_pending *pending = arg->pending; 2388 struct task_struct *p = arg->task; 2389 struct rq *rq = this_rq(); 2390 bool complete = false; 2391 struct rq_flags rf; 2392 2393 /* 2394 * The original target CPU might have gone down and we might 2395 * be on another CPU but it doesn't matter. 
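* Everything relevant is re-checked below under p->pi_lock and rq->lock before any migration is attempted.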
2396 */ 2397 local_irq_save(rf.flags); 2398 /* 2399 * We need to explicitly wake pending tasks before running 2400 * __migrate_task() such that we will not miss enforcing cpus_ptr 2401 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 2402 */ 2403 flush_smp_call_function_queue(); 2404 2405 raw_spin_lock(&p->pi_lock); 2406 rq_lock(rq, &rf); 2407 2408 /* 2409 * If we were passed a pending, then ->stop_pending was set, thus 2410 * p->migration_pending must have remained stable. 2411 */ 2412 WARN_ON_ONCE(pending && pending != p->migration_pending); 2413 2414 /* 2415 * If task_rq(p) != rq, it cannot be migrated here, because we're 2416 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because 2417 * we're holding p->pi_lock. 2418 */ 2419 if (task_rq(p) == rq) { 2420 if (is_migration_disabled(p)) 2421 goto out; 2422 2423 if (pending) { 2424 p->migration_pending = NULL; 2425 complete = true; 2426 2427 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) 2428 goto out; 2429 } 2430 2431 if (task_on_rq_queued(p)) 2432 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); 2433 else 2434 p->wake_cpu = arg->dest_cpu; 2435 2436 /* 2437 * XXX __migrate_task() can fail, at which point we might end 2438 * up running on a dodgy CPU, AFAICT this can only happen 2439 * during CPU hotplug, at which point we'll get pushed out 2440 * anyway, so it's probably not a big deal. 2441 */ 2442 2443 } else if (pending) { 2444 /* 2445 * This happens when we get migrated between migrate_enable()'s 2446 * preempt_enable() and scheduling the stopper task. At that 2447 * point we're a regular task again and not current anymore. 2448 * 2449 * A !PREEMPT kernel has a giant hole here, which makes it far 2450 * more likely. 2451 */ 2452 2453 /* 2454 * The task moved before the stopper got to run. We're holding 2455 * ->pi_lock, so the allowed mask is stable - if it got 2456 * somewhere allowed, we're done. 2457 */ 2458 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { 2459 p->migration_pending = NULL; 2460 complete = true; 2461 goto out; 2462 } 2463 2464 /* 2465 * When migrate_enable() hits a rq mis-match we can't reliably 2466 * determine is_migration_disabled() and so have to chase after 2467 * it. 
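* Re-queue the stopper on the task's current CPU and let that invocation finish the job.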
2468 */ 2469 WARN_ON_ONCE(!pending->stop_pending); 2470 task_rq_unlock(rq, p, &rf); 2471 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, 2472 &pending->arg, &pending->stop_work); 2473 return 0; 2474 } 2475 out: 2476 if (pending) 2477 pending->stop_pending = false; 2478 task_rq_unlock(rq, p, &rf); 2479 2480 if (complete) 2481 complete_all(&pending->done); 2482 2483 return 0; 2484 } 2485 2486 int push_cpu_stop(void *arg) 2487 { 2488 struct rq *lowest_rq = NULL, *rq = this_rq(); 2489 struct task_struct *p = arg; 2490 2491 raw_spin_lock_irq(&p->pi_lock); 2492 raw_spin_rq_lock(rq); 2493 2494 if (task_rq(p) != rq) 2495 goto out_unlock; 2496 2497 if (is_migration_disabled(p)) { 2498 p->migration_flags |= MDF_PUSH; 2499 goto out_unlock; 2500 } 2501 2502 p->migration_flags &= ~MDF_PUSH; 2503 2504 if (p->sched_class->find_lock_rq) 2505 lowest_rq = p->sched_class->find_lock_rq(p, rq); 2506 2507 if (!lowest_rq) 2508 goto out_unlock; 2509 2510 // XXX validate p is still the highest prio task 2511 if (task_rq(p) == rq) { 2512 deactivate_task(rq, p, 0); 2513 set_task_cpu(p, lowest_rq->cpu); 2514 activate_task(lowest_rq, p, 0); 2515 resched_curr(lowest_rq); 2516 } 2517 2518 double_unlock_balance(rq, lowest_rq); 2519 2520 out_unlock: 2521 rq->push_busy = false; 2522 raw_spin_rq_unlock(rq); 2523 raw_spin_unlock_irq(&p->pi_lock); 2524 2525 put_task_struct(p); 2526 return 0; 2527 } 2528 2529 /* 2530 * sched_class::set_cpus_allowed must do the below, but is not required to 2531 * actually call this function. 2532 */ 2533 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags) 2534 { 2535 if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { 2536 p->cpus_ptr = new_mask; 2537 return; 2538 } 2539 2540 cpumask_copy(&p->cpus_mask, new_mask); 2541 p->nr_cpus_allowed = cpumask_weight(new_mask); 2542 } 2543 2544 static void 2545 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags) 2546 { 2547 struct rq *rq = task_rq(p); 2548 bool queued, running; 2549 2550 /* 2551 * This here violates the locking rules for affinity, since we're only 2552 * supposed to change these variables while holding both rq->lock and 2553 * p->pi_lock. 2554 * 2555 * HOWEVER, it magically works, because ttwu() is the only code that 2556 * accesses these variables under p->pi_lock and only does so after 2557 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() 2558 * before finish_task(). 2559 * 2560 * XXX do further audits, this smells like something putrid. 2561 */ 2562 if (flags & SCA_MIGRATE_DISABLE) 2563 SCHED_WARN_ON(!p->on_cpu); 2564 else 2565 lockdep_assert_held(&p->pi_lock); 2566 2567 queued = task_on_rq_queued(p); 2568 running = task_current(rq, p); 2569 2570 if (queued) { 2571 /* 2572 * Because __kthread_bind() calls this on blocked tasks without 2573 * holding rq->lock. 
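* Only assert (and rely on) the rq lock in the queued case, where we actually dequeue and re-enqueue the task.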
2574 */ 2575 lockdep_assert_rq_held(rq); 2576 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 2577 } 2578 if (running) 2579 put_prev_task(rq, p); 2580 2581 p->sched_class->set_cpus_allowed(p, new_mask, flags); 2582 2583 if (queued) 2584 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 2585 if (running) 2586 set_next_task(rq, p); 2587 } 2588 2589 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 2590 { 2591 __do_set_cpus_allowed(p, new_mask, 0); 2592 } 2593 2594 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, 2595 int node) 2596 { 2597 if (!src->user_cpus_ptr) 2598 return 0; 2599 2600 dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node); 2601 if (!dst->user_cpus_ptr) 2602 return -ENOMEM; 2603 2604 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); 2605 return 0; 2606 } 2607 2608 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p) 2609 { 2610 struct cpumask *user_mask = NULL; 2611 2612 swap(p->user_cpus_ptr, user_mask); 2613 2614 return user_mask; 2615 } 2616 2617 void release_user_cpus_ptr(struct task_struct *p) 2618 { 2619 kfree(clear_user_cpus_ptr(p)); 2620 } 2621 2622 /* 2623 * This function is wildly self concurrent; here be dragons. 2624 * 2625 * 2626 * When given a valid mask, __set_cpus_allowed_ptr() must block until the 2627 * designated task is enqueued on an allowed CPU. If that task is currently 2628 * running, we have to kick it out using the CPU stopper. 2629 * 2630 * Migrate-Disable comes along and tramples all over our nice sandcastle. 2631 * Consider: 2632 * 2633 * Initial conditions: P0->cpus_mask = [0, 1] 2634 * 2635 * P0@CPU0 P1 2636 * 2637 * migrate_disable(); 2638 * <preempted> 2639 * set_cpus_allowed_ptr(P0, [1]); 2640 * 2641 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes 2642 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region). 2643 * This means we need the following scheme: 2644 * 2645 * P0@CPU0 P1 2646 * 2647 * migrate_disable(); 2648 * <preempted> 2649 * set_cpus_allowed_ptr(P0, [1]); 2650 * <blocks> 2651 * <resumes> 2652 * migrate_enable(); 2653 * __set_cpus_allowed_ptr(); 2654 * <wakes local stopper> 2655 * `--> <woken on migration completion> 2656 * 2657 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple 2658 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any 2659 * task p are serialized by p->pi_lock, which we can leverage: the one that 2660 * should come into effect at the end of the Migrate-Disable region is the last 2661 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask), 2662 * but we still need to properly signal those waiting tasks at the appropriate 2663 * moment. 2664 * 2665 * This is implemented using struct set_affinity_pending. The first 2666 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will 2667 * setup an instance of that struct and install it on the targeted task_struct. 2668 * Any and all further callers will reuse that instance. Those then wait for 2669 * a completion signaled at the tail of the CPU stopper callback (1), triggered 2670 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()). 2671 * 2672 * 2673 * (1) In the cases covered above. There is one more where the completion is 2674 * signaled within affine_move_task() itself: when a subsequent affinity request 2675 * occurs after the stopper bailed out due to the targeted task still being 2676 * Migrate-Disable. 
Consider: 2677 * 2678 * Initial conditions: P0->cpus_mask = [0, 1] 2679 * 2680 * CPU0 P1 P2 2681 * <P0> 2682 * migrate_disable(); 2683 * <preempted> 2684 * set_cpus_allowed_ptr(P0, [1]); 2685 * <blocks> 2686 * <migration/0> 2687 * migration_cpu_stop() 2688 * is_migration_disabled() 2689 * <bails> 2690 * set_cpus_allowed_ptr(P0, [0, 1]); 2691 * <signal completion> 2692 * <awakes> 2693 * 2694 * Note that the above is safe vs a concurrent migrate_enable(), as any 2695 * pending affinity completion is preceded by an uninstallation of 2696 * p->migration_pending done with p->pi_lock held. 2697 */ 2698 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, 2699 int dest_cpu, unsigned int flags) 2700 { 2701 struct set_affinity_pending my_pending = { }, *pending = NULL; 2702 bool stop_pending, complete = false; 2703 2704 /* Can the task run on the task's current CPU? If so, we're done */ 2705 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { 2706 struct task_struct *push_task = NULL; 2707 2708 if ((flags & SCA_MIGRATE_ENABLE) && 2709 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { 2710 rq->push_busy = true; 2711 push_task = get_task_struct(p); 2712 } 2713 2714 /* 2715 * If there are pending waiters, but no pending stop_work, 2716 * then complete now. 2717 */ 2718 pending = p->migration_pending; 2719 if (pending && !pending->stop_pending) { 2720 p->migration_pending = NULL; 2721 complete = true; 2722 } 2723 2724 task_rq_unlock(rq, p, rf); 2725 2726 if (push_task) { 2727 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, 2728 p, &rq->push_work); 2729 } 2730 2731 if (complete) 2732 complete_all(&pending->done); 2733 2734 return 0; 2735 } 2736 2737 if (!(flags & SCA_MIGRATE_ENABLE)) { 2738 /* serialized by p->pi_lock */ 2739 if (!p->migration_pending) { 2740 /* Install the request */ 2741 refcount_set(&my_pending.refs, 1); 2742 init_completion(&my_pending.done); 2743 my_pending.arg = (struct migration_arg) { 2744 .task = p, 2745 .dest_cpu = dest_cpu, 2746 .pending = &my_pending, 2747 }; 2748 2749 p->migration_pending = &my_pending; 2750 } else { 2751 pending = p->migration_pending; 2752 refcount_inc(&pending->refs); 2753 /* 2754 * Affinity has changed, but we've already installed a 2755 * pending. migration_cpu_stop() *must* see this, else 2756 * we risk a completion of the pending despite having a 2757 * task on a disallowed CPU. 2758 * 2759 * Serialized by p->pi_lock, so this is safe. 2760 */ 2761 pending->arg.dest_cpu = dest_cpu; 2762 } 2763 } 2764 pending = p->migration_pending; 2765 /* 2766 * - !MIGRATE_ENABLE: 2767 * we'll have installed a pending if there wasn't one already. 2768 * 2769 * - MIGRATE_ENABLE: 2770 * we're here because the current CPU isn't matching anymore, 2771 * the only way that can happen is because of a concurrent 2772 * set_cpus_allowed_ptr() call, which should then still be 2773 * pending completion. 2774 * 2775 * Either way, we really should have a @pending here. 2776 */ 2777 if (WARN_ON_ONCE(!pending)) { 2778 task_rq_unlock(rq, p, rf); 2779 return -EINVAL; 2780 } 2781 2782 if (task_running(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { 2783 /* 2784 * MIGRATE_ENABLE gets here because 'p == current', but for 2785 * anything else we cannot do is_migration_disabled(), punt 2786 * and have the stopper function handle it all race-free. 
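* Only one stopper invocation may be outstanding per pending request, hence the stop_pending bookkeeping below.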
2787 */ 2788 stop_pending = pending->stop_pending; 2789 if (!stop_pending) 2790 pending->stop_pending = true; 2791 2792 if (flags & SCA_MIGRATE_ENABLE) 2793 p->migration_flags &= ~MDF_PUSH; 2794 2795 task_rq_unlock(rq, p, rf); 2796 2797 if (!stop_pending) { 2798 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, 2799 &pending->arg, &pending->stop_work); 2800 } 2801 2802 if (flags & SCA_MIGRATE_ENABLE) 2803 return 0; 2804 } else { 2805 2806 if (!is_migration_disabled(p)) { 2807 if (task_on_rq_queued(p)) 2808 rq = move_queued_task(rq, rf, p, dest_cpu); 2809 2810 if (!pending->stop_pending) { 2811 p->migration_pending = NULL; 2812 complete = true; 2813 } 2814 } 2815 task_rq_unlock(rq, p, rf); 2816 2817 if (complete) 2818 complete_all(&pending->done); 2819 } 2820 2821 wait_for_completion(&pending->done); 2822 2823 if (refcount_dec_and_test(&pending->refs)) 2824 wake_up_var(&pending->refs); /* No UaF, just an address */ 2825 2826 /* 2827 * Block the original owner of &pending until all subsequent callers 2828 * have seen the completion and decremented the refcount 2829 */ 2830 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs)); 2831 2832 /* ARGH */ 2833 WARN_ON_ONCE(my_pending.stop_pending); 2834 2835 return 0; 2836 } 2837 2838 /* 2839 * Called with both p->pi_lock and rq->lock held; drops both before returning. 2840 */ 2841 static int __set_cpus_allowed_ptr_locked(struct task_struct *p, 2842 const struct cpumask *new_mask, 2843 u32 flags, 2844 struct rq *rq, 2845 struct rq_flags *rf) 2846 __releases(rq->lock) 2847 __releases(p->pi_lock) 2848 { 2849 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); 2850 const struct cpumask *cpu_valid_mask = cpu_active_mask; 2851 bool kthread = p->flags & PF_KTHREAD; 2852 struct cpumask *user_mask = NULL; 2853 unsigned int dest_cpu; 2854 int ret = 0; 2855 2856 update_rq_clock(rq); 2857 2858 if (kthread || is_migration_disabled(p)) { 2859 /* 2860 * Kernel threads are allowed on online && !active CPUs, 2861 * however, during cpu-hot-unplug, even these might get pushed 2862 * away if not KTHREAD_IS_PER_CPU. 2863 * 2864 * Specifically, migration_disabled() tasks must not fail the 2865 * cpumask_any_and_distribute() pick below, esp. so on 2866 * SCA_MIGRATE_ENABLE, otherwise we'll not call 2867 * set_cpus_allowed_common() and actually reset p->cpus_ptr. 2868 */ 2869 cpu_valid_mask = cpu_online_mask; 2870 } 2871 2872 if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) { 2873 ret = -EINVAL; 2874 goto out; 2875 } 2876 2877 /* 2878 * Must re-check here, to close a race against __kthread_bind(), 2879 * sched_setaffinity() is not guaranteed to observe the flag. 2880 */ 2881 if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { 2882 ret = -EINVAL; 2883 goto out; 2884 } 2885 2886 if (!(flags & SCA_MIGRATE_ENABLE)) { 2887 if (cpumask_equal(&p->cpus_mask, new_mask)) 2888 goto out; 2889 2890 if (WARN_ON_ONCE(p == current && 2891 is_migration_disabled(p) && 2892 !cpumask_test_cpu(task_cpu(p), new_mask))) { 2893 ret = -EBUSY; 2894 goto out; 2895 } 2896 } 2897 2898 /* 2899 * Picking a ~random cpu helps in cases where we are changing affinity 2900 * for groups of tasks (ie. cpuset), so that load balancing is not 2901 * immediately required to distribute the tasks within their new mask. 
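* cpumask_any_and_distribute() remembers the previously chosen CPU and starts the next search after it, so successive calls spread their picks across the mask.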
2902 */ 2903 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask); 2904 if (dest_cpu >= nr_cpu_ids) { 2905 ret = -EINVAL; 2906 goto out; 2907 } 2908 2909 __do_set_cpus_allowed(p, new_mask, flags); 2910 2911 if (flags & SCA_USER) 2912 user_mask = clear_user_cpus_ptr(p); 2913 2914 ret = affine_move_task(rq, p, rf, dest_cpu, flags); 2915 2916 kfree(user_mask); 2917 2918 return ret; 2919 2920 out: 2921 task_rq_unlock(rq, p, rf); 2922 2923 return ret; 2924 } 2925 2926 /* 2927 * Change a given task's CPU affinity. Migrate the thread to a 2928 * proper CPU and schedule it away if the CPU it's executing on 2929 * is removed from the allowed bitmask. 2930 * 2931 * NOTE: the caller must have a valid reference to the task, the 2932 * task must not exit() & deallocate itself prematurely. The 2933 * call is not atomic; no spinlocks may be held. 2934 */ 2935 static int __set_cpus_allowed_ptr(struct task_struct *p, 2936 const struct cpumask *new_mask, u32 flags) 2937 { 2938 struct rq_flags rf; 2939 struct rq *rq; 2940 2941 rq = task_rq_lock(p, &rf); 2942 return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf); 2943 } 2944 2945 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 2946 { 2947 return __set_cpus_allowed_ptr(p, new_mask, 0); 2948 } 2949 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 2950 2951 /* 2952 * Change a given task's CPU affinity to the intersection of its current 2953 * affinity mask and @subset_mask, writing the resulting mask to @new_mask 2954 * and pointing @p->user_cpus_ptr to a copy of the old mask. 2955 * If the resulting mask is empty, leave the affinity unchanged and return 2956 * -EINVAL. 2957 */ 2958 static int restrict_cpus_allowed_ptr(struct task_struct *p, 2959 struct cpumask *new_mask, 2960 const struct cpumask *subset_mask) 2961 { 2962 struct cpumask *user_mask = NULL; 2963 struct rq_flags rf; 2964 struct rq *rq; 2965 int err; 2966 2967 if (!p->user_cpus_ptr) { 2968 user_mask = kmalloc(cpumask_size(), GFP_KERNEL); 2969 if (!user_mask) 2970 return -ENOMEM; 2971 } 2972 2973 rq = task_rq_lock(p, &rf); 2974 2975 /* 2976 * Forcefully restricting the affinity of a deadline task is 2977 * likely to cause problems, so fail and noisily override the 2978 * mask entirely. 2979 */ 2980 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 2981 err = -EPERM; 2982 goto err_unlock; 2983 } 2984 2985 if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) { 2986 err = -EINVAL; 2987 goto err_unlock; 2988 } 2989 2990 /* 2991 * We're about to butcher the task affinity, so keep track of what 2992 * the user asked for in case we're able to restore it later on. 2993 */ 2994 if (user_mask) { 2995 cpumask_copy(user_mask, p->cpus_ptr); 2996 p->user_cpus_ptr = user_mask; 2997 } 2998 2999 return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf); 3000 3001 err_unlock: 3002 task_rq_unlock(rq, p, &rf); 3003 kfree(user_mask); 3004 return err; 3005 } 3006 3007 /* 3008 * Restrict the CPU affinity of task @p so that it is a subset of 3009 * task_cpu_possible_mask() and point @p->user_cpu_ptr to a copy of the 3010 * old affinity mask. If the resulting mask is empty, we warn and walk 3011 * up the cpuset hierarchy until we find a suitable mask. 
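* This is used e.g. by arm64 for 32-bit tasks on asymmetric systems where only a subset of CPUs can run them.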
3012 */ 3013 void force_compatible_cpus_allowed_ptr(struct task_struct *p) 3014 { 3015 cpumask_var_t new_mask; 3016 const struct cpumask *override_mask = task_cpu_possible_mask(p); 3017 3018 alloc_cpumask_var(&new_mask, GFP_KERNEL); 3019 3020 /* 3021 * __migrate_task() can fail silently in the face of concurrent 3022 * offlining of the chosen destination CPU, so take the hotplug 3023 * lock to ensure that the migration succeeds. 3024 */ 3025 cpus_read_lock(); 3026 if (!cpumask_available(new_mask)) 3027 goto out_set_mask; 3028 3029 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask)) 3030 goto out_free_mask; 3031 3032 /* 3033 * We failed to find a valid subset of the affinity mask for the 3034 * task, so override it based on its cpuset hierarchy. 3035 */ 3036 cpuset_cpus_allowed(p, new_mask); 3037 override_mask = new_mask; 3038 3039 out_set_mask: 3040 if (printk_ratelimit()) { 3041 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n", 3042 task_pid_nr(p), p->comm, 3043 cpumask_pr_args(override_mask)); 3044 } 3045 3046 WARN_ON(set_cpus_allowed_ptr(p, override_mask)); 3047 out_free_mask: 3048 cpus_read_unlock(); 3049 free_cpumask_var(new_mask); 3050 } 3051 3052 static int 3053 __sched_setaffinity(struct task_struct *p, const struct cpumask *mask); 3054 3055 /* 3056 * Restore the affinity of a task @p which was previously restricted by a 3057 * call to force_compatible_cpus_allowed_ptr(). This will clear (and free) 3058 * @p->user_cpus_ptr. 3059 * 3060 * It is the caller's responsibility to serialise this with any calls to 3061 * force_compatible_cpus_allowed_ptr(@p). 3062 */ 3063 void relax_compatible_cpus_allowed_ptr(struct task_struct *p) 3064 { 3065 struct cpumask *user_mask = p->user_cpus_ptr; 3066 unsigned long flags; 3067 3068 /* 3069 * Try to restore the old affinity mask. If this fails, then 3070 * we free the mask explicitly to avoid it being inherited across 3071 * a subsequent fork(). 3072 */ 3073 if (!user_mask || !__sched_setaffinity(p, user_mask)) 3074 return; 3075 3076 raw_spin_lock_irqsave(&p->pi_lock, flags); 3077 user_mask = clear_user_cpus_ptr(p); 3078 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3079 3080 kfree(user_mask); 3081 } 3082 3083 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 3084 { 3085 #ifdef CONFIG_SCHED_DEBUG 3086 unsigned int state = READ_ONCE(p->__state); 3087 3088 /* 3089 * We should never call set_task_cpu() on a blocked task, 3090 * ttwu() will sort out the placement. 3091 */ 3092 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); 3093 3094 /* 3095 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, 3096 * because schedstat_wait_{start,end} rebase migrating task's wait_start 3097 * time relying on p->on_rq. 3098 */ 3099 WARN_ON_ONCE(state == TASK_RUNNING && 3100 p->sched_class == &fair_sched_class && 3101 (p->on_rq && !task_on_rq_migrating(p))); 3102 3103 #ifdef CONFIG_LOCKDEP 3104 /* 3105 * The caller should hold either p->pi_lock or rq->lock, when changing 3106 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 3107 * 3108 * sched_move_task() holds both and thus holding either pins the cgroup, 3109 * see task_group(). 3110 * 3111 * Furthermore, all task_rq users should acquire both locks, see 3112 * task_rq_lock(). 3113 */ 3114 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 3115 lockdep_is_held(__rq_lockp(task_rq(p))))); 3116 #endif 3117 /* 3118 * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 
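* Like the checks above, this one is compiled in only with CONFIG_SCHED_DEBUG.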
3119 */ 3120 WARN_ON_ONCE(!cpu_online(new_cpu)); 3121 3122 WARN_ON_ONCE(is_migration_disabled(p)); 3123 #endif 3124 3125 trace_sched_migrate_task(p, new_cpu); 3126 3127 if (task_cpu(p) != new_cpu) { 3128 if (p->sched_class->migrate_task_rq) 3129 p->sched_class->migrate_task_rq(p, new_cpu); 3130 p->se.nr_migrations++; 3131 rseq_migrate(p); 3132 perf_event_task_migrate(p); 3133 } 3134 3135 __set_task_cpu(p, new_cpu); 3136 } 3137 3138 #ifdef CONFIG_NUMA_BALANCING 3139 static void __migrate_swap_task(struct task_struct *p, int cpu) 3140 { 3141 if (task_on_rq_queued(p)) { 3142 struct rq *src_rq, *dst_rq; 3143 struct rq_flags srf, drf; 3144 3145 src_rq = task_rq(p); 3146 dst_rq = cpu_rq(cpu); 3147 3148 rq_pin_lock(src_rq, &srf); 3149 rq_pin_lock(dst_rq, &drf); 3150 3151 deactivate_task(src_rq, p, 0); 3152 set_task_cpu(p, cpu); 3153 activate_task(dst_rq, p, 0); 3154 check_preempt_curr(dst_rq, p, 0); 3155 3156 rq_unpin_lock(dst_rq, &drf); 3157 rq_unpin_lock(src_rq, &srf); 3158 3159 } else { 3160 /* 3161 * Task isn't running anymore; make it appear like we migrated 3162 * it before it went to sleep. This means on wakeup we make the 3163 * previous CPU our target instead of where it really is. 3164 */ 3165 p->wake_cpu = cpu; 3166 } 3167 } 3168 3169 struct migration_swap_arg { 3170 struct task_struct *src_task, *dst_task; 3171 int src_cpu, dst_cpu; 3172 }; 3173 3174 static int migrate_swap_stop(void *data) 3175 { 3176 struct migration_swap_arg *arg = data; 3177 struct rq *src_rq, *dst_rq; 3178 int ret = -EAGAIN; 3179 3180 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) 3181 return -EAGAIN; 3182 3183 src_rq = cpu_rq(arg->src_cpu); 3184 dst_rq = cpu_rq(arg->dst_cpu); 3185 3186 double_raw_lock(&arg->src_task->pi_lock, 3187 &arg->dst_task->pi_lock); 3188 double_rq_lock(src_rq, dst_rq); 3189 3190 if (task_cpu(arg->dst_task) != arg->dst_cpu) 3191 goto unlock; 3192 3193 if (task_cpu(arg->src_task) != arg->src_cpu) 3194 goto unlock; 3195 3196 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) 3197 goto unlock; 3198 3199 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) 3200 goto unlock; 3201 3202 __migrate_swap_task(arg->src_task, arg->dst_cpu); 3203 __migrate_swap_task(arg->dst_task, arg->src_cpu); 3204 3205 ret = 0; 3206 3207 unlock: 3208 double_rq_unlock(src_rq, dst_rq); 3209 raw_spin_unlock(&arg->dst_task->pi_lock); 3210 raw_spin_unlock(&arg->src_task->pi_lock); 3211 3212 return ret; 3213 } 3214 3215 /* 3216 * Cross migrate two tasks 3217 */ 3218 int migrate_swap(struct task_struct *cur, struct task_struct *p, 3219 int target_cpu, int curr_cpu) 3220 { 3221 struct migration_swap_arg arg; 3222 int ret = -EINVAL; 3223 3224 arg = (struct migration_swap_arg){ 3225 .src_task = cur, 3226 .src_cpu = curr_cpu, 3227 .dst_task = p, 3228 .dst_cpu = target_cpu, 3229 }; 3230 3231 if (arg.src_cpu == arg.dst_cpu) 3232 goto out; 3233 3234 /* 3235 * These three tests are all lockless; this is OK since all of them 3236 * will be re-checked with proper locks held further down the line. 
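* migrate_swap_stop() repeats them with both pi_locks and both rq locks held before performing the actual swap.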
3237 */ 3238 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 3239 goto out; 3240 3241 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) 3242 goto out; 3243 3244 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) 3245 goto out; 3246 3247 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 3248 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 3249 3250 out: 3251 return ret; 3252 } 3253 #endif /* CONFIG_NUMA_BALANCING */ 3254 3255 /* 3256 * wait_task_inactive - wait for a thread to unschedule. 3257 * 3258 * If @match_state is nonzero, it's the @p->state value just checked and 3259 * not expected to change. If it changes, i.e. @p might have woken up, 3260 * then return zero. When we succeed in waiting for @p to be off its CPU, 3261 * we return a positive number (its total switch count). If a second call 3262 * a short while later returns the same number, the caller can be sure that 3263 * @p has remained unscheduled the whole time. 3264 * 3265 * The caller must ensure that the task *will* unschedule sometime soon, 3266 * else this function might spin for a *long* time. This function can't 3267 * be called with interrupts off, or it may introduce deadlock with 3268 * smp_call_function() if an IPI is sent by the same process we are 3269 * waiting to become inactive. 3270 */ 3271 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) 3272 { 3273 int running, queued; 3274 struct rq_flags rf; 3275 unsigned long ncsw; 3276 struct rq *rq; 3277 3278 for (;;) { 3279 /* 3280 * We do the initial early heuristics without holding 3281 * any task-queue locks at all. We'll only try to get 3282 * the runqueue lock when things look like they will 3283 * work out! 3284 */ 3285 rq = task_rq(p); 3286 3287 /* 3288 * If the task is actively running on another CPU 3289 * still, just relax and busy-wait without holding 3290 * any locks. 3291 * 3292 * NOTE! Since we don't hold any locks, it's not 3293 * even sure that "rq" stays as the right runqueue! 3294 * But we don't care, since "task_running()" will 3295 * return false if the runqueue has changed and p 3296 * is actually now running somewhere else! 3297 */ 3298 while (task_running(rq, p)) { 3299 if (match_state && unlikely(READ_ONCE(p->__state) != match_state)) 3300 return 0; 3301 cpu_relax(); 3302 } 3303 3304 /* 3305 * Ok, time to look more closely! We need the rq 3306 * lock now, to be *sure*. If we're wrong, we'll 3307 * just go back and repeat. 3308 */ 3309 rq = task_rq_lock(p, &rf); 3310 trace_sched_wait_task(p); 3311 running = task_running(rq, p); 3312 queued = task_on_rq_queued(p); 3313 ncsw = 0; 3314 if (!match_state || READ_ONCE(p->__state) == match_state) 3315 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 3316 task_rq_unlock(rq, p, &rf); 3317 3318 /* 3319 * If it changed from the expected state, bail out now. 3320 */ 3321 if (unlikely(!ncsw)) 3322 break; 3323 3324 /* 3325 * Was it really running after all now that we 3326 * checked with the proper locks actually held? 3327 * 3328 * Oops. Go back and try again.. 3329 */ 3330 if (unlikely(running)) { 3331 cpu_relax(); 3332 continue; 3333 } 3334 3335 /* 3336 * It's not enough that it's not actively running, 3337 * it must be off the runqueue _entirely_, and not 3338 * preempted! 3339 * 3340 * So if it was still runnable (but just not actively 3341 * running right now), it's preempted, and we should 3342 * yield - it could be a while. 
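* Sleep for roughly one tick instead of spinning; anything shorter would just burn CPU waiting for the preempted task to run again.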
3343 */ 3344 if (unlikely(queued)) { 3345 ktime_t to = NSEC_PER_SEC / HZ; 3346 3347 set_current_state(TASK_UNINTERRUPTIBLE); 3348 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD); 3349 continue; 3350 } 3351 3352 /* 3353 * Ahh, all good. It wasn't running, and it wasn't 3354 * runnable, which means that it will never become 3355 * running in the future either. We're all done! 3356 */ 3357 break; 3358 } 3359 3360 return ncsw; 3361 } 3362 3363 /*** 3364 * kick_process - kick a running thread to enter/exit the kernel 3365 * @p: the to-be-kicked thread 3366 * 3367 * Cause a process which is running on another CPU to enter 3368 * kernel-mode, without any delay. (to get signals handled.) 3369 * 3370 * NOTE: this function doesn't have to take the runqueue lock, 3371 * because all it wants to ensure is that the remote task enters 3372 * the kernel. If the IPI races and the task has been migrated 3373 * to another CPU then no harm is done and the purpose has been 3374 * achieved as well. 3375 */ 3376 void kick_process(struct task_struct *p) 3377 { 3378 int cpu; 3379 3380 preempt_disable(); 3381 cpu = task_cpu(p); 3382 if ((cpu != smp_processor_id()) && task_curr(p)) 3383 smp_send_reschedule(cpu); 3384 preempt_enable(); 3385 } 3386 EXPORT_SYMBOL_GPL(kick_process); 3387 3388 /* 3389 * ->cpus_ptr is protected by both rq->lock and p->pi_lock 3390 * 3391 * A few notes on cpu_active vs cpu_online: 3392 * 3393 * - cpu_active must be a subset of cpu_online 3394 * 3395 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, 3396 * see __set_cpus_allowed_ptr(). At this point the newly online 3397 * CPU isn't yet part of the sched domains, and balancing will not 3398 * see it. 3399 * 3400 * - on CPU-down we clear cpu_active() to mask the sched domains and 3401 * avoid the load balancer to place new tasks on the to be removed 3402 * CPU. Existing tasks will remain running there and will be taken 3403 * off. 3404 * 3405 * This means that fallback selection must not select !active CPUs. 3406 * And can assume that any active CPU must be online. Conversely 3407 * select_task_rq() below may allow selection of !active CPUs in order 3408 * to satisfy the above rules. 3409 */ 3410 static int select_fallback_rq(int cpu, struct task_struct *p) 3411 { 3412 int nid = cpu_to_node(cpu); 3413 const struct cpumask *nodemask = NULL; 3414 enum { cpuset, possible, fail } state = cpuset; 3415 int dest_cpu; 3416 3417 /* 3418 * If the node that the CPU is on has been offlined, cpu_to_node() 3419 * will return -1. There is no CPU on the node, and we should 3420 * select the CPU on the other node. 3421 */ 3422 if (nid != -1) { 3423 nodemask = cpumask_of_node(nid); 3424 3425 /* Look for allowed, online CPU in same node. */ 3426 for_each_cpu(dest_cpu, nodemask) { 3427 if (is_cpu_allowed(p, dest_cpu)) 3428 return dest_cpu; 3429 } 3430 } 3431 3432 for (;;) { 3433 /* Any allowed, online CPU? */ 3434 for_each_cpu(dest_cpu, p->cpus_ptr) { 3435 if (!is_cpu_allowed(p, dest_cpu)) 3436 continue; 3437 3438 goto out; 3439 } 3440 3441 /* No more Mr. Nice Guy. */ 3442 switch (state) { 3443 case cpuset: 3444 if (cpuset_cpus_allowed_fallback(p)) { 3445 state = possible; 3446 break; 3447 } 3448 fallthrough; 3449 case possible: 3450 /* 3451 * XXX When called from select_task_rq() we only 3452 * hold p->pi_lock and again violate locking order. 3453 * 3454 * More yuck to audit. 
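* Last resort: widen the affinity to every CPU the task could possibly run on and scan one more time; failing even then is a BUG().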
3455 */ 3456 do_set_cpus_allowed(p, task_cpu_possible_mask(p)); 3457 state = fail; 3458 break; 3459 case fail: 3460 BUG(); 3461 break; 3462 } 3463 } 3464 3465 out: 3466 if (state != cpuset) { 3467 /* 3468 * Don't tell them about moving exiting tasks or 3469 * kernel threads (both mm NULL), since they never 3470 * leave kernel. 3471 */ 3472 if (p->mm && printk_ratelimit()) { 3473 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 3474 task_pid_nr(p), p->comm, cpu); 3475 } 3476 } 3477 3478 return dest_cpu; 3479 } 3480 3481 /* 3482 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 3483 */ 3484 static inline 3485 int select_task_rq(struct task_struct *p, int cpu, int wake_flags) 3486 { 3487 lockdep_assert_held(&p->pi_lock); 3488 3489 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) 3490 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags); 3491 else 3492 cpu = cpumask_any(p->cpus_ptr); 3493 3494 /* 3495 * In order not to call set_task_cpu() on a blocking task we need 3496 * to rely on ttwu() to place the task on a valid ->cpus_ptr 3497 * CPU. 3498 * 3499 * Since this is common to all placement strategies, this lives here. 3500 * 3501 * [ this allows ->select_task() to simply return task_cpu(p) and 3502 * not worry about this generic constraint ] 3503 */ 3504 if (unlikely(!is_cpu_allowed(p, cpu))) 3505 cpu = select_fallback_rq(task_cpu(p), p); 3506 3507 return cpu; 3508 } 3509 3510 void sched_set_stop_task(int cpu, struct task_struct *stop) 3511 { 3512 static struct lock_class_key stop_pi_lock; 3513 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 3514 struct task_struct *old_stop = cpu_rq(cpu)->stop; 3515 3516 if (stop) { 3517 /* 3518 * Make it appear like a SCHED_FIFO task, its something 3519 * userspace knows about and won't get confused about. 3520 * 3521 * Also, it will make PI more or less work without too 3522 * much confusion -- but then, stop work should not 3523 * rely on PI working anyway. 3524 */ 3525 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 3526 3527 stop->sched_class = &stop_sched_class; 3528 3529 /* 3530 * The PI code calls rt_mutex_setprio() with ->pi_lock held to 3531 * adjust the effective priority of a task. As a result, 3532 * rt_mutex_setprio() can trigger (RT) balancing operations, 3533 * which can then trigger wakeups of the stop thread to push 3534 * around the current task. 3535 * 3536 * The stop task itself will never be part of the PI-chain, it 3537 * never blocks, therefore that ->pi_lock recursion is safe. 3538 * Tell lockdep about this by placing the stop->pi_lock in its 3539 * own class. 3540 */ 3541 lockdep_set_class(&stop->pi_lock, &stop_pi_lock); 3542 } 3543 3544 cpu_rq(cpu)->stop = stop; 3545 3546 if (old_stop) { 3547 /* 3548 * Reset it back to a normal scheduling class so that 3549 * it can die in pieces. 
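* It keeps its elevated SCHED_FIFO priority but is handled by the regular RT class from here on.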
3550 */ 3551 old_stop->sched_class = &rt_sched_class; 3552 } 3553 } 3554 3555 #else /* CONFIG_SMP */ 3556 3557 static inline int __set_cpus_allowed_ptr(struct task_struct *p, 3558 const struct cpumask *new_mask, 3559 u32 flags) 3560 { 3561 return set_cpus_allowed_ptr(p, new_mask); 3562 } 3563 3564 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } 3565 3566 static inline bool rq_has_pinned_tasks(struct rq *rq) 3567 { 3568 return false; 3569 } 3570 3571 #endif /* !CONFIG_SMP */ 3572 3573 static void 3574 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 3575 { 3576 struct rq *rq; 3577 3578 if (!schedstat_enabled()) 3579 return; 3580 3581 rq = this_rq(); 3582 3583 #ifdef CONFIG_SMP 3584 if (cpu == rq->cpu) { 3585 __schedstat_inc(rq->ttwu_local); 3586 __schedstat_inc(p->stats.nr_wakeups_local); 3587 } else { 3588 struct sched_domain *sd; 3589 3590 __schedstat_inc(p->stats.nr_wakeups_remote); 3591 rcu_read_lock(); 3592 for_each_domain(rq->cpu, sd) { 3593 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 3594 __schedstat_inc(sd->ttwu_wake_remote); 3595 break; 3596 } 3597 } 3598 rcu_read_unlock(); 3599 } 3600 3601 if (wake_flags & WF_MIGRATED) 3602 __schedstat_inc(p->stats.nr_wakeups_migrate); 3603 #endif /* CONFIG_SMP */ 3604 3605 __schedstat_inc(rq->ttwu_count); 3606 __schedstat_inc(p->stats.nr_wakeups); 3607 3608 if (wake_flags & WF_SYNC) 3609 __schedstat_inc(p->stats.nr_wakeups_sync); 3610 } 3611 3612 /* 3613 * Mark the task runnable and perform wakeup-preemption. 3614 */ 3615 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, 3616 struct rq_flags *rf) 3617 { 3618 check_preempt_curr(rq, p, wake_flags); 3619 WRITE_ONCE(p->__state, TASK_RUNNING); 3620 trace_sched_wakeup(p); 3621 3622 #ifdef CONFIG_SMP 3623 if (p->sched_class->task_woken) { 3624 /* 3625 * Our task @p is fully woken up and running; so it's safe to 3626 * drop the rq->lock, hereafter rq is only used for statistics. 3627 */ 3628 rq_unpin_lock(rq, rf); 3629 p->sched_class->task_woken(rq, p); 3630 rq_repin_lock(rq, rf); 3631 } 3632 3633 if (rq->idle_stamp) { 3634 u64 delta = rq_clock(rq) - rq->idle_stamp; 3635 u64 max = 2*rq->max_idle_balance_cost; 3636 3637 update_avg(&rq->avg_idle, delta); 3638 3639 if (rq->avg_idle > max) 3640 rq->avg_idle = max; 3641 3642 rq->wake_stamp = jiffies; 3643 rq->wake_avg_idle = rq->avg_idle / 2; 3644 3645 rq->idle_stamp = 0; 3646 } 3647 #endif 3648 } 3649 3650 static void 3651 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 3652 struct rq_flags *rf) 3653 { 3654 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 3655 3656 lockdep_assert_rq_held(rq); 3657 3658 if (p->sched_contributes_to_load) 3659 rq->nr_uninterruptible--; 3660 3661 #ifdef CONFIG_SMP 3662 if (wake_flags & WF_MIGRATED) 3663 en_flags |= ENQUEUE_MIGRATED; 3664 else 3665 #endif 3666 if (p->in_iowait) { 3667 delayacct_blkio_end(p); 3668 atomic_dec(&task_rq(p)->nr_iowait); 3669 } 3670 3671 activate_task(rq, p, en_flags); 3672 ttwu_do_wakeup(rq, p, wake_flags, rf); 3673 } 3674 3675 /* 3676 * Consider @p being inside a wait loop: 3677 * 3678 * for (;;) { 3679 * set_current_state(TASK_UNINTERRUPTIBLE); 3680 * 3681 * if (CONDITION) 3682 * break; 3683 * 3684 * schedule(); 3685 * } 3686 * __set_current_state(TASK_RUNNING); 3687 * 3688 * between set_current_state() and schedule(). In this case @p is still 3689 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in 3690 * an atomic manner. 
3691 * 3692 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq 3693 * then schedule() must still happen and p->state can be changed to 3694 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we 3695 * need to do a full wakeup with enqueue. 3696 * 3697 * Returns: %true when the wakeup is done, 3698 * %false otherwise. 3699 */ 3700 static int ttwu_runnable(struct task_struct *p, int wake_flags) 3701 { 3702 struct rq_flags rf; 3703 struct rq *rq; 3704 int ret = 0; 3705 3706 rq = __task_rq_lock(p, &rf); 3707 if (task_on_rq_queued(p)) { 3708 /* check_preempt_curr() may use rq clock */ 3709 update_rq_clock(rq); 3710 ttwu_do_wakeup(rq, p, wake_flags, &rf); 3711 ret = 1; 3712 } 3713 __task_rq_unlock(rq, &rf); 3714 3715 return ret; 3716 } 3717 3718 #ifdef CONFIG_SMP 3719 void sched_ttwu_pending(void *arg) 3720 { 3721 struct llist_node *llist = arg; 3722 struct rq *rq = this_rq(); 3723 struct task_struct *p, *t; 3724 struct rq_flags rf; 3725 3726 if (!llist) 3727 return; 3728 3729 /* 3730 * rq::ttwu_pending racy indication of out-standing wakeups. 3731 * Races such that false-negatives are possible, since they 3732 * are shorter lived that false-positives would be. 3733 */ 3734 WRITE_ONCE(rq->ttwu_pending, 0); 3735 3736 rq_lock_irqsave(rq, &rf); 3737 update_rq_clock(rq); 3738 3739 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { 3740 if (WARN_ON_ONCE(p->on_cpu)) 3741 smp_cond_load_acquire(&p->on_cpu, !VAL); 3742 3743 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) 3744 set_task_cpu(p, cpu_of(rq)); 3745 3746 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); 3747 } 3748 3749 rq_unlock_irqrestore(rq, &rf); 3750 } 3751 3752 void send_call_function_single_ipi(int cpu) 3753 { 3754 struct rq *rq = cpu_rq(cpu); 3755 3756 if (!set_nr_if_polling(rq->idle)) 3757 arch_send_call_function_single_ipi(cpu); 3758 else 3759 trace_sched_wake_idle_without_ipi(cpu); 3760 } 3761 3762 /* 3763 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if 3764 * necessary. The wakee CPU on receipt of the IPI will queue the task 3765 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost 3766 * of the wakeup instead of the waker. 3767 */ 3768 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3769 { 3770 struct rq *rq = cpu_rq(cpu); 3771 3772 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 3773 3774 WRITE_ONCE(rq->ttwu_pending, 1); 3775 __smp_call_single_queue(cpu, &p->wake_entry.llist); 3776 } 3777 3778 void wake_up_if_idle(int cpu) 3779 { 3780 struct rq *rq = cpu_rq(cpu); 3781 struct rq_flags rf; 3782 3783 rcu_read_lock(); 3784 3785 if (!is_idle_task(rcu_dereference(rq->curr))) 3786 goto out; 3787 3788 rq_lock_irqsave(rq, &rf); 3789 if (is_idle_task(rq->curr)) 3790 resched_curr(rq); 3791 /* Else CPU is not idle, do nothing here: */ 3792 rq_unlock_irqrestore(rq, &rf); 3793 3794 out: 3795 rcu_read_unlock(); 3796 } 3797 3798 bool cpus_share_cache(int this_cpu, int that_cpu) 3799 { 3800 if (this_cpu == that_cpu) 3801 return true; 3802 3803 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 3804 } 3805 3806 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) 3807 { 3808 /* 3809 * Do not complicate things with the async wake_list while the CPU is 3810 * in hotplug state. 3811 */ 3812 if (!cpu_active(cpu)) 3813 return false; 3814 3815 /* Ensure the task will still be allowed to run on the CPU. 
*/ 3816 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 3817 return false; 3818 3819 /* 3820 * If the CPU does not share cache, then queue the task on the 3821 * remote rqs wakelist to avoid accessing remote data. 3822 */ 3823 if (!cpus_share_cache(smp_processor_id(), cpu)) 3824 return true; 3825 3826 if (cpu == smp_processor_id()) 3827 return false; 3828 3829 /* 3830 * If the wakee cpu is idle, or the task is descheduling and the 3831 * only running task on the CPU, then use the wakelist to offload 3832 * the task activation to the idle (or soon-to-be-idle) CPU as 3833 * the current CPU is likely busy. nr_running is checked to 3834 * avoid unnecessary task stacking. 3835 * 3836 * Note that we can only get here with (wakee) p->on_rq=0, 3837 * p->on_cpu can be whatever, we've done the dequeue, so 3838 * the wakee has been accounted out of ->nr_running. 3839 */ 3840 if (!cpu_rq(cpu)->nr_running) 3841 return true; 3842 3843 return false; 3844 } 3845 3846 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3847 { 3848 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) { 3849 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ 3850 __ttwu_queue_wakelist(p, cpu, wake_flags); 3851 return true; 3852 } 3853 3854 return false; 3855 } 3856 3857 #else /* !CONFIG_SMP */ 3858 3859 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3860 { 3861 return false; 3862 } 3863 3864 #endif /* CONFIG_SMP */ 3865 3866 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 3867 { 3868 struct rq *rq = cpu_rq(cpu); 3869 struct rq_flags rf; 3870 3871 if (ttwu_queue_wakelist(p, cpu, wake_flags)) 3872 return; 3873 3874 rq_lock(rq, &rf); 3875 update_rq_clock(rq); 3876 ttwu_do_activate(rq, p, wake_flags, &rf); 3877 rq_unlock(rq, &rf); 3878 } 3879 3880 /* 3881 * Invoked from try_to_wake_up() to check whether the task can be woken up. 3882 * 3883 * The caller holds p::pi_lock if p != current or has preemption 3884 * disabled when p == current. 3885 * 3886 * The rules of PREEMPT_RT saved_state: 3887 * 3888 * The related locking code always holds p::pi_lock when updating 3889 * p::saved_state, which means the code is fully serialized in both cases. 3890 * 3891 * The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other 3892 * bits set. This allows to distinguish all wakeup scenarios. 3893 */ 3894 static __always_inline 3895 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) 3896 { 3897 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) { 3898 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) && 3899 state != TASK_RTLOCK_WAIT); 3900 } 3901 3902 if (READ_ONCE(p->__state) & state) { 3903 *success = 1; 3904 return true; 3905 } 3906 3907 #ifdef CONFIG_PREEMPT_RT 3908 /* 3909 * Saved state preserves the task state across blocking on 3910 * an RT lock. If the state matches, set p::saved_state to 3911 * TASK_RUNNING, but do not wake the task because it waits 3912 * for a lock wakeup. Also indicate success because from 3913 * the regular waker's point of view this has succeeded. 3914 * 3915 * After acquiring the lock the task will restore p::__state 3916 * from p::saved_state which ensures that the regular 3917 * wakeup is not lost. The restore will also set 3918 * p::saved_state to TASK_RUNNING so any further tests will 3919 * not result in false positives vs. 
@success 3920 */ 3921 if (p->saved_state & state) { 3922 p->saved_state = TASK_RUNNING; 3923 *success = 1; 3924 } 3925 #endif 3926 return false; 3927 } 3928 3929 /* 3930 * Notes on Program-Order guarantees on SMP systems. 3931 * 3932 * MIGRATION 3933 * 3934 * The basic program-order guarantee on SMP systems is that when a task [t] 3935 * migrates, all its activity on its old CPU [c0] happens-before any subsequent 3936 * execution on its new CPU [c1]. 3937 * 3938 * For migration (of runnable tasks) this is provided by the following means: 3939 * 3940 * A) UNLOCK of the rq(c0)->lock scheduling out task t 3941 * B) migration for t is required to synchronize *both* rq(c0)->lock and 3942 * rq(c1)->lock (if not at the same time, then in that order). 3943 * C) LOCK of the rq(c1)->lock scheduling in task 3944 * 3945 * Release/acquire chaining guarantees that B happens after A and C after B. 3946 * Note: the CPU doing B need not be c0 or c1 3947 * 3948 * Example: 3949 * 3950 * CPU0 CPU1 CPU2 3951 * 3952 * LOCK rq(0)->lock 3953 * sched-out X 3954 * sched-in Y 3955 * UNLOCK rq(0)->lock 3956 * 3957 * LOCK rq(0)->lock // orders against CPU0 3958 * dequeue X 3959 * UNLOCK rq(0)->lock 3960 * 3961 * LOCK rq(1)->lock 3962 * enqueue X 3963 * UNLOCK rq(1)->lock 3964 * 3965 * LOCK rq(1)->lock // orders against CPU2 3966 * sched-out Z 3967 * sched-in X 3968 * UNLOCK rq(1)->lock 3969 * 3970 * 3971 * BLOCKING -- aka. SLEEP + WAKEUP 3972 * 3973 * For blocking we (obviously) need to provide the same guarantee as for 3974 * migration. However the means are completely different as there is no lock 3975 * chain to provide order. Instead we do: 3976 * 3977 * 1) smp_store_release(X->on_cpu, 0) -- finish_task() 3978 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up() 3979 * 3980 * Example: 3981 * 3982 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 3983 * 3984 * LOCK rq(0)->lock LOCK X->pi_lock 3985 * dequeue X 3986 * sched-out X 3987 * smp_store_release(X->on_cpu, 0); 3988 * 3989 * smp_cond_load_acquire(&X->on_cpu, !VAL); 3990 * X->state = WAKING 3991 * set_task_cpu(X,2) 3992 * 3993 * LOCK rq(2)->lock 3994 * enqueue X 3995 * X->state = RUNNING 3996 * UNLOCK rq(2)->lock 3997 * 3998 * LOCK rq(2)->lock // orders against CPU1 3999 * sched-out Z 4000 * sched-in X 4001 * UNLOCK rq(2)->lock 4002 * 4003 * UNLOCK X->pi_lock 4004 * UNLOCK rq(0)->lock 4005 * 4006 * 4007 * However, for wakeups there is a second guarantee we must provide, namely we 4008 * must ensure that CONDITION=1 done by the caller can not be reordered with 4009 * accesses to the task state; see try_to_wake_up() and set_current_state(). 4010 */ 4011 4012 /** 4013 * try_to_wake_up - wake up a thread 4014 * @p: the thread to be awakened 4015 * @state: the mask of task states that can be woken 4016 * @wake_flags: wake modifier flags (WF_*) 4017 * 4018 * Conceptually does: 4019 * 4020 * If (@state & @p->state) @p->state = TASK_RUNNING. 4021 * 4022 * If the task was not queued/runnable, also place it back on a runqueue. 4023 * 4024 * This function is atomic against schedule() which would dequeue the task. 4025 * 4026 * It issues a full memory barrier before accessing @p->state, see the comment 4027 * with set_current_state(). 4028 * 4029 * Uses p->pi_lock to serialize against concurrent wake-ups. 4030 * 4031 * Relies on p->pi_lock stabilizing: 4032 * - p->sched_class 4033 * - p->cpus_ptr 4034 * - p->sched_task_group 4035 * in order to do migration, see its use of select_task_rq()/set_task_cpu(). 
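 *
 * As a rough illustration (not lifted from any particular caller;
 * CONDITION stands for whatever the waiter is checking), the sleep/wake
 * pattern this function pairs with looks like:
 *
 *   // waiter                                // waker
 *   for (;;) {                               CONDITION = 1;
 *           set_current_state(TASK_UNINTERRUPTIBLE);
 *           if (CONDITION)                   wake_up_process(p);
 *                   break;
 *           schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * where set_current_state() provides the store+barrier that the full
 * memory barrier mentioned above pairs with.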
4036 *
4037 * Tries really hard to only take one task_rq(p)->lock for performance.
4038 * Takes rq->lock in:
4039 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4040 * - ttwu_queue() -- new rq, for enqueue of the task;
4041 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4042 *
4043 * As a consequence we race really badly with just about everything. See the
4044 * many memory barriers and their comments for details.
4045 *
4046 * Return: %true if @p->state changes (an actual wakeup was done),
4047 * %false otherwise.
4048 */
4049 static int
4050 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4051 {
4052 unsigned long flags;
4053 int cpu, success = 0;
4054
4055 preempt_disable();
4056 if (p == current) {
4057 /*
4058 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4059 * == smp_processor_id()'. Together this means we can special
4060 * case the whole 'p->on_rq && ttwu_runnable()' case below
4061 * without taking any locks.
4062 *
4063 * In particular:
4064 * - we rely on Program-Order guarantees for all the ordering,
4065 * - we're serialized against set_special_state() by virtue of
4066 * it disabling IRQs (this allows not taking ->pi_lock).
4067 */
4068 if (!ttwu_state_match(p, state, &success))
4069 goto out;
4070
4071 trace_sched_waking(p);
4072 WRITE_ONCE(p->__state, TASK_RUNNING);
4073 trace_sched_wakeup(p);
4074 goto out;
4075 }
4076
4077 /*
4078 * If we are going to wake up a thread waiting for CONDITION we
4079 * need to ensure that CONDITION=1 done by the caller can not be
4080 * reordered with the p->state check below. This pairs with smp_store_mb()
4081 * in set_current_state() that the waiting thread does.
4082 */
4083 raw_spin_lock_irqsave(&p->pi_lock, flags);
4084 smp_mb__after_spinlock();
4085 if (!ttwu_state_match(p, state, &success))
4086 goto unlock;
4087
4088 trace_sched_waking(p);
4089
4090 /*
4091 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4092 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4093 * in smp_cond_load_acquire() below.
4094 *
4095 * sched_ttwu_pending() try_to_wake_up()
4096 * STORE p->on_rq = 1 LOAD p->state
4097 * UNLOCK rq->lock
4098 *
4099 * __schedule() (switch to task 'p')
4100 * LOCK rq->lock smp_rmb();
4101 * smp_mb__after_spinlock();
4102 * UNLOCK rq->lock
4103 *
4104 * [task p]
4105 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4106 *
4107 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4108 * __schedule(). See the comment for smp_mb__after_spinlock().
4109 *
4110 * A similar smp_rmb() lives in task_call_func().
4111 */
4112 smp_rmb();
4113 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4114 goto unlock;
4115
4116 #ifdef CONFIG_SMP
4117 /*
4118 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4119 * possible to, falsely, observe p->on_cpu == 0.
4120 *
4121 * One must be running (->on_cpu == 1) in order to remove oneself
4122 * from the runqueue.
4123 *
4124 * __schedule() (switch to task 'p') try_to_wake_up()
4125 * STORE p->on_cpu = 1 LOAD p->on_rq
4126 * UNLOCK rq->lock
4127 *
4128 * __schedule() (put 'p' to sleep)
4129 * LOCK rq->lock smp_rmb();
4130 * smp_mb__after_spinlock();
4131 * STORE p->on_rq = 0 LOAD p->on_cpu
4132 *
4133 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4134 * __schedule(). See the comment for smp_mb__after_spinlock().
4135 *
4136 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4137 * schedule()'s deactivate_task() has 'happened' and p will no longer
4138 * care about its own p->state. See the comment in __schedule().
4139 */
4140 smp_acquire__after_ctrl_dep();
4141
4142 /*
4143 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4144 * == 0), which means we need to do an enqueue. Change p->state to
4145 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4146 * enqueue, such as ttwu_queue_wakelist().
4147 */
4148 WRITE_ONCE(p->__state, TASK_WAKING);
4149
4150 /*
4151 * If the owning (remote) CPU is still in the middle of schedule() with
4152 * this task as prev, consider queueing p on the remote CPU's wake_list
4153 * which potentially sends an IPI instead of spinning on p->on_cpu to
4154 * let the waker make forward progress. This is safe because IRQs are
4155 * disabled and the IPI will deliver after on_cpu is cleared.
4156 *
4157 * Ensure we load task_cpu(p) after p->on_cpu:
4158 *
4159 * set_task_cpu(p, cpu);
4160 * STORE p->cpu = @cpu
4161 * __schedule() (switch to task 'p')
4162 * LOCK rq->lock
4163 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4164 * STORE p->on_cpu = 1 LOAD p->cpu
4165 *
4166 * to ensure we observe the correct CPU on which the task is currently
4167 * scheduling.
4168 */
4169 if (smp_load_acquire(&p->on_cpu) &&
4170 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4171 goto unlock;
4172
4173 /*
4174 * If the owning (remote) CPU is still in the middle of schedule() with
4175 * this task as prev, wait until it's done referencing the task.
4176 *
4177 * Pairs with the smp_store_release() in finish_task().
4178 *
4179 * This ensures that tasks getting woken will be fully ordered against
4180 * their previous state and preserve Program Order.
4181 */
4182 smp_cond_load_acquire(&p->on_cpu, !VAL);
4183
4184 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
4185 if (task_cpu(p) != cpu) {
4186 if (p->in_iowait) {
4187 delayacct_blkio_end(p);
4188 atomic_dec(&task_rq(p)->nr_iowait);
4189 }
4190
4191 wake_flags |= WF_MIGRATED;
4192 psi_ttwu_dequeue(p);
4193 set_task_cpu(p, cpu);
4194 }
4195 #else
4196 cpu = task_cpu(p);
4197 #endif /* CONFIG_SMP */
4198
4199 ttwu_queue(p, cpu, wake_flags);
4200 unlock:
4201 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4202 out:
4203 if (success)
4204 ttwu_stat(p, task_cpu(p), wake_flags);
4205 preempt_enable();
4206
4207 return success;
4208 }
4209
4210 /**
4211 * task_call_func - Invoke a function on task in fixed state
4212 * @p: Process for which the function is to be invoked, can be @current.
4213 * @func: Function to invoke.
4214 * @arg: Argument to function.
4215 *
4216 * Fix the task in its current state by avoiding wakeups and/or rq operations
4217 * and call @func(@arg) on it. This function can use ->on_rq and task_curr()
4218 * to work out what the state is, if required. Given that @func can be invoked
4219 * with a runqueue lock held, it had better be quite lightweight.
4220 *
4221 * Returns:
4222 * Whatever @func returns
4223 */
4224 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4225 {
4226 struct rq *rq = NULL;
4227 unsigned int state;
4228 struct rq_flags rf;
4229 int ret;
4230
4231 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4232
4233 state = READ_ONCE(p->__state);
4234
4235 /*
4236 * Ensure we load p->on_rq after p->__state, otherwise it would be
4237 * possible to, falsely, observe p->on_rq == 0.
4238 * 4239 * See try_to_wake_up() for a longer comment. 4240 */ 4241 smp_rmb(); 4242 4243 /* 4244 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when 4245 * the task is blocked. Make sure to check @state since ttwu() can drop 4246 * locks at the end, see ttwu_queue_wakelist(). 4247 */ 4248 if (state == TASK_RUNNING || state == TASK_WAKING || p->on_rq) 4249 rq = __task_rq_lock(p, &rf); 4250 4251 /* 4252 * At this point the task is pinned; either: 4253 * - blocked and we're holding off wakeups (pi->lock) 4254 * - woken, and we're holding off enqueue (rq->lock) 4255 * - queued, and we're holding off schedule (rq->lock) 4256 * - running, and we're holding off de-schedule (rq->lock) 4257 * 4258 * The called function (@func) can use: task_curr(), p->on_rq and 4259 * p->__state to differentiate between these states. 4260 */ 4261 ret = func(p, arg); 4262 4263 if (rq) 4264 rq_unlock(rq, &rf); 4265 4266 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); 4267 return ret; 4268 } 4269 4270 /** 4271 * cpu_curr_snapshot - Return a snapshot of the currently running task 4272 * @cpu: The CPU on which to snapshot the task. 4273 * 4274 * Returns the task_struct pointer of the task "currently" running on 4275 * the specified CPU. If the same task is running on that CPU throughout, 4276 * the return value will be a pointer to that task's task_struct structure. 4277 * If the CPU did any context switches even vaguely concurrently with the 4278 * execution of this function, the return value will be a pointer to the 4279 * task_struct structure of a randomly chosen task that was running on 4280 * that CPU somewhere around the time that this function was executing. 4281 * 4282 * If the specified CPU was offline, the return value is whatever it 4283 * is, perhaps a pointer to the task_struct structure of that CPU's idle 4284 * task, but there is no guarantee. Callers wishing a useful return 4285 * value must take some action to ensure that the specified CPU remains 4286 * online throughout. 4287 * 4288 * This function executes full memory barriers before and after fetching 4289 * the pointer, which permits the caller to confine this function's fetch 4290 * with respect to the caller's accesses to other shared variables. 4291 */ 4292 struct task_struct *cpu_curr_snapshot(int cpu) 4293 { 4294 struct task_struct *t; 4295 4296 smp_mb(); /* Pairing determined by caller's synchronization design. */ 4297 t = rcu_dereference(cpu_curr(cpu)); 4298 smp_mb(); /* Pairing determined by caller's synchronization design. */ 4299 return t; 4300 } 4301 4302 /** 4303 * wake_up_process - Wake up a specific process 4304 * @p: The process to be woken up. 4305 * 4306 * Attempt to wake up the nominated process and move it to the set of runnable 4307 * processes. 4308 * 4309 * Return: 1 if the process was woken up, 0 if it was already running. 4310 * 4311 * This function executes a full memory barrier before accessing the task state. 4312 */ 4313 int wake_up_process(struct task_struct *p) 4314 { 4315 return try_to_wake_up(p, TASK_NORMAL, 0); 4316 } 4317 EXPORT_SYMBOL(wake_up_process); 4318 4319 int wake_up_state(struct task_struct *p, unsigned int state) 4320 { 4321 return try_to_wake_up(p, state, 0); 4322 } 4323 4324 /* 4325 * Perform scheduler related setup for a newly forked process p. 4326 * p is forked by current. 
4327 * 4328 * __sched_fork() is basic setup used by init_idle() too: 4329 */ 4330 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 4331 { 4332 p->on_rq = 0; 4333 4334 p->se.on_rq = 0; 4335 p->se.exec_start = 0; 4336 p->se.sum_exec_runtime = 0; 4337 p->se.prev_sum_exec_runtime = 0; 4338 p->se.nr_migrations = 0; 4339 p->se.vruntime = 0; 4340 INIT_LIST_HEAD(&p->se.group_node); 4341 4342 #ifdef CONFIG_FAIR_GROUP_SCHED 4343 p->se.cfs_rq = NULL; 4344 #endif 4345 4346 #ifdef CONFIG_SCHEDSTATS 4347 /* Even if schedstat is disabled, there should not be garbage */ 4348 memset(&p->stats, 0, sizeof(p->stats)); 4349 #endif 4350 4351 RB_CLEAR_NODE(&p->dl.rb_node); 4352 init_dl_task_timer(&p->dl); 4353 init_dl_inactive_task_timer(&p->dl); 4354 __dl_clear_params(p); 4355 4356 INIT_LIST_HEAD(&p->rt.run_list); 4357 p->rt.timeout = 0; 4358 p->rt.time_slice = sched_rr_timeslice; 4359 p->rt.on_rq = 0; 4360 p->rt.on_list = 0; 4361 4362 #ifdef CONFIG_PREEMPT_NOTIFIERS 4363 INIT_HLIST_HEAD(&p->preempt_notifiers); 4364 #endif 4365 4366 #ifdef CONFIG_COMPACTION 4367 p->capture_control = NULL; 4368 #endif 4369 init_numa_balancing(clone_flags, p); 4370 #ifdef CONFIG_SMP 4371 p->wake_entry.u_flags = CSD_TYPE_TTWU; 4372 p->migration_pending = NULL; 4373 #endif 4374 } 4375 4376 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 4377 4378 #ifdef CONFIG_NUMA_BALANCING 4379 4380 int sysctl_numa_balancing_mode; 4381 4382 static void __set_numabalancing_state(bool enabled) 4383 { 4384 if (enabled) 4385 static_branch_enable(&sched_numa_balancing); 4386 else 4387 static_branch_disable(&sched_numa_balancing); 4388 } 4389 4390 void set_numabalancing_state(bool enabled) 4391 { 4392 if (enabled) 4393 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL; 4394 else 4395 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED; 4396 __set_numabalancing_state(enabled); 4397 } 4398 4399 #ifdef CONFIG_PROC_SYSCTL 4400 int sysctl_numa_balancing(struct ctl_table *table, int write, 4401 void *buffer, size_t *lenp, loff_t *ppos) 4402 { 4403 struct ctl_table t; 4404 int err; 4405 int state = sysctl_numa_balancing_mode; 4406 4407 if (write && !capable(CAP_SYS_ADMIN)) 4408 return -EPERM; 4409 4410 t = *table; 4411 t.data = &state; 4412 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 4413 if (err < 0) 4414 return err; 4415 if (write) { 4416 sysctl_numa_balancing_mode = state; 4417 __set_numabalancing_state(state); 4418 } 4419 return err; 4420 } 4421 #endif 4422 #endif 4423 4424 #ifdef CONFIG_SCHEDSTATS 4425 4426 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 4427 4428 static void set_schedstats(bool enabled) 4429 { 4430 if (enabled) 4431 static_branch_enable(&sched_schedstats); 4432 else 4433 static_branch_disable(&sched_schedstats); 4434 } 4435 4436 void force_schedstat_enabled(void) 4437 { 4438 if (!schedstat_enabled()) { 4439 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 4440 static_branch_enable(&sched_schedstats); 4441 } 4442 } 4443 4444 static int __init setup_schedstats(char *str) 4445 { 4446 int ret = 0; 4447 if (!str) 4448 goto out; 4449 4450 if (!strcmp(str, "enable")) { 4451 set_schedstats(true); 4452 ret = 1; 4453 } else if (!strcmp(str, "disable")) { 4454 set_schedstats(false); 4455 ret = 1; 4456 } 4457 out: 4458 if (!ret) 4459 pr_warn("Unable to parse schedstats=\n"); 4460 4461 return ret; 4462 } 4463 __setup("schedstats=", setup_schedstats); 4464 4465 #ifdef CONFIG_PROC_SYSCTL 4466 static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, 4467 
size_t *lenp, loff_t *ppos) 4468 { 4469 struct ctl_table t; 4470 int err; 4471 int state = static_branch_likely(&sched_schedstats); 4472 4473 if (write && !capable(CAP_SYS_ADMIN)) 4474 return -EPERM; 4475 4476 t = *table; 4477 t.data = &state; 4478 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 4479 if (err < 0) 4480 return err; 4481 if (write) 4482 set_schedstats(state); 4483 return err; 4484 } 4485 #endif /* CONFIG_PROC_SYSCTL */ 4486 #endif /* CONFIG_SCHEDSTATS */ 4487 4488 #ifdef CONFIG_SYSCTL 4489 static struct ctl_table sched_core_sysctls[] = { 4490 #ifdef CONFIG_SCHEDSTATS 4491 { 4492 .procname = "sched_schedstats", 4493 .data = NULL, 4494 .maxlen = sizeof(unsigned int), 4495 .mode = 0644, 4496 .proc_handler = sysctl_schedstats, 4497 .extra1 = SYSCTL_ZERO, 4498 .extra2 = SYSCTL_ONE, 4499 }, 4500 #endif /* CONFIG_SCHEDSTATS */ 4501 #ifdef CONFIG_UCLAMP_TASK 4502 { 4503 .procname = "sched_util_clamp_min", 4504 .data = &sysctl_sched_uclamp_util_min, 4505 .maxlen = sizeof(unsigned int), 4506 .mode = 0644, 4507 .proc_handler = sysctl_sched_uclamp_handler, 4508 }, 4509 { 4510 .procname = "sched_util_clamp_max", 4511 .data = &sysctl_sched_uclamp_util_max, 4512 .maxlen = sizeof(unsigned int), 4513 .mode = 0644, 4514 .proc_handler = sysctl_sched_uclamp_handler, 4515 }, 4516 { 4517 .procname = "sched_util_clamp_min_rt_default", 4518 .data = &sysctl_sched_uclamp_util_min_rt_default, 4519 .maxlen = sizeof(unsigned int), 4520 .mode = 0644, 4521 .proc_handler = sysctl_sched_uclamp_handler, 4522 }, 4523 #endif /* CONFIG_UCLAMP_TASK */ 4524 {} 4525 }; 4526 static int __init sched_core_sysctl_init(void) 4527 { 4528 register_sysctl_init("kernel", sched_core_sysctls); 4529 return 0; 4530 } 4531 late_initcall(sched_core_sysctl_init); 4532 #endif /* CONFIG_SYSCTL */ 4533 4534 /* 4535 * fork()/clone()-time setup: 4536 */ 4537 int sched_fork(unsigned long clone_flags, struct task_struct *p) 4538 { 4539 __sched_fork(clone_flags, p); 4540 /* 4541 * We mark the process as NEW here. This guarantees that 4542 * nobody will actually run it, and a signal or other external 4543 * event cannot wake it up and insert it on the runqueue either. 4544 */ 4545 p->__state = TASK_NEW; 4546 4547 /* 4548 * Make sure we do not leak PI boosting priority to the child. 4549 */ 4550 p->prio = current->normal_prio; 4551 4552 uclamp_fork(p); 4553 4554 /* 4555 * Revert to default priority/policy on fork if requested. 4556 */ 4557 if (unlikely(p->sched_reset_on_fork)) { 4558 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 4559 p->policy = SCHED_NORMAL; 4560 p->static_prio = NICE_TO_PRIO(0); 4561 p->rt_priority = 0; 4562 } else if (PRIO_TO_NICE(p->static_prio) < 0) 4563 p->static_prio = NICE_TO_PRIO(0); 4564 4565 p->prio = p->normal_prio = p->static_prio; 4566 set_load_weight(p, false); 4567 4568 /* 4569 * We don't need the reset flag anymore after the fork. 
It has 4570 * fulfilled its duty: 4571 */ 4572 p->sched_reset_on_fork = 0; 4573 } 4574 4575 if (dl_prio(p->prio)) 4576 return -EAGAIN; 4577 else if (rt_prio(p->prio)) 4578 p->sched_class = &rt_sched_class; 4579 else 4580 p->sched_class = &fair_sched_class; 4581 4582 init_entity_runnable_average(&p->se); 4583 4584 4585 #ifdef CONFIG_SCHED_INFO 4586 if (likely(sched_info_on())) 4587 memset(&p->sched_info, 0, sizeof(p->sched_info)); 4588 #endif 4589 #if defined(CONFIG_SMP) 4590 p->on_cpu = 0; 4591 #endif 4592 init_task_preempt_count(p); 4593 #ifdef CONFIG_SMP 4594 plist_node_init(&p->pushable_tasks, MAX_PRIO); 4595 RB_CLEAR_NODE(&p->pushable_dl_tasks); 4596 #endif 4597 return 0; 4598 } 4599 4600 void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) 4601 { 4602 unsigned long flags; 4603 4604 /* 4605 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly 4606 * required yet, but lockdep gets upset if rules are violated. 4607 */ 4608 raw_spin_lock_irqsave(&p->pi_lock, flags); 4609 #ifdef CONFIG_CGROUP_SCHED 4610 if (1) { 4611 struct task_group *tg; 4612 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], 4613 struct task_group, css); 4614 tg = autogroup_task_group(p, tg); 4615 p->sched_task_group = tg; 4616 } 4617 #endif 4618 rseq_migrate(p); 4619 /* 4620 * We're setting the CPU for the first time, we don't migrate, 4621 * so use __set_task_cpu(). 4622 */ 4623 __set_task_cpu(p, smp_processor_id()); 4624 if (p->sched_class->task_fork) 4625 p->sched_class->task_fork(p); 4626 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4627 } 4628 4629 void sched_post_fork(struct task_struct *p) 4630 { 4631 uclamp_post_fork(p); 4632 } 4633 4634 unsigned long to_ratio(u64 period, u64 runtime) 4635 { 4636 if (runtime == RUNTIME_INF) 4637 return BW_UNIT; 4638 4639 /* 4640 * Doing this here saves a lot of checks in all 4641 * the calling paths, and returning zero seems 4642 * safe for them anyway. 4643 */ 4644 if (period == 0) 4645 return 0; 4646 4647 return div64_u64(runtime << BW_SHIFT, period); 4648 } 4649 4650 /* 4651 * wake_up_new_task - wake up a newly created task for the first time. 4652 * 4653 * This function will do some initial scheduler statistics housekeeping 4654 * that must be done for every newly created context, then puts the task 4655 * on the runqueue and wakes it. 4656 */ 4657 void wake_up_new_task(struct task_struct *p) 4658 { 4659 struct rq_flags rf; 4660 struct rq *rq; 4661 4662 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 4663 WRITE_ONCE(p->__state, TASK_RUNNING); 4664 #ifdef CONFIG_SMP 4665 /* 4666 * Fork balancing, do it here and not earlier because: 4667 * - cpus_ptr can change in the fork path 4668 * - any previously selected CPU might disappear through hotplug 4669 * 4670 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 4671 * as we're not fully set-up yet. 4672 */ 4673 p->recent_used_cpu = task_cpu(p); 4674 rseq_migrate(p); 4675 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK)); 4676 #endif 4677 rq = __task_rq_lock(p, &rf); 4678 update_rq_clock(rq); 4679 post_init_entity_util_avg(p); 4680 4681 activate_task(rq, p, ENQUEUE_NOCLOCK); 4682 trace_sched_wakeup_new(p); 4683 check_preempt_curr(rq, p, WF_FORK); 4684 #ifdef CONFIG_SMP 4685 if (p->sched_class->task_woken) { 4686 /* 4687 * Nothing relies on rq->lock after this, so it's fine to 4688 * drop it. 
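 * The rq_unpin_lock()/rq_repin_lock() pair below makes that explicit:
 * task_woken() (e.g. the RT/DL push callbacks) may drop and re-acquire
 * rq->lock, and unpinning tells the rq-pin debugging that this is
 * intentional.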
4689 */ 4690 rq_unpin_lock(rq, &rf); 4691 p->sched_class->task_woken(rq, p); 4692 rq_repin_lock(rq, &rf); 4693 } 4694 #endif 4695 task_rq_unlock(rq, p, &rf); 4696 } 4697 4698 #ifdef CONFIG_PREEMPT_NOTIFIERS 4699 4700 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); 4701 4702 void preempt_notifier_inc(void) 4703 { 4704 static_branch_inc(&preempt_notifier_key); 4705 } 4706 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 4707 4708 void preempt_notifier_dec(void) 4709 { 4710 static_branch_dec(&preempt_notifier_key); 4711 } 4712 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 4713 4714 /** 4715 * preempt_notifier_register - tell me when current is being preempted & rescheduled 4716 * @notifier: notifier struct to register 4717 */ 4718 void preempt_notifier_register(struct preempt_notifier *notifier) 4719 { 4720 if (!static_branch_unlikely(&preempt_notifier_key)) 4721 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 4722 4723 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 4724 } 4725 EXPORT_SYMBOL_GPL(preempt_notifier_register); 4726 4727 /** 4728 * preempt_notifier_unregister - no longer interested in preemption notifications 4729 * @notifier: notifier struct to unregister 4730 * 4731 * This is *not* safe to call from within a preemption notifier. 4732 */ 4733 void preempt_notifier_unregister(struct preempt_notifier *notifier) 4734 { 4735 hlist_del(¬ifier->link); 4736 } 4737 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 4738 4739 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 4740 { 4741 struct preempt_notifier *notifier; 4742 4743 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 4744 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 4745 } 4746 4747 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 4748 { 4749 if (static_branch_unlikely(&preempt_notifier_key)) 4750 __fire_sched_in_preempt_notifiers(curr); 4751 } 4752 4753 static void 4754 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 4755 struct task_struct *next) 4756 { 4757 struct preempt_notifier *notifier; 4758 4759 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 4760 notifier->ops->sched_out(notifier, next); 4761 } 4762 4763 static __always_inline void 4764 fire_sched_out_preempt_notifiers(struct task_struct *curr, 4765 struct task_struct *next) 4766 { 4767 if (static_branch_unlikely(&preempt_notifier_key)) 4768 __fire_sched_out_preempt_notifiers(curr, next); 4769 } 4770 4771 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 4772 4773 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 4774 { 4775 } 4776 4777 static inline void 4778 fire_sched_out_preempt_notifiers(struct task_struct *curr, 4779 struct task_struct *next) 4780 { 4781 } 4782 4783 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 4784 4785 static inline void prepare_task(struct task_struct *next) 4786 { 4787 #ifdef CONFIG_SMP 4788 /* 4789 * Claim the task as running, we do this before switching to it 4790 * such that any running task will have this set. 4791 * 4792 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and 4793 * its ordering comment. 4794 */ 4795 WRITE_ONCE(next->on_cpu, 1); 4796 #endif 4797 } 4798 4799 static inline void finish_task(struct task_struct *prev) 4800 { 4801 #ifdef CONFIG_SMP 4802 /* 4803 * This must be the very last reference to @prev from this CPU. After 4804 * p->on_cpu is cleared, the task can be moved to a different CPU. We 4805 * must ensure this doesn't happen until the switch is completely 4806 * finished. 
4807 *
4808 * In particular, the load of prev->state in finish_task_switch() must
4809 * happen before this.
4810 *
4811 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4812 */
4813 smp_store_release(&prev->on_cpu, 0);
4814 #endif
4815 }
4816
4817 #ifdef CONFIG_SMP
4818
4819 static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
4820 {
4821 void (*func)(struct rq *rq);
4822 struct callback_head *next;
4823
4824 lockdep_assert_rq_held(rq);
4825
4826 while (head) {
4827 func = (void (*)(struct rq *))head->func;
4828 next = head->next;
4829 head->next = NULL;
4830 head = next;
4831
4832 func(rq);
4833 }
4834 }
4835
4836 static void balance_push(struct rq *rq);
4837
4838 /*
4839 * balance_push_callback is a right abuse of the callback interface and plays
4840 * by significantly different rules.
4841 *
4842 * Where the normal balance_callback's purpose is to be run in the same context
4843 * that queued it (only later, when it's safe to drop rq->lock again),
4844 * balance_push_callback is specifically targeted at __schedule().
4845 *
4846 * This abuse is tolerated because it places all the unlikely/odd cases behind
4847 * a single test, namely: rq->balance_callback == NULL.
4848 */
4849 struct callback_head balance_push_callback = {
4850 .next = NULL,
4851 .func = (void (*)(struct callback_head *))balance_push,
4852 };
4853
4854 static inline struct callback_head *
4855 __splice_balance_callbacks(struct rq *rq, bool split)
4856 {
4857 struct callback_head *head = rq->balance_callback;
4858
4859 if (likely(!head))
4860 return NULL;
4861
4862 lockdep_assert_rq_held(rq);
4863 /*
4864 * Must not take balance_push_callback off the list when
4865 * splice_balance_callbacks() and balance_callbacks() are not
4866 * in the same rq->lock section.
4867 *
4868 * In that case it would be possible for __schedule() to interleave
4869 * and observe the list empty.
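 * IOW: when the splice and the later balance_callbacks() invocation sit
 * in separate rq->lock sections, the balance_push_callback marker has
 * to stay on the list so that a concurrent __schedule() still sees it.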
4870 */ 4871 if (split && head == &balance_push_callback) 4872 head = NULL; 4873 else 4874 rq->balance_callback = NULL; 4875 4876 return head; 4877 } 4878 4879 static inline struct callback_head *splice_balance_callbacks(struct rq *rq) 4880 { 4881 return __splice_balance_callbacks(rq, true); 4882 } 4883 4884 static void __balance_callbacks(struct rq *rq) 4885 { 4886 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false)); 4887 } 4888 4889 static inline void balance_callbacks(struct rq *rq, struct callback_head *head) 4890 { 4891 unsigned long flags; 4892 4893 if (unlikely(head)) { 4894 raw_spin_rq_lock_irqsave(rq, flags); 4895 do_balance_callbacks(rq, head); 4896 raw_spin_rq_unlock_irqrestore(rq, flags); 4897 } 4898 } 4899 4900 #else 4901 4902 static inline void __balance_callbacks(struct rq *rq) 4903 { 4904 } 4905 4906 static inline struct callback_head *splice_balance_callbacks(struct rq *rq) 4907 { 4908 return NULL; 4909 } 4910 4911 static inline void balance_callbacks(struct rq *rq, struct callback_head *head) 4912 { 4913 } 4914 4915 #endif 4916 4917 static inline void 4918 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) 4919 { 4920 /* 4921 * Since the runqueue lock will be released by the next 4922 * task (which is an invalid locking op but in the case 4923 * of the scheduler it's an obvious special-case), so we 4924 * do an early lockdep release here: 4925 */ 4926 rq_unpin_lock(rq, rf); 4927 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); 4928 #ifdef CONFIG_DEBUG_SPINLOCK 4929 /* this is a valid case when another task releases the spinlock */ 4930 rq_lockp(rq)->owner = next; 4931 #endif 4932 } 4933 4934 static inline void finish_lock_switch(struct rq *rq) 4935 { 4936 /* 4937 * If we are tracking spinlock dependencies then we have to 4938 * fix up the runqueue lock - which gets 'carried over' from 4939 * prev into current: 4940 */ 4941 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); 4942 __balance_callbacks(rq); 4943 raw_spin_rq_unlock_irq(rq); 4944 } 4945 4946 /* 4947 * NOP if the arch has not defined these: 4948 */ 4949 4950 #ifndef prepare_arch_switch 4951 # define prepare_arch_switch(next) do { } while (0) 4952 #endif 4953 4954 #ifndef finish_arch_post_lock_switch 4955 # define finish_arch_post_lock_switch() do { } while (0) 4956 #endif 4957 4958 static inline void kmap_local_sched_out(void) 4959 { 4960 #ifdef CONFIG_KMAP_LOCAL 4961 if (unlikely(current->kmap_ctrl.idx)) 4962 __kmap_local_sched_out(); 4963 #endif 4964 } 4965 4966 static inline void kmap_local_sched_in(void) 4967 { 4968 #ifdef CONFIG_KMAP_LOCAL 4969 if (unlikely(current->kmap_ctrl.idx)) 4970 __kmap_local_sched_in(); 4971 #endif 4972 } 4973 4974 /** 4975 * prepare_task_switch - prepare to switch tasks 4976 * @rq: the runqueue preparing to switch 4977 * @prev: the current task that is being switched out 4978 * @next: the task we are going to switch to. 4979 * 4980 * This is called with the rq lock held and interrupts off. It must 4981 * be paired with a subsequent finish_task_switch after the context 4982 * switch. 4983 * 4984 * prepare_task_switch sets up locking and calls architecture specific 4985 * hooks. 
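 * finish_task_switch() undoes this work in (roughly) the reverse order:
 * finish_task() clears ->on_cpu, finish_lock_switch() releases rq->lock,
 * and the sched_in notifiers and kmap_local state are restored there.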
4986 */
4987 static inline void
4988 prepare_task_switch(struct rq *rq, struct task_struct *prev,
4989 struct task_struct *next)
4990 {
4991 kcov_prepare_switch(prev);
4992 sched_info_switch(rq, prev, next);
4993 perf_event_task_sched_out(prev, next);
4994 rseq_preempt(prev);
4995 fire_sched_out_preempt_notifiers(prev, next);
4996 kmap_local_sched_out();
4997 prepare_task(next);
4998 prepare_arch_switch(next);
4999 }
5000
5001 /**
5002 * finish_task_switch - clean up after a task-switch
5003 * @prev: the thread we just switched away from.
5004 *
5005 * finish_task_switch must be called after the context switch, paired
5006 * with a prepare_task_switch call before the context switch.
5007 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5008 * and do any other architecture-specific cleanup actions.
5009 *
5010 * Note that we may have delayed dropping an mm in context_switch(). If
5011 * so, we finish that here outside of the runqueue lock. (Doing it
5012 * with the lock held can cause deadlocks; see schedule() for
5013 * details.)
5014 *
5015 * The context switch has flipped the stack from under us and restored the
5016 * local variables which were saved when this task called schedule() in the
5017 * past. prev == current is still correct but we need to recalculate this_rq
5018 * because prev may have moved to another CPU.
5019 */
5020 static struct rq *finish_task_switch(struct task_struct *prev)
5021 __releases(rq->lock)
5022 {
5023 struct rq *rq = this_rq();
5024 struct mm_struct *mm = rq->prev_mm;
5025 unsigned int prev_state;
5026
5027 /*
5028 * The previous task will have left us with a preempt_count of 2
5029 * because it left us after:
5030 *
5031 * schedule()
5032 * preempt_disable(); // 1
5033 * __schedule()
5034 * raw_spin_lock_irq(&rq->lock) // 2
5035 *
5036 * Also, see FORK_PREEMPT_COUNT.
5037 */
5038 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5039 "corrupted preempt_count: %s/%d/0x%x\n",
5040 current->comm, current->pid, preempt_count()))
5041 preempt_count_set(FORK_PREEMPT_COUNT);
5042
5043 rq->prev_mm = NULL;
5044
5045 /*
5046 * A task struct has one reference for the use as "current".
5047 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5048 * schedule() one last time. The schedule() call will never return, and
5049 * the scheduled task must drop that reference.
5050 *
5051 * We must observe prev->state before clearing prev->on_cpu (in
5052 * finish_task), otherwise a concurrent wakeup can get prev
5053 * running on another CPU and we could race with its RUNNING -> DEAD
5054 * transition, resulting in a double drop.
5055 */
5056 prev_state = READ_ONCE(prev->__state);
5057 vtime_task_switch(prev);
5058 perf_event_task_sched_in(prev, current);
5059 finish_task(prev);
5060 tick_nohz_task_switch();
5061 finish_lock_switch(rq);
5062 finish_arch_post_lock_switch();
5063 kcov_finish_switch(current);
5064 /*
5065 * kmap_local_sched_out() is invoked with rq::lock held and
5066 * interrupts disabled. There is no requirement for that, but the
5067 * sched out code does not have an interrupt enabled section.
5068 * Restoring the maps on sched in does not require interrupts being
5069 * disabled either.
5070 */
5071 kmap_local_sched_in();
5072
5073 fire_sched_in_preempt_notifiers(current);
5074 /*
5075 * When switching through a kernel thread, the loop in
5076 * membarrier_{private,global}_expedited() may have observed that
5077 * kernel thread and not issued an IPI.
It is therefore possible to 5078 * schedule between user->kernel->user threads without passing though 5079 * switch_mm(). Membarrier requires a barrier after storing to 5080 * rq->curr, before returning to userspace, so provide them here: 5081 * 5082 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly 5083 * provided by mmdrop(), 5084 * - a sync_core for SYNC_CORE. 5085 */ 5086 if (mm) { 5087 membarrier_mm_sync_core_before_usermode(mm); 5088 mmdrop_sched(mm); 5089 } 5090 if (unlikely(prev_state == TASK_DEAD)) { 5091 if (prev->sched_class->task_dead) 5092 prev->sched_class->task_dead(prev); 5093 5094 /* Task is done with its stack. */ 5095 put_task_stack(prev); 5096 5097 put_task_struct_rcu_user(prev); 5098 } 5099 5100 return rq; 5101 } 5102 5103 /** 5104 * schedule_tail - first thing a freshly forked thread must call. 5105 * @prev: the thread we just switched away from. 5106 */ 5107 asmlinkage __visible void schedule_tail(struct task_struct *prev) 5108 __releases(rq->lock) 5109 { 5110 /* 5111 * New tasks start with FORK_PREEMPT_COUNT, see there and 5112 * finish_task_switch() for details. 5113 * 5114 * finish_task_switch() will drop rq->lock() and lower preempt_count 5115 * and the preempt_enable() will end up enabling preemption (on 5116 * PREEMPT_COUNT kernels). 5117 */ 5118 5119 finish_task_switch(prev); 5120 preempt_enable(); 5121 5122 if (current->set_child_tid) 5123 put_user(task_pid_vnr(current), current->set_child_tid); 5124 5125 calculate_sigpending(); 5126 } 5127 5128 /* 5129 * context_switch - switch to the new MM and the new thread's register state. 5130 */ 5131 static __always_inline struct rq * 5132 context_switch(struct rq *rq, struct task_struct *prev, 5133 struct task_struct *next, struct rq_flags *rf) 5134 { 5135 prepare_task_switch(rq, prev, next); 5136 5137 /* 5138 * For paravirt, this is coupled with an exit in switch_to to 5139 * combine the page table reload and the switch backend into 5140 * one hypercall. 5141 */ 5142 arch_start_context_switch(prev); 5143 5144 /* 5145 * kernel -> kernel lazy + transfer active 5146 * user -> kernel lazy + mmgrab() active 5147 * 5148 * kernel -> user switch + mmdrop() active 5149 * user -> user switch 5150 */ 5151 if (!next->mm) { // to kernel 5152 enter_lazy_tlb(prev->active_mm, next); 5153 5154 next->active_mm = prev->active_mm; 5155 if (prev->mm) // from user 5156 mmgrab(prev->active_mm); 5157 else 5158 prev->active_mm = NULL; 5159 } else { // to user 5160 membarrier_switch_mm(rq, prev->active_mm, next->mm); 5161 /* 5162 * sys_membarrier() requires an smp_mb() between setting 5163 * rq->curr / membarrier_switch_mm() and returning to userspace. 5164 * 5165 * The below provides this either through switch_mm(), or in 5166 * case 'prev->active_mm == next->mm' through 5167 * finish_task_switch()'s mmdrop(). 5168 */ 5169 switch_mm_irqs_off(prev->active_mm, next->mm, next); 5170 5171 if (!prev->mm) { // from kernel 5172 /* will mmdrop() in finish_task_switch(). */ 5173 rq->prev_mm = prev->active_mm; 5174 prev->active_mm = NULL; 5175 } 5176 } 5177 5178 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 5179 5180 prepare_lock_switch(rq, next, rf); 5181 5182 /* Here we just switch the register state and the stack. 
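 * switch_to() only "returns" here once this task is picked to run
 * again; by then 'prev' names whichever task ran immediately before us
 * on this CPU, which is what finish_task_switch(prev) below expects.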
*/
5183 switch_to(prev, next, prev);
5184 barrier();
5185
5186 return finish_task_switch(prev);
5187 }
5188
5189 /*
5190 * nr_running and nr_context_switches:
5191 *
5192 * externally visible scheduler statistics: current number of runnable
5193 * threads, total number of context switches performed since bootup.
5194 */
5195 unsigned int nr_running(void)
5196 {
5197 unsigned int i, sum = 0;
5198
5199 for_each_online_cpu(i)
5200 sum += cpu_rq(i)->nr_running;
5201
5202 return sum;
5203 }
5204
5205 /*
5206 * Check if only the current task is running on the CPU.
5207 *
5208 * Caution: this function does not check that the caller has disabled
5209 * preemption, thus the result might have a time-of-check-to-time-of-use
5210 * race. The caller is responsible for using it correctly, for example:
5211 *
5212 * - from a non-preemptible section (of course)
5213 *
5214 * - from a thread that is bound to a single CPU
5215 *
5216 * - in a loop with very short iterations (e.g. a polling loop)
5217 */
5218 bool single_task_running(void)
5219 {
5220 return raw_rq()->nr_running == 1;
5221 }
5222 EXPORT_SYMBOL(single_task_running);
5223
5224 unsigned long long nr_context_switches(void)
5225 {
5226 int i;
5227 unsigned long long sum = 0;
5228
5229 for_each_possible_cpu(i)
5230 sum += cpu_rq(i)->nr_switches;
5231
5232 return sum;
5233 }
5234
5235 /*
5236 * Consumers of these two interfaces, like for example the cpuidle menu
5237 * governor, are using nonsensical data: they prefer a shallow idle state
5238 * for a CPU that has IO-wait, even though the waiting task might not even
5239 * end up running on that CPU when it does become runnable.
5240 */
5241
5242 unsigned int nr_iowait_cpu(int cpu)
5243 {
5244 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5245 }
5246
5247 /*
5248 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5249 *
5250 * The idea behind IO-wait accounting is to account the idle time that we could
5251 * have spent running if it were not for IO. That is, if we were to improve the
5252 * storage performance, we'd have a proportional reduction in IO-wait time.
5253 *
5254 * This all works nicely on UP, where, when a task blocks on IO, we account
5255 * idle time as IO-wait, because if the storage were faster, it could've been
5256 * running and we'd not be idle.
5257 *
5258 * This has been extended to SMP, by doing the same for each CPU. This however
5259 * is broken.
5260 *
5261 * Imagine for instance the case where two tasks block on one CPU: only that
5262 * one CPU will have IO-wait accounted, while the other has regular idle. Even
5263 * though, if the storage were faster, both could've run at the same time,
5264 * utilising both CPUs.
5265 *
5266 * This means that, when looking globally, the current IO-wait accounting on
5267 * SMP is a lower bound, due to under-accounting.
5268 *
5269 * Worse, since the numbers are provided per CPU, they are sometimes
5270 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5271 * associated with any one particular CPU; it can wake on a different CPU than
5272 * the one it blocked on. This means the per-CPU IO-wait number is meaningless.
5273 *
5274 * Task CPU affinities can make all that even more 'interesting'.
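 * Bottom line: treat nr_iowait() and nr_iowait_cpu() as rough hints at
 * best, and do not base per-CPU policy on them.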
5275 */ 5276 5277 unsigned int nr_iowait(void) 5278 { 5279 unsigned int i, sum = 0; 5280 5281 for_each_possible_cpu(i) 5282 sum += nr_iowait_cpu(i); 5283 5284 return sum; 5285 } 5286 5287 #ifdef CONFIG_SMP 5288 5289 /* 5290 * sched_exec - execve() is a valuable balancing opportunity, because at 5291 * this point the task has the smallest effective memory and cache footprint. 5292 */ 5293 void sched_exec(void) 5294 { 5295 struct task_struct *p = current; 5296 unsigned long flags; 5297 int dest_cpu; 5298 5299 raw_spin_lock_irqsave(&p->pi_lock, flags); 5300 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); 5301 if (dest_cpu == smp_processor_id()) 5302 goto unlock; 5303 5304 if (likely(cpu_active(dest_cpu))) { 5305 struct migration_arg arg = { p, dest_cpu }; 5306 5307 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 5308 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 5309 return; 5310 } 5311 unlock: 5312 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 5313 } 5314 5315 #endif 5316 5317 DEFINE_PER_CPU(struct kernel_stat, kstat); 5318 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 5319 5320 EXPORT_PER_CPU_SYMBOL(kstat); 5321 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 5322 5323 /* 5324 * The function fair_sched_class.update_curr accesses the struct curr 5325 * and its field curr->exec_start; when called from task_sched_runtime(), 5326 * we observe a high rate of cache misses in practice. 5327 * Prefetching this data results in improved performance. 5328 */ 5329 static inline void prefetch_curr_exec_start(struct task_struct *p) 5330 { 5331 #ifdef CONFIG_FAIR_GROUP_SCHED 5332 struct sched_entity *curr = (&p->se)->cfs_rq->curr; 5333 #else 5334 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; 5335 #endif 5336 prefetch(curr); 5337 prefetch(&curr->exec_start); 5338 } 5339 5340 /* 5341 * Return accounted runtime for the task. 5342 * In case the task is currently running, return the runtime plus current's 5343 * pending runtime that have not been accounted yet. 5344 */ 5345 unsigned long long task_sched_runtime(struct task_struct *p) 5346 { 5347 struct rq_flags rf; 5348 struct rq *rq; 5349 u64 ns; 5350 5351 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 5352 /* 5353 * 64-bit doesn't need locks to atomically read a 64-bit value. 5354 * So we have a optimization chance when the task's delta_exec is 0. 5355 * Reading ->on_cpu is racy, but this is ok. 5356 * 5357 * If we race with it leaving CPU, we'll take a lock. So we're correct. 5358 * If we race with it entering CPU, unaccounted time is 0. This is 5359 * indistinguishable from the read occurring a few cycles earlier. 5360 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 5361 * been accounted, so we're correct here as well. 5362 */ 5363 if (!p->on_cpu || !task_on_rq_queued(p)) 5364 return p->se.sum_exec_runtime; 5365 #endif 5366 5367 rq = task_rq_lock(p, &rf); 5368 /* 5369 * Must be ->curr _and_ ->on_rq. If dequeued, we would 5370 * project cycles that may never be accounted to this 5371 * thread, breaking clock_gettime(). 
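 * (This is e.g. what clock_gettime(CLOCK_THREAD_CPUTIME_ID) ends up
 * calling via the posix-cpu-timers code.)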
5372 */ 5373 if (task_current(rq, p) && task_on_rq_queued(p)) { 5374 prefetch_curr_exec_start(p); 5375 update_rq_clock(rq); 5376 p->sched_class->update_curr(rq); 5377 } 5378 ns = p->se.sum_exec_runtime; 5379 task_rq_unlock(rq, p, &rf); 5380 5381 return ns; 5382 } 5383 5384 #ifdef CONFIG_SCHED_DEBUG 5385 static u64 cpu_resched_latency(struct rq *rq) 5386 { 5387 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms); 5388 u64 resched_latency, now = rq_clock(rq); 5389 static bool warned_once; 5390 5391 if (sysctl_resched_latency_warn_once && warned_once) 5392 return 0; 5393 5394 if (!need_resched() || !latency_warn_ms) 5395 return 0; 5396 5397 if (system_state == SYSTEM_BOOTING) 5398 return 0; 5399 5400 if (!rq->last_seen_need_resched_ns) { 5401 rq->last_seen_need_resched_ns = now; 5402 rq->ticks_without_resched = 0; 5403 return 0; 5404 } 5405 5406 rq->ticks_without_resched++; 5407 resched_latency = now - rq->last_seen_need_resched_ns; 5408 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC) 5409 return 0; 5410 5411 warned_once = true; 5412 5413 return resched_latency; 5414 } 5415 5416 static int __init setup_resched_latency_warn_ms(char *str) 5417 { 5418 long val; 5419 5420 if ((kstrtol(str, 0, &val))) { 5421 pr_warn("Unable to set resched_latency_warn_ms\n"); 5422 return 1; 5423 } 5424 5425 sysctl_resched_latency_warn_ms = val; 5426 return 1; 5427 } 5428 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms); 5429 #else 5430 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; } 5431 #endif /* CONFIG_SCHED_DEBUG */ 5432 5433 /* 5434 * This function gets called by the timer code, with HZ frequency. 5435 * We call it with interrupts disabled. 5436 */ 5437 void scheduler_tick(void) 5438 { 5439 int cpu = smp_processor_id(); 5440 struct rq *rq = cpu_rq(cpu); 5441 struct task_struct *curr = rq->curr; 5442 struct rq_flags rf; 5443 unsigned long thermal_pressure; 5444 u64 resched_latency; 5445 5446 arch_scale_freq_tick(); 5447 sched_clock_tick(); 5448 5449 rq_lock(rq, &rf); 5450 5451 update_rq_clock(rq); 5452 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); 5453 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); 5454 curr->sched_class->task_tick(rq, curr, 0); 5455 if (sched_feat(LATENCY_WARN)) 5456 resched_latency = cpu_resched_latency(rq); 5457 calc_global_load_tick(rq); 5458 sched_core_tick(rq); 5459 5460 rq_unlock(rq, &rf); 5461 5462 if (sched_feat(LATENCY_WARN) && resched_latency) 5463 resched_latency_warn(cpu, resched_latency); 5464 5465 perf_event_task_tick(); 5466 5467 #ifdef CONFIG_SMP 5468 rq->idle_balance = idle_cpu(cpu); 5469 trigger_load_balance(rq); 5470 #endif 5471 } 5472 5473 #ifdef CONFIG_NO_HZ_FULL 5474 5475 struct tick_work { 5476 int cpu; 5477 atomic_t state; 5478 struct delayed_work work; 5479 }; 5480 /* Values for ->state, see diagram below. */ 5481 #define TICK_SCHED_REMOTE_OFFLINE 0 5482 #define TICK_SCHED_REMOTE_OFFLINING 1 5483 #define TICK_SCHED_REMOTE_RUNNING 2 5484 5485 /* 5486 * State diagram for ->state: 5487 * 5488 * 5489 * TICK_SCHED_REMOTE_OFFLINE 5490 * | ^ 5491 * | | 5492 * | | sched_tick_remote() 5493 * | | 5494 * | | 5495 * +--TICK_SCHED_REMOTE_OFFLINING 5496 * | ^ 5497 * | | 5498 * sched_tick_start() | | sched_tick_stop() 5499 * | | 5500 * V | 5501 * TICK_SCHED_REMOTE_RUNNING 5502 * 5503 * 5504 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() 5505 * and sched_tick_start() are happy to leave the state in RUNNING. 
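 * These transitions are driven by the atomic_xchg() calls in
 * sched_tick_start()/sched_tick_stop() and by atomic_fetch_add_unless()
 * in sched_tick_remote() below.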
5506 */ 5507 5508 static struct tick_work __percpu *tick_work_cpu; 5509 5510 static void sched_tick_remote(struct work_struct *work) 5511 { 5512 struct delayed_work *dwork = to_delayed_work(work); 5513 struct tick_work *twork = container_of(dwork, struct tick_work, work); 5514 int cpu = twork->cpu; 5515 struct rq *rq = cpu_rq(cpu); 5516 struct task_struct *curr; 5517 struct rq_flags rf; 5518 u64 delta; 5519 int os; 5520 5521 /* 5522 * Handle the tick only if it appears the remote CPU is running in full 5523 * dynticks mode. The check is racy by nature, but missing a tick or 5524 * having one too much is no big deal because the scheduler tick updates 5525 * statistics and checks timeslices in a time-independent way, regardless 5526 * of when exactly it is running. 5527 */ 5528 if (!tick_nohz_tick_stopped_cpu(cpu)) 5529 goto out_requeue; 5530 5531 rq_lock_irq(rq, &rf); 5532 curr = rq->curr; 5533 if (cpu_is_offline(cpu)) 5534 goto out_unlock; 5535 5536 update_rq_clock(rq); 5537 5538 if (!is_idle_task(curr)) { 5539 /* 5540 * Make sure the next tick runs within a reasonable 5541 * amount of time. 5542 */ 5543 delta = rq_clock_task(rq) - curr->se.exec_start; 5544 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); 5545 } 5546 curr->sched_class->task_tick(rq, curr, 0); 5547 5548 calc_load_nohz_remote(rq); 5549 out_unlock: 5550 rq_unlock_irq(rq, &rf); 5551 out_requeue: 5552 5553 /* 5554 * Run the remote tick once per second (1Hz). This arbitrary 5555 * frequency is large enough to avoid overload but short enough 5556 * to keep scheduler internal stats reasonably up to date. But 5557 * first update state to reflect hotplug activity if required. 5558 */ 5559 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); 5560 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); 5561 if (os == TICK_SCHED_REMOTE_RUNNING) 5562 queue_delayed_work(system_unbound_wq, dwork, HZ); 5563 } 5564 5565 static void sched_tick_start(int cpu) 5566 { 5567 int os; 5568 struct tick_work *twork; 5569 5570 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5571 return; 5572 5573 WARN_ON_ONCE(!tick_work_cpu); 5574 5575 twork = per_cpu_ptr(tick_work_cpu, cpu); 5576 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); 5577 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); 5578 if (os == TICK_SCHED_REMOTE_OFFLINE) { 5579 twork->cpu = cpu; 5580 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); 5581 queue_delayed_work(system_unbound_wq, &twork->work, HZ); 5582 } 5583 } 5584 5585 #ifdef CONFIG_HOTPLUG_CPU 5586 static void sched_tick_stop(int cpu) 5587 { 5588 struct tick_work *twork; 5589 int os; 5590 5591 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5592 return; 5593 5594 WARN_ON_ONCE(!tick_work_cpu); 5595 5596 twork = per_cpu_ptr(tick_work_cpu, cpu); 5597 /* There cannot be competing actions, but don't rely on stop-machine. */ 5598 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); 5599 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); 5600 /* Don't cancel, as this would mess up the state machine. 
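 * The queued work simply expires: the next sched_tick_remote() run
 * observes OFFLINING via atomic_fetch_add_unless(), steps the state
 * down to OFFLINE and does not re-queue itself.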
*/ 5601 } 5602 #endif /* CONFIG_HOTPLUG_CPU */ 5603 5604 int __init sched_tick_offload_init(void) 5605 { 5606 tick_work_cpu = alloc_percpu(struct tick_work); 5607 BUG_ON(!tick_work_cpu); 5608 return 0; 5609 } 5610 5611 #else /* !CONFIG_NO_HZ_FULL */ 5612 static inline void sched_tick_start(int cpu) { } 5613 static inline void sched_tick_stop(int cpu) { } 5614 #endif 5615 5616 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ 5617 defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 5618 /* 5619 * If the value passed in is equal to the current preempt count 5620 * then we just disabled preemption. Start timing the latency. 5621 */ 5622 static inline void preempt_latency_start(int val) 5623 { 5624 if (preempt_count() == val) { 5625 unsigned long ip = get_lock_parent_ip(); 5626 #ifdef CONFIG_DEBUG_PREEMPT 5627 current->preempt_disable_ip = ip; 5628 #endif 5629 trace_preempt_off(CALLER_ADDR0, ip); 5630 } 5631 } 5632 5633 void preempt_count_add(int val) 5634 { 5635 #ifdef CONFIG_DEBUG_PREEMPT 5636 /* 5637 * Underflow? 5638 */ 5639 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 5640 return; 5641 #endif 5642 __preempt_count_add(val); 5643 #ifdef CONFIG_DEBUG_PREEMPT 5644 /* 5645 * Spinlock count overflowing soon? 5646 */ 5647 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 5648 PREEMPT_MASK - 10); 5649 #endif 5650 preempt_latency_start(val); 5651 } 5652 EXPORT_SYMBOL(preempt_count_add); 5653 NOKPROBE_SYMBOL(preempt_count_add); 5654 5655 /* 5656 * If the value passed in equals to the current preempt count 5657 * then we just enabled preemption. Stop timing the latency. 5658 */ 5659 static inline void preempt_latency_stop(int val) 5660 { 5661 if (preempt_count() == val) 5662 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 5663 } 5664 5665 void preempt_count_sub(int val) 5666 { 5667 #ifdef CONFIG_DEBUG_PREEMPT 5668 /* 5669 * Underflow? 5670 */ 5671 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 5672 return; 5673 /* 5674 * Is the spinlock portion underflowing? 
5675 */ 5676 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 5677 !(preempt_count() & PREEMPT_MASK))) 5678 return; 5679 #endif 5680 5681 preempt_latency_stop(val); 5682 __preempt_count_sub(val); 5683 } 5684 EXPORT_SYMBOL(preempt_count_sub); 5685 NOKPROBE_SYMBOL(preempt_count_sub); 5686 5687 #else 5688 static inline void preempt_latency_start(int val) { } 5689 static inline void preempt_latency_stop(int val) { } 5690 #endif 5691 5692 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 5693 { 5694 #ifdef CONFIG_DEBUG_PREEMPT 5695 return p->preempt_disable_ip; 5696 #else 5697 return 0; 5698 #endif 5699 } 5700 5701 /* 5702 * Print scheduling while atomic bug: 5703 */ 5704 static noinline void __schedule_bug(struct task_struct *prev) 5705 { 5706 /* Save this before calling printk(), since that will clobber it */ 5707 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 5708 5709 if (oops_in_progress) 5710 return; 5711 5712 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 5713 prev->comm, prev->pid, preempt_count()); 5714 5715 debug_show_held_locks(prev); 5716 print_modules(); 5717 if (irqs_disabled()) 5718 print_irqtrace_events(prev); 5719 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 5720 && in_atomic_preempt_off()) { 5721 pr_err("Preemption disabled at:"); 5722 print_ip_sym(KERN_ERR, preempt_disable_ip); 5723 } 5724 if (panic_on_warn) 5725 panic("scheduling while atomic\n"); 5726 5727 dump_stack(); 5728 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 5729 } 5730 5731 /* 5732 * Various schedule()-time debugging checks and statistics: 5733 */ 5734 static inline void schedule_debug(struct task_struct *prev, bool preempt) 5735 { 5736 #ifdef CONFIG_SCHED_STACK_END_CHECK 5737 if (task_stack_end_corrupted(prev)) 5738 panic("corrupted stack end detected inside scheduler\n"); 5739 5740 if (task_scs_end_corrupted(prev)) 5741 panic("corrupted shadow stack detected inside scheduler\n"); 5742 #endif 5743 5744 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 5745 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { 5746 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", 5747 prev->comm, prev->pid, prev->non_block_count); 5748 dump_stack(); 5749 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 5750 } 5751 #endif 5752 5753 if (unlikely(in_atomic_preempt_off())) { 5754 __schedule_bug(prev); 5755 preempt_count_set(PREEMPT_DISABLED); 5756 } 5757 rcu_sleep_check(); 5758 SCHED_WARN_ON(ct_state() == CONTEXT_USER); 5759 5760 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 5761 5762 schedstat_inc(this_rq()->sched_count); 5763 } 5764 5765 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, 5766 struct rq_flags *rf) 5767 { 5768 #ifdef CONFIG_SMP 5769 const struct sched_class *class; 5770 /* 5771 * We must do the balancing pass before put_prev_task(), such 5772 * that when we release the rq->lock the task is in the same 5773 * state as before we took rq->lock. 5774 * 5775 * We can terminate the balance pass as soon as we know there is 5776 * a runnable task of @class priority or higher. 
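 * (The for_class_range() loop below starts at prev's class and stops
 * before the idle class.)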
5777 */ 5778 for_class_range(class, prev->sched_class, &idle_sched_class) { 5779 if (class->balance(rq, prev, rf)) 5780 break; 5781 } 5782 #endif 5783 5784 put_prev_task(rq, prev); 5785 } 5786 5787 /* 5788 * Pick up the highest-prio task: 5789 */ 5790 static inline struct task_struct * 5791 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 5792 { 5793 const struct sched_class *class; 5794 struct task_struct *p; 5795 5796 /* 5797 * Optimization: we know that if all tasks are in the fair class we can 5798 * call that function directly, but only if the @prev task wasn't of a 5799 * higher scheduling class, because otherwise those lose the 5800 * opportunity to pull in more work from other CPUs. 5801 */ 5802 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && 5803 rq->nr_running == rq->cfs.h_nr_running)) { 5804 5805 p = pick_next_task_fair(rq, prev, rf); 5806 if (unlikely(p == RETRY_TASK)) 5807 goto restart; 5808 5809 /* Assume the next prioritized class is idle_sched_class */ 5810 if (!p) { 5811 put_prev_task(rq, prev); 5812 p = pick_next_task_idle(rq); 5813 } 5814 5815 return p; 5816 } 5817 5818 restart: 5819 put_prev_task_balance(rq, prev, rf); 5820 5821 for_each_class(class) { 5822 p = class->pick_next_task(rq); 5823 if (p) 5824 return p; 5825 } 5826 5827 BUG(); /* The idle class should always have a runnable task. */ 5828 } 5829 5830 #ifdef CONFIG_SCHED_CORE 5831 static inline bool is_task_rq_idle(struct task_struct *t) 5832 { 5833 return (task_rq(t)->idle == t); 5834 } 5835 5836 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie) 5837 { 5838 return is_task_rq_idle(a) || (a->core_cookie == cookie); 5839 } 5840 5841 static inline bool cookie_match(struct task_struct *a, struct task_struct *b) 5842 { 5843 if (is_task_rq_idle(a) || is_task_rq_idle(b)) 5844 return true; 5845 5846 return a->core_cookie == b->core_cookie; 5847 } 5848 5849 static inline struct task_struct *pick_task(struct rq *rq) 5850 { 5851 const struct sched_class *class; 5852 struct task_struct *p; 5853 5854 for_each_class(class) { 5855 p = class->pick_task(rq); 5856 if (p) 5857 return p; 5858 } 5859 5860 BUG(); /* The idle class should always have a runnable task. */ 5861 } 5862 5863 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi); 5864 5865 static void queue_core_balance(struct rq *rq); 5866 5867 static struct task_struct * 5868 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 5869 { 5870 struct task_struct *next, *p, *max = NULL; 5871 const struct cpumask *smt_mask; 5872 bool fi_before = false; 5873 bool core_clock_updated = (rq == rq->core); 5874 unsigned long cookie; 5875 int i, cpu, occ = 0; 5876 struct rq *rq_i; 5877 bool need_sync; 5878 5879 if (!sched_core_enabled(rq)) 5880 return __pick_next_task(rq, prev, rf); 5881 5882 cpu = cpu_of(rq); 5883 5884 /* Stopper task is switching into idle, no need core-wide selection. */ 5885 if (cpu_is_offline(cpu)) { 5886 /* 5887 * Reset core_pick so that we don't enter the fastpath when 5888 * coming online. core_pick would already be migrated to 5889 * another cpu during offline. 5890 */ 5891 rq->core_pick = NULL; 5892 return __pick_next_task(rq, prev, rf); 5893 } 5894 5895 /* 5896 * If there were no {en,de}queues since we picked (IOW, the task 5897 * pointers are all still valid), and we haven't scheduled the last 5898 * pick yet, do so now. 
5899 * 5900 * rq->core_pick can be NULL if no selection was made for a CPU because 5901 * it was either offline or went offline during a sibling's core-wide 5902 * selection. In this case, do a core-wide selection. 5903 */ 5904 if (rq->core->core_pick_seq == rq->core->core_task_seq && 5905 rq->core->core_pick_seq != rq->core_sched_seq && 5906 rq->core_pick) { 5907 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); 5908 5909 next = rq->core_pick; 5910 if (next != prev) { 5911 put_prev_task(rq, prev); 5912 set_next_task(rq, next); 5913 } 5914 5915 rq->core_pick = NULL; 5916 goto out; 5917 } 5918 5919 put_prev_task_balance(rq, prev, rf); 5920 5921 smt_mask = cpu_smt_mask(cpu); 5922 need_sync = !!rq->core->core_cookie; 5923 5924 /* reset state */ 5925 rq->core->core_cookie = 0UL; 5926 if (rq->core->core_forceidle_count) { 5927 if (!core_clock_updated) { 5928 update_rq_clock(rq->core); 5929 core_clock_updated = true; 5930 } 5931 sched_core_account_forceidle(rq); 5932 /* reset after accounting force idle */ 5933 rq->core->core_forceidle_start = 0; 5934 rq->core->core_forceidle_count = 0; 5935 rq->core->core_forceidle_occupation = 0; 5936 need_sync = true; 5937 fi_before = true; 5938 } 5939 5940 /* 5941 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq 5942 * 5943 * @task_seq guards the task state ({en,de}queues) 5944 * @pick_seq is the @task_seq we did a selection on 5945 * @sched_seq is the @pick_seq we scheduled 5946 * 5947 * However, preemptions can cause multiple picks on the same task set. 5948 * 'Fix' this by also increasing @task_seq for every pick. 5949 */ 5950 rq->core->core_task_seq++; 5951 5952 /* 5953 * Optimize for common case where this CPU has no cookies 5954 * and there are no cookied tasks running on siblings. 5955 */ 5956 if (!need_sync) { 5957 next = pick_task(rq); 5958 if (!next->core_cookie) { 5959 rq->core_pick = NULL; 5960 /* 5961 * For robustness, update the min_vruntime_fi for 5962 * unconstrained picks as well. 5963 */ 5964 WARN_ON_ONCE(fi_before); 5965 task_vruntime_update(rq, next, false); 5966 goto out_set_next; 5967 } 5968 } 5969 5970 /* 5971 * For each thread: do the regular task pick and find the max prio task 5972 * amongst them. 5973 * 5974 * Tie-break prio towards the current CPU 5975 */ 5976 for_each_cpu_wrap(i, smt_mask, cpu) { 5977 rq_i = cpu_rq(i); 5978 5979 /* 5980 * Current cpu always has its clock updated on entrance to 5981 * pick_next_task(). If the current cpu is not the core, 5982 * the core may also have been updated above. 5983 */ 5984 if (i != cpu && (rq_i != rq->core || !core_clock_updated)) 5985 update_rq_clock(rq_i); 5986 5987 p = rq_i->core_pick = pick_task(rq_i); 5988 if (!max || prio_less(max, p, fi_before)) 5989 max = p; 5990 } 5991 5992 cookie = rq->core->core_cookie = max->core_cookie; 5993 5994 /* 5995 * For each thread: try and find a runnable task that matches @max or 5996 * force idle. 
5997 */ 5998 for_each_cpu(i, smt_mask) { 5999 rq_i = cpu_rq(i); 6000 p = rq_i->core_pick; 6001 6002 if (!cookie_equals(p, cookie)) { 6003 p = NULL; 6004 if (cookie) 6005 p = sched_core_find(rq_i, cookie); 6006 if (!p) 6007 p = idle_sched_class.pick_task(rq_i); 6008 } 6009 6010 rq_i->core_pick = p; 6011 6012 if (p == rq_i->idle) { 6013 if (rq_i->nr_running) { 6014 rq->core->core_forceidle_count++; 6015 if (!fi_before) 6016 rq->core->core_forceidle_seq++; 6017 } 6018 } else { 6019 occ++; 6020 } 6021 } 6022 6023 if (schedstat_enabled() && rq->core->core_forceidle_count) { 6024 rq->core->core_forceidle_start = rq_clock(rq->core); 6025 rq->core->core_forceidle_occupation = occ; 6026 } 6027 6028 rq->core->core_pick_seq = rq->core->core_task_seq; 6029 next = rq->core_pick; 6030 rq->core_sched_seq = rq->core->core_pick_seq; 6031 6032 /* Something should have been selected for current CPU */ 6033 WARN_ON_ONCE(!next); 6034 6035 /* 6036 * Reschedule siblings 6037 * 6038 * NOTE: L1TF -- at this point we're no longer running the old task and 6039 * sending an IPI (below) ensures the sibling will no longer be running 6040 * their task. This ensures there is no inter-sibling overlap between 6041 * non-matching user state. 6042 */ 6043 for_each_cpu(i, smt_mask) { 6044 rq_i = cpu_rq(i); 6045 6046 /* 6047 * An online sibling might have gone offline before a task 6048 * could be picked for it, or it might be offline but later 6049 * happen to come online, but its too late and nothing was 6050 * picked for it. That's Ok - it will pick tasks for itself, 6051 * so ignore it. 6052 */ 6053 if (!rq_i->core_pick) 6054 continue; 6055 6056 /* 6057 * Update for new !FI->FI transitions, or if continuing to be in !FI: 6058 * fi_before fi update? 6059 * 0 0 1 6060 * 0 1 1 6061 * 1 0 1 6062 * 1 1 0 6063 */ 6064 if (!(fi_before && rq->core->core_forceidle_count)) 6065 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); 6066 6067 rq_i->core_pick->core_occupation = occ; 6068 6069 if (i == cpu) { 6070 rq_i->core_pick = NULL; 6071 continue; 6072 } 6073 6074 /* Did we break L1TF mitigation requirements? 
*/ 6075 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick)); 6076 6077 if (rq_i->curr == rq_i->core_pick) { 6078 rq_i->core_pick = NULL; 6079 continue; 6080 } 6081 6082 resched_curr(rq_i); 6083 } 6084 6085 out_set_next: 6086 set_next_task(rq, next); 6087 out: 6088 if (rq->core->core_forceidle_count && next == rq->idle) 6089 queue_core_balance(rq); 6090 6091 return next; 6092 } 6093 6094 static bool try_steal_cookie(int this, int that) 6095 { 6096 struct rq *dst = cpu_rq(this), *src = cpu_rq(that); 6097 struct task_struct *p; 6098 unsigned long cookie; 6099 bool success = false; 6100 6101 local_irq_disable(); 6102 double_rq_lock(dst, src); 6103 6104 cookie = dst->core->core_cookie; 6105 if (!cookie) 6106 goto unlock; 6107 6108 if (dst->curr != dst->idle) 6109 goto unlock; 6110 6111 p = sched_core_find(src, cookie); 6112 if (p == src->idle) 6113 goto unlock; 6114 6115 do { 6116 if (p == src->core_pick || p == src->curr) 6117 goto next; 6118 6119 if (!is_cpu_allowed(p, this)) 6120 goto next; 6121 6122 if (p->core_occupation > dst->idle->core_occupation) 6123 goto next; 6124 6125 deactivate_task(src, p, 0); 6126 set_task_cpu(p, this); 6127 activate_task(dst, p, 0); 6128 6129 resched_curr(dst); 6130 6131 success = true; 6132 break; 6133 6134 next: 6135 p = sched_core_next(p, cookie); 6136 } while (p); 6137 6138 unlock: 6139 double_rq_unlock(dst, src); 6140 local_irq_enable(); 6141 6142 return success; 6143 } 6144 6145 static bool steal_cookie_task(int cpu, struct sched_domain *sd) 6146 { 6147 int i; 6148 6149 for_each_cpu_wrap(i, sched_domain_span(sd), cpu) { 6150 if (i == cpu) 6151 continue; 6152 6153 if (need_resched()) 6154 break; 6155 6156 if (try_steal_cookie(cpu, i)) 6157 return true; 6158 } 6159 6160 return false; 6161 } 6162 6163 static void sched_core_balance(struct rq *rq) 6164 { 6165 struct sched_domain *sd; 6166 int cpu = cpu_of(rq); 6167 6168 preempt_disable(); 6169 rcu_read_lock(); 6170 raw_spin_rq_unlock_irq(rq); 6171 for_each_domain(cpu, sd) { 6172 if (need_resched()) 6173 break; 6174 6175 if (steal_cookie_task(cpu, sd)) 6176 break; 6177 } 6178 raw_spin_rq_lock_irq(rq); 6179 rcu_read_unlock(); 6180 preempt_enable(); 6181 } 6182 6183 static DEFINE_PER_CPU(struct callback_head, core_balance_head); 6184 6185 static void queue_core_balance(struct rq *rq) 6186 { 6187 if (!sched_core_enabled(rq)) 6188 return; 6189 6190 if (!rq->core->core_cookie) 6191 return; 6192 6193 if (!rq->nr_running) /* not forced idle */ 6194 return; 6195 6196 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); 6197 } 6198 6199 static void sched_core_cpu_starting(unsigned int cpu) 6200 { 6201 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 6202 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; 6203 unsigned long flags; 6204 int t; 6205 6206 sched_core_lock(cpu, &flags); 6207 6208 WARN_ON_ONCE(rq->core != rq); 6209 6210 /* if we're the first, we'll be our own leader */ 6211 if (cpumask_weight(smt_mask) == 1) 6212 goto unlock; 6213 6214 /* find the leader */ 6215 for_each_cpu(t, smt_mask) { 6216 if (t == cpu) 6217 continue; 6218 rq = cpu_rq(t); 6219 if (rq->core == rq) { 6220 core_rq = rq; 6221 break; 6222 } 6223 } 6224 6225 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */ 6226 goto unlock; 6227 6228 /* install and validate core_rq */ 6229 for_each_cpu(t, smt_mask) { 6230 rq = cpu_rq(t); 6231 6232 if (t == cpu) 6233 rq->core = core_rq; 6234 6235 WARN_ON_ONCE(rq->core != core_rq); 6236 } 6237 6238 unlock: 6239 sched_core_unlock(cpu, &flags); 6240 } 6241 6242 static void 
sched_core_cpu_deactivate(unsigned int cpu) 6243 { 6244 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 6245 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; 6246 unsigned long flags; 6247 int t; 6248 6249 sched_core_lock(cpu, &flags); 6250 6251 /* if we're the last man standing, nothing to do */ 6252 if (cpumask_weight(smt_mask) == 1) { 6253 WARN_ON_ONCE(rq->core != rq); 6254 goto unlock; 6255 } 6256 6257 /* if we're not the leader, nothing to do */ 6258 if (rq->core != rq) 6259 goto unlock; 6260 6261 /* find a new leader */ 6262 for_each_cpu(t, smt_mask) { 6263 if (t == cpu) 6264 continue; 6265 core_rq = cpu_rq(t); 6266 break; 6267 } 6268 6269 if (WARN_ON_ONCE(!core_rq)) /* impossible */ 6270 goto unlock; 6271 6272 /* copy the shared state to the new leader */ 6273 core_rq->core_task_seq = rq->core_task_seq; 6274 core_rq->core_pick_seq = rq->core_pick_seq; 6275 core_rq->core_cookie = rq->core_cookie; 6276 core_rq->core_forceidle_count = rq->core_forceidle_count; 6277 core_rq->core_forceidle_seq = rq->core_forceidle_seq; 6278 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; 6279 6280 /* 6281 * Accounting edge for forced idle is handled in pick_next_task(). 6282 * Don't need another one here, since the hotplug thread shouldn't 6283 * have a cookie. 6284 */ 6285 core_rq->core_forceidle_start = 0; 6286 6287 /* install new leader */ 6288 for_each_cpu(t, smt_mask) { 6289 rq = cpu_rq(t); 6290 rq->core = core_rq; 6291 } 6292 6293 unlock: 6294 sched_core_unlock(cpu, &flags); 6295 } 6296 6297 static inline void sched_core_cpu_dying(unsigned int cpu) 6298 { 6299 struct rq *rq = cpu_rq(cpu); 6300 6301 if (rq->core != rq) 6302 rq->core = rq; 6303 } 6304 6305 #else /* !CONFIG_SCHED_CORE */ 6306 6307 static inline void sched_core_cpu_starting(unsigned int cpu) {} 6308 static inline void sched_core_cpu_deactivate(unsigned int cpu) {} 6309 static inline void sched_core_cpu_dying(unsigned int cpu) {} 6310 6311 static struct task_struct * 6312 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6313 { 6314 return __pick_next_task(rq, prev, rf); 6315 } 6316 6317 #endif /* CONFIG_SCHED_CORE */ 6318 6319 /* 6320 * Constants for the sched_mode argument of __schedule(). 6321 * 6322 * The mode argument allows RT enabled kernels to differentiate a 6323 * preemption from blocking on an 'sleeping' spin/rwlock. Note that 6324 * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to 6325 * optimize the AND operation out and just check for zero. 6326 */ 6327 #define SM_NONE 0x0 6328 #define SM_PREEMPT 0x1 6329 #define SM_RTLOCK_WAIT 0x2 6330 6331 #ifndef CONFIG_PREEMPT_RT 6332 # define SM_MASK_PREEMPT (~0U) 6333 #else 6334 # define SM_MASK_PREEMPT SM_PREEMPT 6335 #endif 6336 6337 /* 6338 * __schedule() is the main scheduler function. 6339 * 6340 * The main means of driving the scheduler and thus entering this function are: 6341 * 6342 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 6343 * 6344 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 6345 * paths. For example, see arch/x86/entry_64.S. 6346 * 6347 * To drive preemption between tasks, the scheduler sets the flag in timer 6348 * interrupt handler scheduler_tick(). 6349 * 6350 * 3. Wakeups don't really cause entry into schedule(). They add a 6351 * task to the run-queue and that's it. 
6352 * 6353 * Now, if the new task added to the run-queue preempts the current 6354 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 6355 * called on the nearest possible occasion: 6356 * 6357 * - If the kernel is preemptible (CONFIG_PREEMPTION=y): 6358 * 6359 * - in syscall or exception context, at the next outmost 6360 * preempt_enable(). (this might be as soon as the wake_up()'s 6361 * spin_unlock()!) 6362 * 6363 * - in IRQ context, return from interrupt-handler to 6364 * preemptible context 6365 * 6366 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) 6367 * then at the next: 6368 * 6369 * - cond_resched() call 6370 * - explicit schedule() call 6371 * - return from syscall or exception to user-space 6372 * - return from interrupt-handler to user-space 6373 * 6374 * WARNING: must be called with preemption disabled! 6375 */ 6376 static void __sched notrace __schedule(unsigned int sched_mode) 6377 { 6378 struct task_struct *prev, *next; 6379 unsigned long *switch_count; 6380 unsigned long prev_state; 6381 struct rq_flags rf; 6382 struct rq *rq; 6383 int cpu; 6384 6385 cpu = smp_processor_id(); 6386 rq = cpu_rq(cpu); 6387 prev = rq->curr; 6388 6389 schedule_debug(prev, !!sched_mode); 6390 6391 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL)) 6392 hrtick_clear(rq); 6393 6394 local_irq_disable(); 6395 rcu_note_context_switch(!!sched_mode); 6396 6397 /* 6398 * Make sure that signal_pending_state()->signal_pending() below 6399 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 6400 * done by the caller to avoid the race with signal_wake_up(): 6401 * 6402 * __set_current_state(@state) signal_wake_up() 6403 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) 6404 * wake_up_state(p, state) 6405 * LOCK rq->lock LOCK p->pi_state 6406 * smp_mb__after_spinlock() smp_mb__after_spinlock() 6407 * if (signal_pending_state()) if (p->state & @state) 6408 * 6409 * Also, the membarrier system call requires a full memory barrier 6410 * after coming from user-space, before storing to rq->curr. 6411 */ 6412 rq_lock(rq, &rf); 6413 smp_mb__after_spinlock(); 6414 6415 /* Promote REQ to ACT */ 6416 rq->clock_update_flags <<= 1; 6417 update_rq_clock(rq); 6418 6419 switch_count = &prev->nivcsw; 6420 6421 /* 6422 * We must load prev->state once (task_struct::state is volatile), such 6423 * that we form a control dependency vs deactivate_task() below. 6424 */ 6425 prev_state = READ_ONCE(prev->__state); 6426 if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) { 6427 if (signal_pending_state(prev_state, prev)) { 6428 WRITE_ONCE(prev->__state, TASK_RUNNING); 6429 } else { 6430 prev->sched_contributes_to_load = 6431 (prev_state & TASK_UNINTERRUPTIBLE) && 6432 !(prev_state & TASK_NOLOAD) && 6433 !(prev->flags & PF_FROZEN); 6434 6435 if (prev->sched_contributes_to_load) 6436 rq->nr_uninterruptible++; 6437 6438 /* 6439 * __schedule() ttwu() 6440 * prev_state = prev->state; if (p->on_rq && ...) 6441 * if (prev_state) goto out; 6442 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); 6443 * p->state = TASK_WAKING 6444 * 6445 * Where __schedule() and ttwu() have matching control dependencies. 6446 * 6447 * After this, schedule() must not care about p->state any more. 
6448 */ 6449 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 6450 6451 if (prev->in_iowait) { 6452 atomic_inc(&rq->nr_iowait); 6453 delayacct_blkio_start(); 6454 } 6455 } 6456 switch_count = &prev->nvcsw; 6457 } 6458 6459 next = pick_next_task(rq, prev, &rf); 6460 clear_tsk_need_resched(prev); 6461 clear_preempt_need_resched(); 6462 #ifdef CONFIG_SCHED_DEBUG 6463 rq->last_seen_need_resched_ns = 0; 6464 #endif 6465 6466 if (likely(prev != next)) { 6467 rq->nr_switches++; 6468 /* 6469 * RCU users of rcu_dereference(rq->curr) may not see 6470 * changes to task_struct made by pick_next_task(). 6471 */ 6472 RCU_INIT_POINTER(rq->curr, next); 6473 /* 6474 * The membarrier system call requires each architecture 6475 * to have a full memory barrier after updating 6476 * rq->curr, before returning to user-space. 6477 * 6478 * Here are the schemes providing that barrier on the 6479 * various architectures: 6480 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. 6481 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 6482 * - finish_lock_switch() for weakly-ordered 6483 * architectures where spin_unlock is a full barrier, 6484 * - switch_to() for arm64 (weakly-ordered, spin_unlock 6485 * is a RELEASE barrier), 6486 */ 6487 ++*switch_count; 6488 6489 migrate_disable_switch(rq, prev); 6490 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); 6491 6492 trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state); 6493 6494 /* Also unlocks the rq: */ 6495 rq = context_switch(rq, prev, next, &rf); 6496 } else { 6497 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 6498 6499 rq_unpin_lock(rq, &rf); 6500 __balance_callbacks(rq); 6501 raw_spin_rq_unlock_irq(rq); 6502 } 6503 } 6504 6505 void __noreturn do_task_dead(void) 6506 { 6507 /* Causes final put_task_struct in finish_task_switch(): */ 6508 set_special_state(TASK_DEAD); 6509 6510 /* Tell freezer to ignore us: */ 6511 current->flags |= PF_NOFREEZE; 6512 6513 __schedule(SM_NONE); 6514 BUG(); 6515 6516 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 6517 for (;;) 6518 cpu_relax(); 6519 } 6520 6521 static inline void sched_submit_work(struct task_struct *tsk) 6522 { 6523 unsigned int task_flags; 6524 6525 if (task_is_running(tsk)) 6526 return; 6527 6528 task_flags = tsk->flags; 6529 /* 6530 * If a worker goes to sleep, notify and ask workqueue whether it 6531 * wants to wake up a task to maintain concurrency. 6532 */ 6533 if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 6534 if (task_flags & PF_WQ_WORKER) 6535 wq_worker_sleeping(tsk); 6536 else 6537 io_wq_worker_sleeping(tsk); 6538 } 6539 6540 /* 6541 * spinlock and rwlock must not flush block requests. This will 6542 * deadlock if the callback attempts to acquire a lock which is 6543 * already acquired. 6544 */ 6545 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT); 6546 6547 /* 6548 * If we are going to sleep and we have plugged IO queued, 6549 * make sure to submit it to avoid deadlocks. 
6550 */ 6551 blk_flush_plug(tsk->plug, true); 6552 } 6553 6554 static void sched_update_worker(struct task_struct *tsk) 6555 { 6556 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 6557 if (tsk->flags & PF_WQ_WORKER) 6558 wq_worker_running(tsk); 6559 else 6560 io_wq_worker_running(tsk); 6561 } 6562 } 6563 6564 asmlinkage __visible void __sched schedule(void) 6565 { 6566 struct task_struct *tsk = current; 6567 6568 sched_submit_work(tsk); 6569 do { 6570 preempt_disable(); 6571 __schedule(SM_NONE); 6572 sched_preempt_enable_no_resched(); 6573 } while (need_resched()); 6574 sched_update_worker(tsk); 6575 } 6576 EXPORT_SYMBOL(schedule); 6577 6578 /* 6579 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted 6580 * state (have scheduled out non-voluntarily) by making sure that all 6581 * tasks have either left the run queue or have gone into user space. 6582 * As idle tasks do not do either, they must not ever be preempted 6583 * (schedule out non-voluntarily). 6584 * 6585 * schedule_idle() is similar to schedule_preempt_disable() except that it 6586 * never enables preemption because it does not call sched_submit_work(). 6587 */ 6588 void __sched schedule_idle(void) 6589 { 6590 /* 6591 * As this skips calling sched_submit_work(), which the idle task does 6592 * regardless because that function is a nop when the task is in a 6593 * TASK_RUNNING state, make sure this isn't used someplace that the 6594 * current task can be in any other state. Note, idle is always in the 6595 * TASK_RUNNING state. 6596 */ 6597 WARN_ON_ONCE(current->__state); 6598 do { 6599 __schedule(SM_NONE); 6600 } while (need_resched()); 6601 } 6602 6603 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) 6604 asmlinkage __visible void __sched schedule_user(void) 6605 { 6606 /* 6607 * If we come here after a random call to set_need_resched(), 6608 * or we have been woken up remotely but the IPI has not yet arrived, 6609 * we haven't yet exited the RCU idle mode. Do it here manually until 6610 * we find a better solution. 6611 * 6612 * NB: There are buggy callers of this function. Ideally we 6613 * should warn if prev_state != CONTEXT_USER, but that will trigger 6614 * too frequently to make sense yet. 6615 */ 6616 enum ctx_state prev_state = exception_enter(); 6617 schedule(); 6618 exception_exit(prev_state); 6619 } 6620 #endif 6621 6622 /** 6623 * schedule_preempt_disabled - called with preemption disabled 6624 * 6625 * Returns with preemption disabled. Note: preempt_count must be 1 6626 */ 6627 void __sched schedule_preempt_disabled(void) 6628 { 6629 sched_preempt_enable_no_resched(); 6630 schedule(); 6631 preempt_disable(); 6632 } 6633 6634 #ifdef CONFIG_PREEMPT_RT 6635 void __sched notrace schedule_rtlock(void) 6636 { 6637 do { 6638 preempt_disable(); 6639 __schedule(SM_RTLOCK_WAIT); 6640 sched_preempt_enable_no_resched(); 6641 } while (need_resched()); 6642 } 6643 NOKPROBE_SYMBOL(schedule_rtlock); 6644 #endif 6645 6646 static void __sched notrace preempt_schedule_common(void) 6647 { 6648 do { 6649 /* 6650 * Because the function tracer can trace preempt_count_sub() 6651 * and it also uses preempt_enable/disable_notrace(), if 6652 * NEED_RESCHED is set, the preempt_enable_notrace() called 6653 * by the function tracer will call this function again and 6654 * cause infinite recursion. 6655 * 6656 * Preemption must be disabled here before the function 6657 * tracer can trace. Break up preempt_disable() into two 6658 * calls. 
One to disable preemption without fear of being 6659 * traced. The other to still record the preemption latency, 6660 * which can also be traced by the function tracer. 6661 */ 6662 preempt_disable_notrace(); 6663 preempt_latency_start(1); 6664 __schedule(SM_PREEMPT); 6665 preempt_latency_stop(1); 6666 preempt_enable_no_resched_notrace(); 6667 6668 /* 6669 * Check again in case we missed a preemption opportunity 6670 * between schedule and now. 6671 */ 6672 } while (need_resched()); 6673 } 6674 6675 #ifdef CONFIG_PREEMPTION 6676 /* 6677 * This is the entry point to schedule() from in-kernel preemption 6678 * off of preempt_enable. 6679 */ 6680 asmlinkage __visible void __sched notrace preempt_schedule(void) 6681 { 6682 /* 6683 * If there is a non-zero preempt_count or interrupts are disabled, 6684 * we do not want to preempt the current task. Just return.. 6685 */ 6686 if (likely(!preemptible())) 6687 return; 6688 preempt_schedule_common(); 6689 } 6690 NOKPROBE_SYMBOL(preempt_schedule); 6691 EXPORT_SYMBOL(preempt_schedule); 6692 6693 #ifdef CONFIG_PREEMPT_DYNAMIC 6694 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 6695 #ifndef preempt_schedule_dynamic_enabled 6696 #define preempt_schedule_dynamic_enabled preempt_schedule 6697 #define preempt_schedule_dynamic_disabled NULL 6698 #endif 6699 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled); 6700 EXPORT_STATIC_CALL_TRAMP(preempt_schedule); 6701 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 6702 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule); 6703 void __sched notrace dynamic_preempt_schedule(void) 6704 { 6705 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule)) 6706 return; 6707 preempt_schedule(); 6708 } 6709 NOKPROBE_SYMBOL(dynamic_preempt_schedule); 6710 EXPORT_SYMBOL(dynamic_preempt_schedule); 6711 #endif 6712 #endif 6713 6714 /** 6715 * preempt_schedule_notrace - preempt_schedule called by tracing 6716 * 6717 * The tracing infrastructure uses preempt_enable_notrace to prevent 6718 * recursion and tracing preempt enabling caused by the tracing 6719 * infrastructure itself. But as tracing can happen in areas coming 6720 * from userspace or just about to enter userspace, a preempt enable 6721 * can occur before user_exit() is called. This will cause the scheduler 6722 * to be called when the system is still in usermode. 6723 * 6724 * To prevent this, the preempt_enable_notrace will use this function 6725 * instead of preempt_schedule() to exit user context if needed before 6726 * calling the scheduler. 6727 */ 6728 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 6729 { 6730 enum ctx_state prev_ctx; 6731 6732 if (likely(!preemptible())) 6733 return; 6734 6735 do { 6736 /* 6737 * Because the function tracer can trace preempt_count_sub() 6738 * and it also uses preempt_enable/disable_notrace(), if 6739 * NEED_RESCHED is set, the preempt_enable_notrace() called 6740 * by the function tracer will call this function again and 6741 * cause infinite recursion. 6742 * 6743 * Preemption must be disabled here before the function 6744 * tracer can trace. Break up preempt_disable() into two 6745 * calls. One to disable preemption without fear of being 6746 * traced. The other to still record the preemption latency, 6747 * which can also be traced by the function tracer. 
6748 */ 6749 preempt_disable_notrace(); 6750 preempt_latency_start(1); 6751 /* 6752 * Needs preempt disabled in case user_exit() is traced 6753 * and the tracer calls preempt_enable_notrace() causing 6754 * an infinite recursion. 6755 */ 6756 prev_ctx = exception_enter(); 6757 __schedule(SM_PREEMPT); 6758 exception_exit(prev_ctx); 6759 6760 preempt_latency_stop(1); 6761 preempt_enable_no_resched_notrace(); 6762 } while (need_resched()); 6763 } 6764 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 6765 6766 #ifdef CONFIG_PREEMPT_DYNAMIC 6767 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 6768 #ifndef preempt_schedule_notrace_dynamic_enabled 6769 #define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace 6770 #define preempt_schedule_notrace_dynamic_disabled NULL 6771 #endif 6772 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled); 6773 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace); 6774 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 6775 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace); 6776 void __sched notrace dynamic_preempt_schedule_notrace(void) 6777 { 6778 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace)) 6779 return; 6780 preempt_schedule_notrace(); 6781 } 6782 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace); 6783 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace); 6784 #endif 6785 #endif 6786 6787 #endif /* CONFIG_PREEMPTION */ 6788 6789 /* 6790 * This is the entry point to schedule() from kernel preemption 6791 * off of irq context. 6792 * Note, that this is called and return with irqs disabled. This will 6793 * protect us against recursive calling from irq. 6794 */ 6795 asmlinkage __visible void __sched preempt_schedule_irq(void) 6796 { 6797 enum ctx_state prev_state; 6798 6799 /* Catch callers which need to be fixed */ 6800 BUG_ON(preempt_count() || !irqs_disabled()); 6801 6802 prev_state = exception_enter(); 6803 6804 do { 6805 preempt_disable(); 6806 local_irq_enable(); 6807 __schedule(SM_PREEMPT); 6808 local_irq_disable(); 6809 sched_preempt_enable_no_resched(); 6810 } while (need_resched()); 6811 6812 exception_exit(prev_state); 6813 } 6814 6815 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 6816 void *key) 6817 { 6818 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC); 6819 return try_to_wake_up(curr->private, mode, wake_flags); 6820 } 6821 EXPORT_SYMBOL(default_wake_function); 6822 6823 static void __setscheduler_prio(struct task_struct *p, int prio) 6824 { 6825 if (dl_prio(prio)) 6826 p->sched_class = &dl_sched_class; 6827 else if (rt_prio(prio)) 6828 p->sched_class = &rt_sched_class; 6829 else 6830 p->sched_class = &fair_sched_class; 6831 6832 p->prio = prio; 6833 } 6834 6835 #ifdef CONFIG_RT_MUTEXES 6836 6837 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) 6838 { 6839 if (pi_task) 6840 prio = min(prio, pi_task->prio); 6841 6842 return prio; 6843 } 6844 6845 static inline int rt_effective_prio(struct task_struct *p, int prio) 6846 { 6847 struct task_struct *pi_task = rt_mutex_get_top_task(p); 6848 6849 return __rt_effective_prio(pi_task, prio); 6850 } 6851 6852 /* 6853 * rt_mutex_setprio - set the current priority of a task 6854 * @p: task to boost 6855 * @pi_task: donor task 6856 * 6857 * This function changes the 'effective' priority of a task. It does 6858 * not touch ->normal_prio like __setscheduler(). 6859 * 6860 * Used by the rt_mutex code to implement priority inheritance 6861 * logic. 
Call site only calls if the priority of the task changed. 6862 */ 6863 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) 6864 { 6865 int prio, oldprio, queued, running, queue_flag = 6866 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 6867 const struct sched_class *prev_class; 6868 struct rq_flags rf; 6869 struct rq *rq; 6870 6871 /* XXX used to be waiter->prio, not waiter->task->prio */ 6872 prio = __rt_effective_prio(pi_task, p->normal_prio); 6873 6874 /* 6875 * If nothing changed; bail early. 6876 */ 6877 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) 6878 return; 6879 6880 rq = __task_rq_lock(p, &rf); 6881 update_rq_clock(rq); 6882 /* 6883 * Set under pi_lock && rq->lock, such that the value can be used under 6884 * either lock. 6885 * 6886 * Note that there is loads of tricky to make this pointer cache work 6887 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to 6888 * ensure a task is de-boosted (pi_task is set to NULL) before the 6889 * task is allowed to run again (and can exit). This ensures the pointer 6890 * points to a blocked task -- which guarantees the task is present. 6891 */ 6892 p->pi_top_task = pi_task; 6893 6894 /* 6895 * For FIFO/RR we only need to set prio, if that matches we're done. 6896 */ 6897 if (prio == p->prio && !dl_prio(prio)) 6898 goto out_unlock; 6899 6900 /* 6901 * Idle task boosting is a nono in general. There is one 6902 * exception, when PREEMPT_RT and NOHZ is active: 6903 * 6904 * The idle task calls get_next_timer_interrupt() and holds 6905 * the timer wheel base->lock on the CPU and another CPU wants 6906 * to access the timer (probably to cancel it). We can safely 6907 * ignore the boosting request, as the idle CPU runs this code 6908 * with interrupts disabled and will complete the lock 6909 * protected section without being interrupted. So there is no 6910 * real need to boost. 6911 */ 6912 if (unlikely(p == rq->idle)) { 6913 WARN_ON(p != rq->curr); 6914 WARN_ON(p->pi_blocked_on); 6915 goto out_unlock; 6916 } 6917 6918 trace_sched_pi_setprio(p, pi_task); 6919 oldprio = p->prio; 6920 6921 if (oldprio == prio) 6922 queue_flag &= ~DEQUEUE_MOVE; 6923 6924 prev_class = p->sched_class; 6925 queued = task_on_rq_queued(p); 6926 running = task_current(rq, p); 6927 if (queued) 6928 dequeue_task(rq, p, queue_flag); 6929 if (running) 6930 put_prev_task(rq, p); 6931 6932 /* 6933 * Boosting condition are: 6934 * 1. -rt task is running and holds mutex A 6935 * --> -dl task blocks on mutex A 6936 * 6937 * 2. 
-dl task is running and holds mutex A 6938 * --> -dl task blocks on mutex A and could preempt the 6939 * running task 6940 */ 6941 if (dl_prio(prio)) { 6942 if (!dl_prio(p->normal_prio) || 6943 (pi_task && dl_prio(pi_task->prio) && 6944 dl_entity_preempt(&pi_task->dl, &p->dl))) { 6945 p->dl.pi_se = pi_task->dl.pi_se; 6946 queue_flag |= ENQUEUE_REPLENISH; 6947 } else { 6948 p->dl.pi_se = &p->dl; 6949 } 6950 } else if (rt_prio(prio)) { 6951 if (dl_prio(oldprio)) 6952 p->dl.pi_se = &p->dl; 6953 if (oldprio < prio) 6954 queue_flag |= ENQUEUE_HEAD; 6955 } else { 6956 if (dl_prio(oldprio)) 6957 p->dl.pi_se = &p->dl; 6958 if (rt_prio(oldprio)) 6959 p->rt.timeout = 0; 6960 } 6961 6962 __setscheduler_prio(p, prio); 6963 6964 if (queued) 6965 enqueue_task(rq, p, queue_flag); 6966 if (running) 6967 set_next_task(rq, p); 6968 6969 check_class_changed(rq, p, prev_class, oldprio); 6970 out_unlock: 6971 /* Avoid rq from going away on us: */ 6972 preempt_disable(); 6973 6974 rq_unpin_lock(rq, &rf); 6975 __balance_callbacks(rq); 6976 raw_spin_rq_unlock(rq); 6977 6978 preempt_enable(); 6979 } 6980 #else 6981 static inline int rt_effective_prio(struct task_struct *p, int prio) 6982 { 6983 return prio; 6984 } 6985 #endif 6986 6987 void set_user_nice(struct task_struct *p, long nice) 6988 { 6989 bool queued, running; 6990 int old_prio; 6991 struct rq_flags rf; 6992 struct rq *rq; 6993 6994 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 6995 return; 6996 /* 6997 * We have to be careful, if called from sys_setpriority(), 6998 * the task might be in the middle of scheduling on another CPU. 6999 */ 7000 rq = task_rq_lock(p, &rf); 7001 update_rq_clock(rq); 7002 7003 /* 7004 * The RT priorities are set via sched_setscheduler(), but we still 7005 * allow the 'normal' nice value to be set - but as expected 7006 * it won't have any effect on scheduling until the task is 7007 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 7008 */ 7009 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 7010 p->static_prio = NICE_TO_PRIO(nice); 7011 goto out_unlock; 7012 } 7013 queued = task_on_rq_queued(p); 7014 running = task_current(rq, p); 7015 if (queued) 7016 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 7017 if (running) 7018 put_prev_task(rq, p); 7019 7020 p->static_prio = NICE_TO_PRIO(nice); 7021 set_load_weight(p, true); 7022 old_prio = p->prio; 7023 p->prio = effective_prio(p); 7024 7025 if (queued) 7026 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 7027 if (running) 7028 set_next_task(rq, p); 7029 7030 /* 7031 * If the task increased its priority or is running and 7032 * lowered its priority, then reschedule its CPU: 7033 */ 7034 p->sched_class->prio_changed(rq, p, old_prio); 7035 7036 out_unlock: 7037 task_rq_unlock(rq, p, &rf); 7038 } 7039 EXPORT_SYMBOL(set_user_nice); 7040 7041 /* 7042 * is_nice_reduction - check if nice value is an actual reduction 7043 * 7044 * Similar to can_nice() but does not perform a capability check. 
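 *
 * Worked example of the mapping, assuming the usual -20..19 nice
 * range: nice 19 converts to rlimit-style value 1, nice 0 to 20 and
 * nice -20 to 40; the value is accepted without CAP_SYS_NICE when it
 * does not exceed RLIMIT_NICE.
 *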
7045 * 7046 * @p: task 7047 * @nice: nice value 7048 */ 7049 static bool is_nice_reduction(const struct task_struct *p, const int nice) 7050 { 7051 /* Convert nice value [19,-20] to rlimit style value [1,40]: */ 7052 int nice_rlim = nice_to_rlimit(nice); 7053 7054 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE)); 7055 } 7056 7057 /* 7058 * can_nice - check if a task can reduce its nice value 7059 * @p: task 7060 * @nice: nice value 7061 */ 7062 int can_nice(const struct task_struct *p, const int nice) 7063 { 7064 return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE); 7065 } 7066 7067 #ifdef __ARCH_WANT_SYS_NICE 7068 7069 /* 7070 * sys_nice - change the priority of the current process. 7071 * @increment: priority increment 7072 * 7073 * sys_setpriority is a more generic, but much slower function that 7074 * does similar things. 7075 */ 7076 SYSCALL_DEFINE1(nice, int, increment) 7077 { 7078 long nice, retval; 7079 7080 /* 7081 * Setpriority might change our priority at the same moment. 7082 * We don't have to worry. Conceptually one call occurs first 7083 * and we have a single winner. 7084 */ 7085 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 7086 nice = task_nice(current) + increment; 7087 7088 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 7089 if (increment < 0 && !can_nice(current, nice)) 7090 return -EPERM; 7091 7092 retval = security_task_setnice(current, nice); 7093 if (retval) 7094 return retval; 7095 7096 set_user_nice(current, nice); 7097 return 0; 7098 } 7099 7100 #endif 7101 7102 /** 7103 * task_prio - return the priority value of a given task. 7104 * @p: the task in question. 7105 * 7106 * Return: The priority value as seen by users in /proc. 7107 * 7108 * sched policy return value kernel prio user prio/nice 7109 * 7110 * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19] 7111 * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99] 7112 * deadline -101 -1 0 7113 */ 7114 int task_prio(const struct task_struct *p) 7115 { 7116 return p->prio - MAX_RT_PRIO; 7117 } 7118 7119 /** 7120 * idle_cpu - is a given CPU idle currently? 7121 * @cpu: the processor in question. 7122 * 7123 * Return: 1 if the CPU is currently idle. 0 otherwise. 7124 */ 7125 int idle_cpu(int cpu) 7126 { 7127 struct rq *rq = cpu_rq(cpu); 7128 7129 if (rq->curr != rq->idle) 7130 return 0; 7131 7132 if (rq->nr_running) 7133 return 0; 7134 7135 #ifdef CONFIG_SMP 7136 if (rq->ttwu_pending) 7137 return 0; 7138 #endif 7139 7140 return 1; 7141 } 7142 7143 /** 7144 * available_idle_cpu - is a given CPU idle for enqueuing work. 7145 * @cpu: the CPU in question. 7146 * 7147 * Return: 1 if the CPU is currently idle. 0 otherwise. 7148 */ 7149 int available_idle_cpu(int cpu) 7150 { 7151 if (!idle_cpu(cpu)) 7152 return 0; 7153 7154 if (vcpu_is_preempted(cpu)) 7155 return 0; 7156 7157 return 1; 7158 } 7159 7160 /** 7161 * idle_task - return the idle task for a given CPU. 7162 * @cpu: the processor in question. 7163 * 7164 * Return: The idle task for the CPU @cpu. 7165 */ 7166 struct task_struct *idle_task(int cpu) 7167 { 7168 return cpu_rq(cpu)->idle; 7169 } 7170 7171 #ifdef CONFIG_SMP 7172 /* 7173 * This function computes an effective utilization for the given CPU, to be 7174 * used for frequency selection given the linear relation: f = u * f_max. 7175 * 7176 * The scheduler tracks the following metrics: 7177 * 7178 * cpu_util_{cfs,rt,dl,irq}() 7179 * cpu_bw_dl() 7180 * 7181 * Where the cfs,rt and dl util numbers are tracked with the same metric and 7182 * synchronized windows and are thus directly comparable. 
7183 * 7184 * The cfs,rt,dl utilization are the running times measured with rq->clock_task 7185 * which excludes things like IRQ and steal-time. These latter are then accrued 7186 * in the irq utilization. 7187 * 7188 * The DL bandwidth number otoh is not a measured metric but a value computed 7189 * based on the task model parameters and gives the minimal utilization 7190 * required to meet deadlines. 7191 */ 7192 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 7193 enum cpu_util_type type, 7194 struct task_struct *p) 7195 { 7196 unsigned long dl_util, util, irq, max; 7197 struct rq *rq = cpu_rq(cpu); 7198 7199 max = arch_scale_cpu_capacity(cpu); 7200 7201 if (!uclamp_is_used() && 7202 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) { 7203 return max; 7204 } 7205 7206 /* 7207 * Early check to see if IRQ/steal time saturates the CPU, can be 7208 * because of inaccuracies in how we track these -- see 7209 * update_irq_load_avg(). 7210 */ 7211 irq = cpu_util_irq(rq); 7212 if (unlikely(irq >= max)) 7213 return max; 7214 7215 /* 7216 * Because the time spend on RT/DL tasks is visible as 'lost' time to 7217 * CFS tasks and we use the same metric to track the effective 7218 * utilization (PELT windows are synchronized) we can directly add them 7219 * to obtain the CPU's actual utilization. 7220 * 7221 * CFS and RT utilization can be boosted or capped, depending on 7222 * utilization clamp constraints requested by currently RUNNABLE 7223 * tasks. 7224 * When there are no CFS RUNNABLE tasks, clamps are released and 7225 * frequency will be gracefully reduced with the utilization decay. 7226 */ 7227 util = util_cfs + cpu_util_rt(rq); 7228 if (type == FREQUENCY_UTIL) 7229 util = uclamp_rq_util_with(rq, util, p); 7230 7231 dl_util = cpu_util_dl(rq); 7232 7233 /* 7234 * For frequency selection we do not make cpu_util_dl() a permanent part 7235 * of this sum because we want to use cpu_bw_dl() later on, but we need 7236 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such 7237 * that we select f_max when there is no idle time. 7238 * 7239 * NOTE: numerical errors or stop class might cause us to not quite hit 7240 * saturation when we should -- something for later. 7241 */ 7242 if (util + dl_util >= max) 7243 return max; 7244 7245 /* 7246 * OTOH, for energy computation we need the estimated running time, so 7247 * include util_dl and ignore dl_bw. 7248 */ 7249 if (type == ENERGY_UTIL) 7250 util += dl_util; 7251 7252 /* 7253 * There is still idle time; further improve the number by using the 7254 * irq metric. Because IRQ/steal time is hidden from the task clock we 7255 * need to scale the task numbers: 7256 * 7257 * max - irq 7258 * U' = irq + --------- * U 7259 * max 7260 */ 7261 util = scale_irq_capacity(util, irq, max); 7262 util += irq; 7263 7264 /* 7265 * Bandwidth required by DEADLINE must always be granted while, for 7266 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism 7267 * to gracefully reduce the frequency when no tasks show up for longer 7268 * periods of time. 7269 * 7270 * Ideally we would like to set bw_dl as min/guaranteed freq and util + 7271 * bw_dl as requested freq. However, cpufreq is not yet ready for such 7272 * an interface. So, we only do the latter for now. 
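 *
 * Purely illustrative numbers: with max = 1024, irq = 128, a clamped
 * CFS+RT utilization of 400 and no DL saturation, the scaling above
 * yields util = 128 + (1024 - 128) * 400 / 1024 = 478; for
 * FREQUENCY_UTIL the DL bandwidth (cpu_bw_dl()) is then added on top
 * right below.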
7273 */ 7274 if (type == FREQUENCY_UTIL) 7275 util += cpu_bw_dl(rq); 7276 7277 return min(max, util); 7278 } 7279 7280 unsigned long sched_cpu_util(int cpu) 7281 { 7282 return effective_cpu_util(cpu, cpu_util_cfs(cpu), ENERGY_UTIL, NULL); 7283 } 7284 #endif /* CONFIG_SMP */ 7285 7286 /** 7287 * find_process_by_pid - find a process with a matching PID value. 7288 * @pid: the pid in question. 7289 * 7290 * The task of @pid, if found. %NULL otherwise. 7291 */ 7292 static struct task_struct *find_process_by_pid(pid_t pid) 7293 { 7294 return pid ? find_task_by_vpid(pid) : current; 7295 } 7296 7297 /* 7298 * sched_setparam() passes in -1 for its policy, to let the functions 7299 * it calls know not to change it. 7300 */ 7301 #define SETPARAM_POLICY -1 7302 7303 static void __setscheduler_params(struct task_struct *p, 7304 const struct sched_attr *attr) 7305 { 7306 int policy = attr->sched_policy; 7307 7308 if (policy == SETPARAM_POLICY) 7309 policy = p->policy; 7310 7311 p->policy = policy; 7312 7313 if (dl_policy(policy)) 7314 __setparam_dl(p, attr); 7315 else if (fair_policy(policy)) 7316 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 7317 7318 /* 7319 * __sched_setscheduler() ensures attr->sched_priority == 0 when 7320 * !rt_policy. Always setting this ensures that things like 7321 * getparam()/getattr() don't report silly values for !rt tasks. 7322 */ 7323 p->rt_priority = attr->sched_priority; 7324 p->normal_prio = normal_prio(p); 7325 set_load_weight(p, true); 7326 } 7327 7328 /* 7329 * Check the target process has a UID that matches the current process's: 7330 */ 7331 static bool check_same_owner(struct task_struct *p) 7332 { 7333 const struct cred *cred = current_cred(), *pcred; 7334 bool match; 7335 7336 rcu_read_lock(); 7337 pcred = __task_cred(p); 7338 match = (uid_eq(cred->euid, pcred->euid) || 7339 uid_eq(cred->euid, pcred->uid)); 7340 rcu_read_unlock(); 7341 return match; 7342 } 7343 7344 /* 7345 * Allow unprivileged RT tasks to decrease priority. 7346 * Only issue a capable test if needed and only once to avoid an audit 7347 * event on permitted non-privileged operations: 7348 */ 7349 static int user_check_sched_setscheduler(struct task_struct *p, 7350 const struct sched_attr *attr, 7351 int policy, int reset_on_fork) 7352 { 7353 if (fair_policy(policy)) { 7354 if (attr->sched_nice < task_nice(p) && 7355 !is_nice_reduction(p, attr->sched_nice)) 7356 goto req_priv; 7357 } 7358 7359 if (rt_policy(policy)) { 7360 unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); 7361 7362 /* Can't set/change the rt policy: */ 7363 if (policy != p->policy && !rlim_rtprio) 7364 goto req_priv; 7365 7366 /* Can't increase priority: */ 7367 if (attr->sched_priority > p->rt_priority && 7368 attr->sched_priority > rlim_rtprio) 7369 goto req_priv; 7370 } 7371 7372 /* 7373 * Can't set/change SCHED_DEADLINE policy at all for now 7374 * (safest behavior); in the future we would like to allow 7375 * unprivileged DL tasks to increase their relative deadline 7376 * or reduce their runtime (both ways reducing utilization) 7377 */ 7378 if (dl_policy(policy)) 7379 goto req_priv; 7380 7381 /* 7382 * Treat SCHED_IDLE as nice 20. Only allow a switch to 7383 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 
7384 */ 7385 if (task_has_idle_policy(p) && !idle_policy(policy)) { 7386 if (!is_nice_reduction(p, task_nice(p))) 7387 goto req_priv; 7388 } 7389 7390 /* Can't change other user's priorities: */ 7391 if (!check_same_owner(p)) 7392 goto req_priv; 7393 7394 /* Normal users shall not reset the sched_reset_on_fork flag: */ 7395 if (p->sched_reset_on_fork && !reset_on_fork) 7396 goto req_priv; 7397 7398 return 0; 7399 7400 req_priv: 7401 if (!capable(CAP_SYS_NICE)) 7402 return -EPERM; 7403 7404 return 0; 7405 } 7406 7407 static int __sched_setscheduler(struct task_struct *p, 7408 const struct sched_attr *attr, 7409 bool user, bool pi) 7410 { 7411 int oldpolicy = -1, policy = attr->sched_policy; 7412 int retval, oldprio, newprio, queued, running; 7413 const struct sched_class *prev_class; 7414 struct callback_head *head; 7415 struct rq_flags rf; 7416 int reset_on_fork; 7417 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 7418 struct rq *rq; 7419 7420 /* The pi code expects interrupts enabled */ 7421 BUG_ON(pi && in_interrupt()); 7422 recheck: 7423 /* Double check policy once rq lock held: */ 7424 if (policy < 0) { 7425 reset_on_fork = p->sched_reset_on_fork; 7426 policy = oldpolicy = p->policy; 7427 } else { 7428 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 7429 7430 if (!valid_policy(policy)) 7431 return -EINVAL; 7432 } 7433 7434 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) 7435 return -EINVAL; 7436 7437 /* 7438 * Valid priorities for SCHED_FIFO and SCHED_RR are 7439 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL, 7440 * SCHED_BATCH and SCHED_IDLE is 0. 7441 */ 7442 if (attr->sched_priority > MAX_RT_PRIO-1) 7443 return -EINVAL; 7444 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 7445 (rt_policy(policy) != (attr->sched_priority != 0))) 7446 return -EINVAL; 7447 7448 if (user) { 7449 retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork); 7450 if (retval) 7451 return retval; 7452 7453 if (attr->sched_flags & SCHED_FLAG_SUGOV) 7454 return -EINVAL; 7455 7456 retval = security_task_setscheduler(p); 7457 if (retval) 7458 return retval; 7459 } 7460 7461 /* Update task specific "requested" clamps */ 7462 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { 7463 retval = uclamp_validate(p, attr); 7464 if (retval) 7465 return retval; 7466 } 7467 7468 if (pi) 7469 cpuset_read_lock(); 7470 7471 /* 7472 * Make sure no PI-waiters arrive (or leave) while we are 7473 * changing the priority of the task: 7474 * 7475 * To be able to change p->policy safely, the appropriate 7476 * runqueue lock must be held. 7477 */ 7478 rq = task_rq_lock(p, &rf); 7479 update_rq_clock(rq); 7480 7481 /* 7482 * Changing the policy of the stop threads its a very bad idea: 7483 */ 7484 if (p == rq->stop) { 7485 retval = -EINVAL; 7486 goto unlock; 7487 } 7488 7489 /* 7490 * If not changing anything there's no need to proceed further, 7491 * but store a possible modification of reset_on_fork. 
7492 */ 7493 if (unlikely(policy == p->policy)) { 7494 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 7495 goto change; 7496 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 7497 goto change; 7498 if (dl_policy(policy) && dl_param_changed(p, attr)) 7499 goto change; 7500 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) 7501 goto change; 7502 7503 p->sched_reset_on_fork = reset_on_fork; 7504 retval = 0; 7505 goto unlock; 7506 } 7507 change: 7508 7509 if (user) { 7510 #ifdef CONFIG_RT_GROUP_SCHED 7511 /* 7512 * Do not allow realtime tasks into groups that have no runtime 7513 * assigned. 7514 */ 7515 if (rt_bandwidth_enabled() && rt_policy(policy) && 7516 task_group(p)->rt_bandwidth.rt_runtime == 0 && 7517 !task_group_is_autogroup(task_group(p))) { 7518 retval = -EPERM; 7519 goto unlock; 7520 } 7521 #endif 7522 #ifdef CONFIG_SMP 7523 if (dl_bandwidth_enabled() && dl_policy(policy) && 7524 !(attr->sched_flags & SCHED_FLAG_SUGOV)) { 7525 cpumask_t *span = rq->rd->span; 7526 7527 /* 7528 * Don't allow tasks with an affinity mask smaller than 7529 * the entire root_domain to become SCHED_DEADLINE. We 7530 * will also fail if there's no bandwidth available. 7531 */ 7532 if (!cpumask_subset(span, p->cpus_ptr) || 7533 rq->rd->dl_bw.bw == 0) { 7534 retval = -EPERM; 7535 goto unlock; 7536 } 7537 } 7538 #endif 7539 } 7540 7541 /* Re-check policy now with rq lock held: */ 7542 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 7543 policy = oldpolicy = -1; 7544 task_rq_unlock(rq, p, &rf); 7545 if (pi) 7546 cpuset_read_unlock(); 7547 goto recheck; 7548 } 7549 7550 /* 7551 * If setscheduling to SCHED_DEADLINE (or changing the parameters 7552 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 7553 * is available. 7554 */ 7555 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { 7556 retval = -EBUSY; 7557 goto unlock; 7558 } 7559 7560 p->sched_reset_on_fork = reset_on_fork; 7561 oldprio = p->prio; 7562 7563 newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice); 7564 if (pi) { 7565 /* 7566 * Take priority boosted tasks into account. If the new 7567 * effective priority is unchanged, we just store the new 7568 * normal parameters and do not touch the scheduler class and 7569 * the runqueue. This will be done when the task deboost 7570 * itself. 7571 */ 7572 newprio = rt_effective_prio(p, newprio); 7573 if (newprio == oldprio) 7574 queue_flags &= ~DEQUEUE_MOVE; 7575 } 7576 7577 queued = task_on_rq_queued(p); 7578 running = task_current(rq, p); 7579 if (queued) 7580 dequeue_task(rq, p, queue_flags); 7581 if (running) 7582 put_prev_task(rq, p); 7583 7584 prev_class = p->sched_class; 7585 7586 if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) { 7587 __setscheduler_params(p, attr); 7588 __setscheduler_prio(p, newprio); 7589 } 7590 __setscheduler_uclamp(p, attr); 7591 7592 if (queued) { 7593 /* 7594 * We enqueue to tail when the priority of a task is 7595 * increased (user space view). 
7596 */ 7597 if (oldprio < p->prio) 7598 queue_flags |= ENQUEUE_HEAD; 7599 7600 enqueue_task(rq, p, queue_flags); 7601 } 7602 if (running) 7603 set_next_task(rq, p); 7604 7605 check_class_changed(rq, p, prev_class, oldprio); 7606 7607 /* Avoid rq from going away on us: */ 7608 preempt_disable(); 7609 head = splice_balance_callbacks(rq); 7610 task_rq_unlock(rq, p, &rf); 7611 7612 if (pi) { 7613 cpuset_read_unlock(); 7614 rt_mutex_adjust_pi(p); 7615 } 7616 7617 /* Run balance callbacks after we've adjusted the PI chain: */ 7618 balance_callbacks(rq, head); 7619 preempt_enable(); 7620 7621 return 0; 7622 7623 unlock: 7624 task_rq_unlock(rq, p, &rf); 7625 if (pi) 7626 cpuset_read_unlock(); 7627 return retval; 7628 } 7629 7630 static int _sched_setscheduler(struct task_struct *p, int policy, 7631 const struct sched_param *param, bool check) 7632 { 7633 struct sched_attr attr = { 7634 .sched_policy = policy, 7635 .sched_priority = param->sched_priority, 7636 .sched_nice = PRIO_TO_NICE(p->static_prio), 7637 }; 7638 7639 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 7640 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 7641 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 7642 policy &= ~SCHED_RESET_ON_FORK; 7643 attr.sched_policy = policy; 7644 } 7645 7646 return __sched_setscheduler(p, &attr, check, true); 7647 } 7648 /** 7649 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 7650 * @p: the task in question. 7651 * @policy: new policy. 7652 * @param: structure containing the new RT priority. 7653 * 7654 * Use sched_set_fifo(), read its comment. 7655 * 7656 * Return: 0 on success. An error code otherwise. 7657 * 7658 * NOTE that the task may be already dead. 7659 */ 7660 int sched_setscheduler(struct task_struct *p, int policy, 7661 const struct sched_param *param) 7662 { 7663 return _sched_setscheduler(p, policy, param, true); 7664 } 7665 7666 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 7667 { 7668 return __sched_setscheduler(p, attr, true, true); 7669 } 7670 7671 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) 7672 { 7673 return __sched_setscheduler(p, attr, false, true); 7674 } 7675 EXPORT_SYMBOL_GPL(sched_setattr_nocheck); 7676 7677 /** 7678 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 7679 * @p: the task in question. 7680 * @policy: new policy. 7681 * @param: structure containing the new RT priority. 7682 * 7683 * Just like sched_setscheduler, only don't bother checking if the 7684 * current context has permission. For example, this is needed in 7685 * stop_machine(): we create temporary high priority worker threads, 7686 * but our caller might not have that capability. 7687 * 7688 * Return: 0 on success. An error code otherwise. 7689 */ 7690 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 7691 const struct sched_param *param) 7692 { 7693 return _sched_setscheduler(p, policy, param, false); 7694 } 7695 7696 /* 7697 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally 7698 * incapable of resource management, which is the one thing an OS really should 7699 * be doing. 7700 * 7701 * This is of course the reason it is limited to privileged users only. 7702 * 7703 * Worse still; it is fundamentally impossible to compose static priority 7704 * workloads. You cannot take two correctly working static prio workloads 7705 * and smash them together and still expect them to work. 
7706 * 7707 * For this reason 'all' FIFO tasks the kernel creates are basically at: 7708 * 7709 * MAX_RT_PRIO / 2 7710 * 7711 * The administrator _MUST_ configure the system, the kernel simply doesn't 7712 * know enough information to make a sensible choice. 7713 */ 7714 void sched_set_fifo(struct task_struct *p) 7715 { 7716 struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; 7717 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 7718 } 7719 EXPORT_SYMBOL_GPL(sched_set_fifo); 7720 7721 /* 7722 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. 7723 */ 7724 void sched_set_fifo_low(struct task_struct *p) 7725 { 7726 struct sched_param sp = { .sched_priority = 1 }; 7727 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 7728 } 7729 EXPORT_SYMBOL_GPL(sched_set_fifo_low); 7730 7731 void sched_set_normal(struct task_struct *p, int nice) 7732 { 7733 struct sched_attr attr = { 7734 .sched_policy = SCHED_NORMAL, 7735 .sched_nice = nice, 7736 }; 7737 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); 7738 } 7739 EXPORT_SYMBOL_GPL(sched_set_normal); 7740 7741 static int 7742 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 7743 { 7744 struct sched_param lparam; 7745 struct task_struct *p; 7746 int retval; 7747 7748 if (!param || pid < 0) 7749 return -EINVAL; 7750 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 7751 return -EFAULT; 7752 7753 rcu_read_lock(); 7754 retval = -ESRCH; 7755 p = find_process_by_pid(pid); 7756 if (likely(p)) 7757 get_task_struct(p); 7758 rcu_read_unlock(); 7759 7760 if (likely(p)) { 7761 retval = sched_setscheduler(p, policy, &lparam); 7762 put_task_struct(p); 7763 } 7764 7765 return retval; 7766 } 7767 7768 /* 7769 * Mimics kernel/events/core.c perf_copy_attr(). 7770 */ 7771 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) 7772 { 7773 u32 size; 7774 int ret; 7775 7776 /* Zero the full structure, so that a short copy will be nice: */ 7777 memset(attr, 0, sizeof(*attr)); 7778 7779 ret = get_user(size, &uattr->size); 7780 if (ret) 7781 return ret; 7782 7783 /* ABI compatibility quirk: */ 7784 if (!size) 7785 size = SCHED_ATTR_SIZE_VER0; 7786 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) 7787 goto err_size; 7788 7789 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); 7790 if (ret) { 7791 if (ret == -E2BIG) 7792 goto err_size; 7793 return ret; 7794 } 7795 7796 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 7797 size < SCHED_ATTR_SIZE_VER1) 7798 return -EINVAL; 7799 7800 /* 7801 * XXX: Do we want to be lenient like existing syscalls; or do we want 7802 * to be strict and return an error on out-of-bounds values? 7803 */ 7804 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 7805 7806 return 0; 7807 7808 err_size: 7809 put_user(sizeof(*attr), &uattr->size); 7810 return -E2BIG; 7811 } 7812 7813 static void get_params(struct task_struct *p, struct sched_attr *attr) 7814 { 7815 if (task_has_dl_policy(p)) 7816 __getparam_dl(p, attr); 7817 else if (task_has_rt_policy(p)) 7818 attr->sched_priority = p->rt_priority; 7819 else 7820 attr->sched_nice = task_nice(p); 7821 } 7822 7823 /** 7824 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 7825 * @pid: the pid in question. 7826 * @policy: new policy. 7827 * @param: structure containing the new RT priority. 7828 * 7829 * Return: 0 on success. An error code otherwise. 
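 *
 * Purely as an illustrative sketch (not part of this file): from user
 * space this is normally reached through the libc wrapper of the same
 * name, with pid 0 meaning the calling thread and an arbitrary example
 * priority of 10:
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
 *		perror("sched_setscheduler");
 *
 * An unprivileged caller without CAP_SYS_NICE (or a sufficient
 * RLIMIT_RTPRIO) gets EPERM here.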
7830 */ 7831 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) 7832 { 7833 if (policy < 0) 7834 return -EINVAL; 7835 7836 return do_sched_setscheduler(pid, policy, param); 7837 } 7838 7839 /** 7840 * sys_sched_setparam - set/change the RT priority of a thread 7841 * @pid: the pid in question. 7842 * @param: structure containing the new RT priority. 7843 * 7844 * Return: 0 on success. An error code otherwise. 7845 */ 7846 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 7847 { 7848 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 7849 } 7850 7851 /** 7852 * sys_sched_setattr - same as above, but with extended sched_attr 7853 * @pid: the pid in question. 7854 * @uattr: structure containing the extended parameters. 7855 * @flags: for future extension. 7856 */ 7857 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 7858 unsigned int, flags) 7859 { 7860 struct sched_attr attr; 7861 struct task_struct *p; 7862 int retval; 7863 7864 if (!uattr || pid < 0 || flags) 7865 return -EINVAL; 7866 7867 retval = sched_copy_attr(uattr, &attr); 7868 if (retval) 7869 return retval; 7870 7871 if ((int)attr.sched_policy < 0) 7872 return -EINVAL; 7873 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) 7874 attr.sched_policy = SETPARAM_POLICY; 7875 7876 rcu_read_lock(); 7877 retval = -ESRCH; 7878 p = find_process_by_pid(pid); 7879 if (likely(p)) 7880 get_task_struct(p); 7881 rcu_read_unlock(); 7882 7883 if (likely(p)) { 7884 if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS) 7885 get_params(p, &attr); 7886 retval = sched_setattr(p, &attr); 7887 put_task_struct(p); 7888 } 7889 7890 return retval; 7891 } 7892 7893 /** 7894 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 7895 * @pid: the pid in question. 7896 * 7897 * Return: On success, the policy of the thread. Otherwise, a negative error 7898 * code. 7899 */ 7900 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 7901 { 7902 struct task_struct *p; 7903 int retval; 7904 7905 if (pid < 0) 7906 return -EINVAL; 7907 7908 retval = -ESRCH; 7909 rcu_read_lock(); 7910 p = find_process_by_pid(pid); 7911 if (p) { 7912 retval = security_task_getscheduler(p); 7913 if (!retval) 7914 retval = p->policy 7915 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 7916 } 7917 rcu_read_unlock(); 7918 return retval; 7919 } 7920 7921 /** 7922 * sys_sched_getparam - get the RT priority of a thread 7923 * @pid: the pid in question. 7924 * @param: structure containing the RT priority. 7925 * 7926 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 7927 * code. 7928 */ 7929 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 7930 { 7931 struct sched_param lp = { .sched_priority = 0 }; 7932 struct task_struct *p; 7933 int retval; 7934 7935 if (!param || pid < 0) 7936 return -EINVAL; 7937 7938 rcu_read_lock(); 7939 p = find_process_by_pid(pid); 7940 retval = -ESRCH; 7941 if (!p) 7942 goto out_unlock; 7943 7944 retval = security_task_getscheduler(p); 7945 if (retval) 7946 goto out_unlock; 7947 7948 if (task_has_rt_policy(p)) 7949 lp.sched_priority = p->rt_priority; 7950 rcu_read_unlock(); 7951 7952 /* 7953 * This one might sleep, we cannot do it with a spinlock held ... 7954 */ 7955 retval = copy_to_user(param, &lp, sizeof(*param)) ? 
-EFAULT : 0; 7956 7957 return retval; 7958 7959 out_unlock: 7960 rcu_read_unlock(); 7961 return retval; 7962 } 7963 7964 /* 7965 * Copy the kernel size attribute structure (which might be larger 7966 * than what user-space knows about) to user-space. 7967 * 7968 * Note that all cases are valid: user-space buffer can be larger or 7969 * smaller than the kernel-space buffer. The usual case is that both 7970 * have the same size. 7971 */ 7972 static int 7973 sched_attr_copy_to_user(struct sched_attr __user *uattr, 7974 struct sched_attr *kattr, 7975 unsigned int usize) 7976 { 7977 unsigned int ksize = sizeof(*kattr); 7978 7979 if (!access_ok(uattr, usize)) 7980 return -EFAULT; 7981 7982 /* 7983 * sched_getattr() ABI forwards and backwards compatibility: 7984 * 7985 * If usize == ksize then we just copy everything to user-space and all is good. 7986 * 7987 * If usize < ksize then we only copy as much as user-space has space for, 7988 * this keeps ABI compatibility as well. We skip the rest. 7989 * 7990 * If usize > ksize then user-space is using a newer version of the ABI, 7991 * which part the kernel doesn't know about. Just ignore it - tooling can 7992 * detect the kernel's knowledge of attributes from the attr->size value 7993 * which is set to ksize in this case. 7994 */ 7995 kattr->size = min(usize, ksize); 7996 7997 if (copy_to_user(uattr, kattr, kattr->size)) 7998 return -EFAULT; 7999 8000 return 0; 8001 } 8002 8003 /** 8004 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 8005 * @pid: the pid in question. 8006 * @uattr: structure containing the extended parameters. 8007 * @usize: sizeof(attr) for fwd/bwd comp. 8008 * @flags: for future extension. 8009 */ 8010 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 8011 unsigned int, usize, unsigned int, flags) 8012 { 8013 struct sched_attr kattr = { }; 8014 struct task_struct *p; 8015 int retval; 8016 8017 if (!uattr || pid < 0 || usize > PAGE_SIZE || 8018 usize < SCHED_ATTR_SIZE_VER0 || flags) 8019 return -EINVAL; 8020 8021 rcu_read_lock(); 8022 p = find_process_by_pid(pid); 8023 retval = -ESRCH; 8024 if (!p) 8025 goto out_unlock; 8026 8027 retval = security_task_getscheduler(p); 8028 if (retval) 8029 goto out_unlock; 8030 8031 kattr.sched_policy = p->policy; 8032 if (p->sched_reset_on_fork) 8033 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 8034 get_params(p, &kattr); 8035 kattr.sched_flags &= SCHED_FLAG_ALL; 8036 8037 #ifdef CONFIG_UCLAMP_TASK 8038 /* 8039 * This could race with another potential updater, but this is fine 8040 * because it'll correctly read the old or the new value. We don't need 8041 * to guarantee who wins the race as long as it doesn't return garbage. 8042 */ 8043 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 8044 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 8045 #endif 8046 8047 rcu_read_unlock(); 8048 8049 return sched_attr_copy_to_user(uattr, &kattr, usize); 8050 8051 out_unlock: 8052 rcu_read_unlock(); 8053 return retval; 8054 } 8055 8056 #ifdef CONFIG_SMP 8057 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) 8058 { 8059 int ret = 0; 8060 8061 /* 8062 * If the task isn't a deadline task or admission control is 8063 * disabled then we don't care about affinity changes. 
8064 */ 8065 if (!task_has_dl_policy(p) || !dl_bandwidth_enabled()) 8066 return 0; 8067 8068 /* 8069 * Since bandwidth control happens on root_domain basis, 8070 * if admission test is enabled, we only admit -deadline 8071 * tasks allowed to run on all the CPUs in the task's 8072 * root_domain. 8073 */ 8074 rcu_read_lock(); 8075 if (!cpumask_subset(task_rq(p)->rd->span, mask)) 8076 ret = -EBUSY; 8077 rcu_read_unlock(); 8078 return ret; 8079 } 8080 #endif 8081 8082 static int 8083 __sched_setaffinity(struct task_struct *p, const struct cpumask *mask) 8084 { 8085 int retval; 8086 cpumask_var_t cpus_allowed, new_mask; 8087 8088 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) 8089 return -ENOMEM; 8090 8091 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 8092 retval = -ENOMEM; 8093 goto out_free_cpus_allowed; 8094 } 8095 8096 cpuset_cpus_allowed(p, cpus_allowed); 8097 cpumask_and(new_mask, mask, cpus_allowed); 8098 8099 retval = dl_task_check_affinity(p, new_mask); 8100 if (retval) 8101 goto out_free_new_mask; 8102 again: 8103 retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER); 8104 if (retval) 8105 goto out_free_new_mask; 8106 8107 cpuset_cpus_allowed(p, cpus_allowed); 8108 if (!cpumask_subset(new_mask, cpus_allowed)) { 8109 /* 8110 * We must have raced with a concurrent cpuset update. 8111 * Just reset the cpumask to the cpuset's cpus_allowed. 8112 */ 8113 cpumask_copy(new_mask, cpus_allowed); 8114 goto again; 8115 } 8116 8117 out_free_new_mask: 8118 free_cpumask_var(new_mask); 8119 out_free_cpus_allowed: 8120 free_cpumask_var(cpus_allowed); 8121 return retval; 8122 } 8123 8124 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 8125 { 8126 struct task_struct *p; 8127 int retval; 8128 8129 rcu_read_lock(); 8130 8131 p = find_process_by_pid(pid); 8132 if (!p) { 8133 rcu_read_unlock(); 8134 return -ESRCH; 8135 } 8136 8137 /* Prevent p going away */ 8138 get_task_struct(p); 8139 rcu_read_unlock(); 8140 8141 if (p->flags & PF_NO_SETAFFINITY) { 8142 retval = -EINVAL; 8143 goto out_put_task; 8144 } 8145 8146 if (!check_same_owner(p)) { 8147 rcu_read_lock(); 8148 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 8149 rcu_read_unlock(); 8150 retval = -EPERM; 8151 goto out_put_task; 8152 } 8153 rcu_read_unlock(); 8154 } 8155 8156 retval = security_task_setscheduler(p); 8157 if (retval) 8158 goto out_put_task; 8159 8160 retval = __sched_setaffinity(p, in_mask); 8161 out_put_task: 8162 put_task_struct(p); 8163 return retval; 8164 } 8165 8166 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 8167 struct cpumask *new_mask) 8168 { 8169 if (len < cpumask_size()) 8170 cpumask_clear(new_mask); 8171 else if (len > cpumask_size()) 8172 len = cpumask_size(); 8173 8174 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 8175 } 8176 8177 /** 8178 * sys_sched_setaffinity - set the CPU affinity of a process 8179 * @pid: pid of the process 8180 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 8181 * @user_mask_ptr: user-space pointer to the new CPU mask 8182 * 8183 * Return: 0 on success. An error code otherwise. 
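 *
 * Illustrative user-space sketch only (not from this file), pinning the
 * calling thread (pid 0) to CPU 2 through the libc wrapper; the CPU
 * number is an arbitrary example:
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set) == -1)
 *		perror("sched_setaffinity");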
8184 */ 8185 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 8186 unsigned long __user *, user_mask_ptr) 8187 { 8188 cpumask_var_t new_mask; 8189 int retval; 8190 8191 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 8192 return -ENOMEM; 8193 8194 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 8195 if (retval == 0) 8196 retval = sched_setaffinity(pid, new_mask); 8197 free_cpumask_var(new_mask); 8198 return retval; 8199 } 8200 8201 long sched_getaffinity(pid_t pid, struct cpumask *mask) 8202 { 8203 struct task_struct *p; 8204 unsigned long flags; 8205 int retval; 8206 8207 rcu_read_lock(); 8208 8209 retval = -ESRCH; 8210 p = find_process_by_pid(pid); 8211 if (!p) 8212 goto out_unlock; 8213 8214 retval = security_task_getscheduler(p); 8215 if (retval) 8216 goto out_unlock; 8217 8218 raw_spin_lock_irqsave(&p->pi_lock, flags); 8219 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); 8220 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 8221 8222 out_unlock: 8223 rcu_read_unlock(); 8224 8225 return retval; 8226 } 8227 8228 /** 8229 * sys_sched_getaffinity - get the CPU affinity of a process 8230 * @pid: pid of the process 8231 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 8232 * @user_mask_ptr: user-space pointer to hold the current CPU mask 8233 * 8234 * Return: size of CPU mask copied to user_mask_ptr on success. An 8235 * error code otherwise. 8236 */ 8237 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 8238 unsigned long __user *, user_mask_ptr) 8239 { 8240 int ret; 8241 cpumask_var_t mask; 8242 8243 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 8244 return -EINVAL; 8245 if (len & (sizeof(unsigned long)-1)) 8246 return -EINVAL; 8247 8248 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 8249 return -ENOMEM; 8250 8251 ret = sched_getaffinity(pid, mask); 8252 if (ret == 0) { 8253 unsigned int retlen = min(len, cpumask_size()); 8254 8255 if (copy_to_user(user_mask_ptr, mask, retlen)) 8256 ret = -EFAULT; 8257 else 8258 ret = retlen; 8259 } 8260 free_cpumask_var(mask); 8261 8262 return ret; 8263 } 8264 8265 static void do_sched_yield(void) 8266 { 8267 struct rq_flags rf; 8268 struct rq *rq; 8269 8270 rq = this_rq_lock_irq(&rf); 8271 8272 schedstat_inc(rq->yld_count); 8273 current->sched_class->yield_task(rq); 8274 8275 preempt_disable(); 8276 rq_unlock_irq(rq, &rf); 8277 sched_preempt_enable_no_resched(); 8278 8279 schedule(); 8280 } 8281 8282 /** 8283 * sys_sched_yield - yield the current processor to other threads. 8284 * 8285 * This function yields the current CPU to other tasks. If there are no 8286 * other threads running on this CPU then this function will return. 8287 * 8288 * Return: 0. 8289 */ 8290 SYSCALL_DEFINE0(sched_yield) 8291 { 8292 do_sched_yield(); 8293 return 0; 8294 } 8295 8296 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) 8297 int __sched __cond_resched(void) 8298 { 8299 if (should_resched(0)) { 8300 preempt_schedule_common(); 8301 return 1; 8302 } 8303 /* 8304 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick 8305 * whether the current CPU is in an RCU read-side critical section, 8306 * so the tick can report quiescent states even for CPUs looping 8307 * in kernel context. In contrast, in non-preemptible kernels, 8308 * RCU readers leave no in-memory hints, which means that CPU-bound 8309 * processes executing in kernel context might never report an 8310 * RCU quiescent state. 
Therefore, the following code causes 8311 * cond_resched() to report a quiescent state, but only when RCU 8312 * is in urgent need of one. 8313 */ 8314 #ifndef CONFIG_PREEMPT_RCU 8315 rcu_all_qs(); 8316 #endif 8317 return 0; 8318 } 8319 EXPORT_SYMBOL(__cond_resched); 8320 #endif 8321 8322 #ifdef CONFIG_PREEMPT_DYNAMIC 8323 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 8324 #define cond_resched_dynamic_enabled __cond_resched 8325 #define cond_resched_dynamic_disabled ((void *)&__static_call_return0) 8326 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched); 8327 EXPORT_STATIC_CALL_TRAMP(cond_resched); 8328 8329 #define might_resched_dynamic_enabled __cond_resched 8330 #define might_resched_dynamic_disabled ((void *)&__static_call_return0) 8331 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched); 8332 EXPORT_STATIC_CALL_TRAMP(might_resched); 8333 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 8334 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched); 8335 int __sched dynamic_cond_resched(void) 8336 { 8337 if (!static_branch_unlikely(&sk_dynamic_cond_resched)) 8338 return 0; 8339 return __cond_resched(); 8340 } 8341 EXPORT_SYMBOL(dynamic_cond_resched); 8342 8343 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched); 8344 int __sched dynamic_might_resched(void) 8345 { 8346 if (!static_branch_unlikely(&sk_dynamic_might_resched)) 8347 return 0; 8348 return __cond_resched(); 8349 } 8350 EXPORT_SYMBOL(dynamic_might_resched); 8351 #endif 8352 #endif 8353 8354 /* 8355 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 8356 * call schedule, and on return reacquire the lock. 8357 * 8358 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level 8359 * operations here to prevent schedule() from being called twice (once via 8360 * spin_unlock(), once by hand). 
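 *
 * Typical usage pattern, sketched for illustration only (the lock and
 * the work loop are hypothetical, not from this file):
 *
 *	spin_lock(&lock);
 *	while (more_work()) {
 *		process_one_item();
 *		cond_resched_lock(&lock);
 *	}
 *	spin_unlock(&lock);
 *
 * Whenever cond_resched_lock() returns 1 the lock was dropped and
 * reacquired, so the caller must not depend on state that is only
 * stable while the lock is held continuously.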
8361 */ 8362 int __cond_resched_lock(spinlock_t *lock) 8363 { 8364 int resched = should_resched(PREEMPT_LOCK_OFFSET); 8365 int ret = 0; 8366 8367 lockdep_assert_held(lock); 8368 8369 if (spin_needbreak(lock) || resched) { 8370 spin_unlock(lock); 8371 if (!_cond_resched()) 8372 cpu_relax(); 8373 ret = 1; 8374 spin_lock(lock); 8375 } 8376 return ret; 8377 } 8378 EXPORT_SYMBOL(__cond_resched_lock); 8379 8380 int __cond_resched_rwlock_read(rwlock_t *lock) 8381 { 8382 int resched = should_resched(PREEMPT_LOCK_OFFSET); 8383 int ret = 0; 8384 8385 lockdep_assert_held_read(lock); 8386 8387 if (rwlock_needbreak(lock) || resched) { 8388 read_unlock(lock); 8389 if (!_cond_resched()) 8390 cpu_relax(); 8391 ret = 1; 8392 read_lock(lock); 8393 } 8394 return ret; 8395 } 8396 EXPORT_SYMBOL(__cond_resched_rwlock_read); 8397 8398 int __cond_resched_rwlock_write(rwlock_t *lock) 8399 { 8400 int resched = should_resched(PREEMPT_LOCK_OFFSET); 8401 int ret = 0; 8402 8403 lockdep_assert_held_write(lock); 8404 8405 if (rwlock_needbreak(lock) || resched) { 8406 write_unlock(lock); 8407 if (!_cond_resched()) 8408 cpu_relax(); 8409 ret = 1; 8410 write_lock(lock); 8411 } 8412 return ret; 8413 } 8414 EXPORT_SYMBOL(__cond_resched_rwlock_write); 8415 8416 #ifdef CONFIG_PREEMPT_DYNAMIC 8417 8418 #ifdef CONFIG_GENERIC_ENTRY 8419 #include <linux/entry-common.h> 8420 #endif 8421 8422 /* 8423 * SC:cond_resched 8424 * SC:might_resched 8425 * SC:preempt_schedule 8426 * SC:preempt_schedule_notrace 8427 * SC:irqentry_exit_cond_resched 8428 * 8429 * 8430 * NONE: 8431 * cond_resched <- __cond_resched 8432 * might_resched <- RET0 8433 * preempt_schedule <- NOP 8434 * preempt_schedule_notrace <- NOP 8435 * irqentry_exit_cond_resched <- NOP 8436 * 8437 * VOLUNTARY: 8438 * cond_resched <- __cond_resched 8439 * might_resched <- __cond_resched 8440 * preempt_schedule <- NOP 8441 * preempt_schedule_notrace <- NOP 8442 * irqentry_exit_cond_resched <- NOP 8443 * 8444 * FULL: 8445 * cond_resched <- RET0 8446 * might_resched <- RET0 8447 * preempt_schedule <- preempt_schedule 8448 * preempt_schedule_notrace <- preempt_schedule_notrace 8449 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched 8450 */ 8451 8452 enum { 8453 preempt_dynamic_undefined = -1, 8454 preempt_dynamic_none, 8455 preempt_dynamic_voluntary, 8456 preempt_dynamic_full, 8457 }; 8458 8459 int preempt_dynamic_mode = preempt_dynamic_undefined; 8460 8461 int sched_dynamic_mode(const char *str) 8462 { 8463 if (!strcmp(str, "none")) 8464 return preempt_dynamic_none; 8465 8466 if (!strcmp(str, "voluntary")) 8467 return preempt_dynamic_voluntary; 8468 8469 if (!strcmp(str, "full")) 8470 return preempt_dynamic_full; 8471 8472 return -EINVAL; 8473 } 8474 8475 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 8476 #define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled) 8477 #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled) 8478 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 8479 #define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key) 8480 #define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key) 8481 #else 8482 #error "Unsupported PREEMPT_DYNAMIC mechanism" 8483 #endif 8484 8485 void sched_dynamic_update(int mode) 8486 { 8487 /* 8488 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in 8489 * the ZERO state, which is invalid. 
8490 */ 8491 preempt_dynamic_enable(cond_resched); 8492 preempt_dynamic_enable(might_resched); 8493 preempt_dynamic_enable(preempt_schedule); 8494 preempt_dynamic_enable(preempt_schedule_notrace); 8495 preempt_dynamic_enable(irqentry_exit_cond_resched); 8496 8497 switch (mode) { 8498 case preempt_dynamic_none: 8499 preempt_dynamic_enable(cond_resched); 8500 preempt_dynamic_disable(might_resched); 8501 preempt_dynamic_disable(preempt_schedule); 8502 preempt_dynamic_disable(preempt_schedule_notrace); 8503 preempt_dynamic_disable(irqentry_exit_cond_resched); 8504 pr_info("Dynamic Preempt: none\n"); 8505 break; 8506 8507 case preempt_dynamic_voluntary: 8508 preempt_dynamic_enable(cond_resched); 8509 preempt_dynamic_enable(might_resched); 8510 preempt_dynamic_disable(preempt_schedule); 8511 preempt_dynamic_disable(preempt_schedule_notrace); 8512 preempt_dynamic_disable(irqentry_exit_cond_resched); 8513 pr_info("Dynamic Preempt: voluntary\n"); 8514 break; 8515 8516 case preempt_dynamic_full: 8517 preempt_dynamic_disable(cond_resched); 8518 preempt_dynamic_disable(might_resched); 8519 preempt_dynamic_enable(preempt_schedule); 8520 preempt_dynamic_enable(preempt_schedule_notrace); 8521 preempt_dynamic_enable(irqentry_exit_cond_resched); 8522 pr_info("Dynamic Preempt: full\n"); 8523 break; 8524 } 8525 8526 preempt_dynamic_mode = mode; 8527 } 8528 8529 static int __init setup_preempt_mode(char *str) 8530 { 8531 int mode = sched_dynamic_mode(str); 8532 if (mode < 0) { 8533 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str); 8534 return 0; 8535 } 8536 8537 sched_dynamic_update(mode); 8538 return 1; 8539 } 8540 __setup("preempt=", setup_preempt_mode); 8541 8542 static void __init preempt_dynamic_init(void) 8543 { 8544 if (preempt_dynamic_mode == preempt_dynamic_undefined) { 8545 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) { 8546 sched_dynamic_update(preempt_dynamic_none); 8547 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) { 8548 sched_dynamic_update(preempt_dynamic_voluntary); 8549 } else { 8550 /* Default static call setting, nothing to do */ 8551 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)); 8552 preempt_dynamic_mode = preempt_dynamic_full; 8553 pr_info("Dynamic Preempt: full\n"); 8554 } 8555 } 8556 } 8557 8558 #define PREEMPT_MODEL_ACCESSOR(mode) \ 8559 bool preempt_model_##mode(void) \ 8560 { \ 8561 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \ 8562 return preempt_dynamic_mode == preempt_dynamic_##mode; \ 8563 } \ 8564 EXPORT_SYMBOL_GPL(preempt_model_##mode) 8565 8566 PREEMPT_MODEL_ACCESSOR(none); 8567 PREEMPT_MODEL_ACCESSOR(voluntary); 8568 PREEMPT_MODEL_ACCESSOR(full); 8569 8570 #else /* !CONFIG_PREEMPT_DYNAMIC */ 8571 8572 static inline void preempt_dynamic_init(void) { } 8573 8574 #endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */ 8575 8576 /** 8577 * yield - yield the current processor to other threads. 8578 * 8579 * Do not ever use this function, there's a 99% chance you're doing it wrong. 8580 * 8581 * The scheduler is at all times free to pick the calling task as the most 8582 * eligible task to run, if removing the yield() call from your code breaks 8583 * it, it's already broken. 8584 * 8585 * Typical broken usage is: 8586 * 8587 * while (!event) 8588 * yield(); 8589 * 8590 * where one assumes that yield() will let 'the other' process run that will 8591 * make event true. If the current task is a SCHED_FIFO task that will never 8592 * happen. Never use yield() as a progress guarantee!! 8593 * 8594 * If you want to use yield() to wait for something, use wait_event(). 
8595 * If you want to use yield() to be 'nice' for others, use cond_resched(). 8596 * If you still want to use yield(), do not! 8597 */ 8598 void __sched yield(void) 8599 { 8600 set_current_state(TASK_RUNNING); 8601 do_sched_yield(); 8602 } 8603 EXPORT_SYMBOL(yield); 8604 8605 /** 8606 * yield_to - yield the current processor to another thread in 8607 * your thread group, or accelerate that thread toward the 8608 * processor it's on. 8609 * @p: target task 8610 * @preempt: whether task preemption is allowed or not 8611 * 8612 * It's the caller's job to ensure that the target task struct 8613 * can't go away on us before we can do any checks. 8614 * 8615 * Return: 8616 * true (>0) if we indeed boosted the target task. 8617 * false (0) if we failed to boost the target. 8618 * -ESRCH if there's no task to yield to. 8619 */ 8620 int __sched yield_to(struct task_struct *p, bool preempt) 8621 { 8622 struct task_struct *curr = current; 8623 struct rq *rq, *p_rq; 8624 unsigned long flags; 8625 int yielded = 0; 8626 8627 local_irq_save(flags); 8628 rq = this_rq(); 8629 8630 again: 8631 p_rq = task_rq(p); 8632 /* 8633 * If we're the only runnable task on the rq and target rq also 8634 * has only one task, there's absolutely no point in yielding. 8635 */ 8636 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 8637 yielded = -ESRCH; 8638 goto out_irq; 8639 } 8640 8641 double_rq_lock(rq, p_rq); 8642 if (task_rq(p) != p_rq) { 8643 double_rq_unlock(rq, p_rq); 8644 goto again; 8645 } 8646 8647 if (!curr->sched_class->yield_to_task) 8648 goto out_unlock; 8649 8650 if (curr->sched_class != p->sched_class) 8651 goto out_unlock; 8652 8653 if (task_running(p_rq, p) || !task_is_running(p)) 8654 goto out_unlock; 8655 8656 yielded = curr->sched_class->yield_to_task(rq, p); 8657 if (yielded) { 8658 schedstat_inc(rq->yld_count); 8659 /* 8660 * Make p's CPU reschedule; pick_next_entity takes care of 8661 * fairness. 8662 */ 8663 if (preempt && rq != p_rq) 8664 resched_curr(p_rq); 8665 } 8666 8667 out_unlock: 8668 double_rq_unlock(rq, p_rq); 8669 out_irq: 8670 local_irq_restore(flags); 8671 8672 if (yielded > 0) 8673 schedule(); 8674 8675 return yielded; 8676 } 8677 EXPORT_SYMBOL_GPL(yield_to); 8678 8679 int io_schedule_prepare(void) 8680 { 8681 int old_iowait = current->in_iowait; 8682 8683 current->in_iowait = 1; 8684 blk_flush_plug(current->plug, true); 8685 return old_iowait; 8686 } 8687 8688 void io_schedule_finish(int token) 8689 { 8690 current->in_iowait = token; 8691 } 8692 8693 /* 8694 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 8695 * that process accounting knows that this is a task in IO wait state. 8696 */ 8697 long __sched io_schedule_timeout(long timeout) 8698 { 8699 int token; 8700 long ret; 8701 8702 token = io_schedule_prepare(); 8703 ret = schedule_timeout(timeout); 8704 io_schedule_finish(token); 8705 8706 return ret; 8707 } 8708 EXPORT_SYMBOL(io_schedule_timeout); 8709 8710 void __sched io_schedule(void) 8711 { 8712 int token; 8713 8714 token = io_schedule_prepare(); 8715 schedule(); 8716 io_schedule_finish(token); 8717 } 8718 EXPORT_SYMBOL(io_schedule); 8719 8720 /** 8721 * sys_sched_get_priority_max - return maximum RT priority. 8722 * @policy: scheduling class. 8723 * 8724 * Return: On success, this syscall returns the maximum 8725 * rt_priority that can be used by a given scheduling class. 8726 * On failure, a negative error code is returned. 
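 *
 * For illustration, given the switch below: a call such as
 * sched_get_priority_max(SCHED_FIFO) reports MAX_RT_PRIO-1 (99 on a
 * stock kernel), while SCHED_NORMAL, SCHED_BATCH, SCHED_IDLE and
 * SCHED_DEADLINE all report 0.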
8727 */ 8728 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 8729 { 8730 int ret = -EINVAL; 8731 8732 switch (policy) { 8733 case SCHED_FIFO: 8734 case SCHED_RR: 8735 ret = MAX_RT_PRIO-1; 8736 break; 8737 case SCHED_DEADLINE: 8738 case SCHED_NORMAL: 8739 case SCHED_BATCH: 8740 case SCHED_IDLE: 8741 ret = 0; 8742 break; 8743 } 8744 return ret; 8745 } 8746 8747 /** 8748 * sys_sched_get_priority_min - return minimum RT priority. 8749 * @policy: scheduling class. 8750 * 8751 * Return: On success, this syscall returns the minimum 8752 * rt_priority that can be used by a given scheduling class. 8753 * On failure, a negative error code is returned. 8754 */ 8755 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 8756 { 8757 int ret = -EINVAL; 8758 8759 switch (policy) { 8760 case SCHED_FIFO: 8761 case SCHED_RR: 8762 ret = 1; 8763 break; 8764 case SCHED_DEADLINE: 8765 case SCHED_NORMAL: 8766 case SCHED_BATCH: 8767 case SCHED_IDLE: 8768 ret = 0; 8769 } 8770 return ret; 8771 } 8772 8773 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 8774 { 8775 struct task_struct *p; 8776 unsigned int time_slice; 8777 struct rq_flags rf; 8778 struct rq *rq; 8779 int retval; 8780 8781 if (pid < 0) 8782 return -EINVAL; 8783 8784 retval = -ESRCH; 8785 rcu_read_lock(); 8786 p = find_process_by_pid(pid); 8787 if (!p) 8788 goto out_unlock; 8789 8790 retval = security_task_getscheduler(p); 8791 if (retval) 8792 goto out_unlock; 8793 8794 rq = task_rq_lock(p, &rf); 8795 time_slice = 0; 8796 if (p->sched_class->get_rr_interval) 8797 time_slice = p->sched_class->get_rr_interval(rq, p); 8798 task_rq_unlock(rq, p, &rf); 8799 8800 rcu_read_unlock(); 8801 jiffies_to_timespec64(time_slice, t); 8802 return 0; 8803 8804 out_unlock: 8805 rcu_read_unlock(); 8806 return retval; 8807 } 8808 8809 /** 8810 * sys_sched_rr_get_interval - return the default timeslice of a process. 8811 * @pid: pid of the process. 8812 * @interval: userspace pointer to the timeslice value. 8813 * 8814 * this syscall writes the default timeslice value of a given process 8815 * into the user-space timespec buffer. A value of '0' means infinity. 8816 * 8817 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 8818 * an error code. 
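 *
 * Illustrative user-space sketch only (not from this file), querying
 * the calling thread (pid 0) through the libc wrapper:
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("RR slice: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);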
8819 */ 8820 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 8821 struct __kernel_timespec __user *, interval) 8822 { 8823 struct timespec64 t; 8824 int retval = sched_rr_get_interval(pid, &t); 8825 8826 if (retval == 0) 8827 retval = put_timespec64(&t, interval); 8828 8829 return retval; 8830 } 8831 8832 #ifdef CONFIG_COMPAT_32BIT_TIME 8833 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, 8834 struct old_timespec32 __user *, interval) 8835 { 8836 struct timespec64 t; 8837 int retval = sched_rr_get_interval(pid, &t); 8838 8839 if (retval == 0) 8840 retval = put_old_timespec32(&t, interval); 8841 return retval; 8842 } 8843 #endif 8844 8845 void sched_show_task(struct task_struct *p) 8846 { 8847 unsigned long free = 0; 8848 int ppid; 8849 8850 if (!try_get_task_stack(p)) 8851 return; 8852 8853 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); 8854 8855 if (task_is_running(p)) 8856 pr_cont(" running task "); 8857 #ifdef CONFIG_DEBUG_STACK_USAGE 8858 free = stack_not_used(p); 8859 #endif 8860 ppid = 0; 8861 rcu_read_lock(); 8862 if (pid_alive(p)) 8863 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 8864 rcu_read_unlock(); 8865 pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n", 8866 free, task_pid_nr(p), ppid, 8867 read_task_thread_flags(p)); 8868 8869 print_worker_info(KERN_INFO, p); 8870 print_stop_info(KERN_INFO, p); 8871 show_stack(p, NULL, KERN_INFO); 8872 put_task_stack(p); 8873 } 8874 EXPORT_SYMBOL_GPL(sched_show_task); 8875 8876 static inline bool 8877 state_filter_match(unsigned long state_filter, struct task_struct *p) 8878 { 8879 unsigned int state = READ_ONCE(p->__state); 8880 8881 /* no filter, everything matches */ 8882 if (!state_filter) 8883 return true; 8884 8885 /* filter, but doesn't match */ 8886 if (!(state & state_filter)) 8887 return false; 8888 8889 /* 8890 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows 8891 * TASK_KILLABLE). 8892 */ 8893 if (state_filter == TASK_UNINTERRUPTIBLE && state == TASK_IDLE) 8894 return false; 8895 8896 return true; 8897 } 8898 8899 8900 void show_state_filter(unsigned int state_filter) 8901 { 8902 struct task_struct *g, *p; 8903 8904 rcu_read_lock(); 8905 for_each_process_thread(g, p) { 8906 /* 8907 * reset the NMI-timeout, listing all files on a slow 8908 * console might take a lot of time: 8909 * Also, reset softlockup watchdogs on all CPUs, because 8910 * another CPU might be blocked waiting for us to process 8911 * an IPI. 8912 */ 8913 touch_nmi_watchdog(); 8914 touch_all_softlockup_watchdogs(); 8915 if (state_filter_match(state_filter, p)) 8916 sched_show_task(p); 8917 } 8918 8919 #ifdef CONFIG_SCHED_DEBUG 8920 if (!state_filter) 8921 sysrq_sched_debug_show(); 8922 #endif 8923 rcu_read_unlock(); 8924 /* 8925 * Only show locks if all tasks are dumped: 8926 */ 8927 if (!state_filter) 8928 debug_show_all_locks(); 8929 } 8930 8931 /** 8932 * init_idle - set up an idle thread for a given CPU 8933 * @idle: task in question 8934 * @cpu: CPU the idle task belongs to 8935 * 8936 * NOTE: this function does not set the idle thread's NEED_RESCHED 8937 * flag, to make booting more robust. 
8938 */ 8939 void __init init_idle(struct task_struct *idle, int cpu) 8940 { 8941 struct rq *rq = cpu_rq(cpu); 8942 unsigned long flags; 8943 8944 __sched_fork(0, idle); 8945 8946 raw_spin_lock_irqsave(&idle->pi_lock, flags); 8947 raw_spin_rq_lock(rq); 8948 8949 idle->__state = TASK_RUNNING; 8950 idle->se.exec_start = sched_clock(); 8951 /* 8952 * PF_KTHREAD should already be set at this point; regardless, make it 8953 * look like a proper per-CPU kthread. 8954 */ 8955 idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY; 8956 kthread_set_per_cpu(idle, cpu); 8957 8958 #ifdef CONFIG_SMP 8959 /* 8960 * It's possible that init_idle() gets called multiple times on a task, 8961 * in that case do_set_cpus_allowed() will not do the right thing. 8962 * 8963 * And since this is boot we can forgo the serialization. 8964 */ 8965 set_cpus_allowed_common(idle, cpumask_of(cpu), 0); 8966 #endif 8967 /* 8968 * We're having a chicken and egg problem, even though we are 8969 * holding rq->lock, the CPU isn't yet set to this CPU so the 8970 * lockdep check in task_group() will fail. 8971 * 8972 * Similar case to sched_fork(). / Alternatively we could 8973 * use task_rq_lock() here and obtain the other rq->lock. 8974 * 8975 * Silence PROVE_RCU 8976 */ 8977 rcu_read_lock(); 8978 __set_task_cpu(idle, cpu); 8979 rcu_read_unlock(); 8980 8981 rq->idle = idle; 8982 rcu_assign_pointer(rq->curr, idle); 8983 idle->on_rq = TASK_ON_RQ_QUEUED; 8984 #ifdef CONFIG_SMP 8985 idle->on_cpu = 1; 8986 #endif 8987 raw_spin_rq_unlock(rq); 8988 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 8989 8990 /* Set the preempt count _outside_ the spinlocks! */ 8991 init_idle_preempt_count(idle, cpu); 8992 8993 /* 8994 * The idle tasks have their own, simple scheduling class: 8995 */ 8996 idle->sched_class = &idle_sched_class; 8997 ftrace_graph_init_idle_task(idle, cpu); 8998 vtime_init_idle(idle, cpu); 8999 #ifdef CONFIG_SMP 9000 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 9001 #endif 9002 } 9003 9004 #ifdef CONFIG_SMP 9005 9006 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 9007 const struct cpumask *trial) 9008 { 9009 int ret = 1; 9010 9011 if (cpumask_empty(cur)) 9012 return ret; 9013 9014 ret = dl_cpuset_cpumask_can_shrink(cur, trial); 9015 9016 return ret; 9017 } 9018 9019 int task_can_attach(struct task_struct *p, 9020 const struct cpumask *cs_effective_cpus) 9021 { 9022 int ret = 0; 9023 9024 /* 9025 * Kthreads which disallow setaffinity shouldn't be moved 9026 * to a new cpuset; we don't want to change their CPU 9027 * affinity and isolating such threads by their set of 9028 * allowed nodes is unnecessary. Thus, cpusets are not 9029 * applicable for such threads. This prevents checking for 9030 * success of set_cpus_allowed_ptr() on all attached tasks 9031 * before cpus_mask may be changed. 
9032 */ 9033 if (p->flags & PF_NO_SETAFFINITY) { 9034 ret = -EINVAL; 9035 goto out; 9036 } 9037 9038 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 9039 cs_effective_cpus)) { 9040 int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus); 9041 9042 if (unlikely(cpu >= nr_cpu_ids)) 9043 return -EINVAL; 9044 ret = dl_cpu_busy(cpu, p); 9045 } 9046 9047 out: 9048 return ret; 9049 } 9050 9051 bool sched_smp_initialized __read_mostly; 9052 9053 #ifdef CONFIG_NUMA_BALANCING 9054 /* Migrate current task p to target_cpu */ 9055 int migrate_task_to(struct task_struct *p, int target_cpu) 9056 { 9057 struct migration_arg arg = { p, target_cpu }; 9058 int curr_cpu = task_cpu(p); 9059 9060 if (curr_cpu == target_cpu) 9061 return 0; 9062 9063 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 9064 return -EINVAL; 9065 9066 /* TODO: This is not properly updating schedstats */ 9067 9068 trace_sched_move_numa(p, curr_cpu, target_cpu); 9069 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 9070 } 9071 9072 /* 9073 * Requeue a task on a given node and accurately track the number of NUMA 9074 * tasks on the runqueues 9075 */ 9076 void sched_setnuma(struct task_struct *p, int nid) 9077 { 9078 bool queued, running; 9079 struct rq_flags rf; 9080 struct rq *rq; 9081 9082 rq = task_rq_lock(p, &rf); 9083 queued = task_on_rq_queued(p); 9084 running = task_current(rq, p); 9085 9086 if (queued) 9087 dequeue_task(rq, p, DEQUEUE_SAVE); 9088 if (running) 9089 put_prev_task(rq, p); 9090 9091 p->numa_preferred_nid = nid; 9092 9093 if (queued) 9094 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 9095 if (running) 9096 set_next_task(rq, p); 9097 task_rq_unlock(rq, p, &rf); 9098 } 9099 #endif /* CONFIG_NUMA_BALANCING */ 9100 9101 #ifdef CONFIG_HOTPLUG_CPU 9102 /* 9103 * Ensure that the idle task is using init_mm right before its CPU goes 9104 * offline. 9105 */ 9106 void idle_task_exit(void) 9107 { 9108 struct mm_struct *mm = current->active_mm; 9109 9110 BUG_ON(cpu_online(smp_processor_id())); 9111 BUG_ON(current != this_rq()->idle); 9112 9113 if (mm != &init_mm) { 9114 switch_mm(mm, &init_mm, current); 9115 finish_arch_post_lock_switch(); 9116 } 9117 9118 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ 9119 } 9120 9121 static int __balance_push_cpu_stop(void *arg) 9122 { 9123 struct task_struct *p = arg; 9124 struct rq *rq = this_rq(); 9125 struct rq_flags rf; 9126 int cpu; 9127 9128 raw_spin_lock_irq(&p->pi_lock); 9129 rq_lock(rq, &rf); 9130 9131 update_rq_clock(rq); 9132 9133 if (task_rq(p) == rq && task_on_rq_queued(p)) { 9134 cpu = select_fallback_rq(rq->cpu, p); 9135 rq = __migrate_task(rq, &rf, p, cpu); 9136 } 9137 9138 rq_unlock(rq, &rf); 9139 raw_spin_unlock_irq(&p->pi_lock); 9140 9141 put_task_struct(p); 9142 9143 return 0; 9144 } 9145 9146 static DEFINE_PER_CPU(struct cpu_stop_work, push_work); 9147 9148 /* 9149 * Ensure we only run per-cpu kthreads once the CPU goes !active. 9150 * 9151 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only 9152 * effective when the hotplug motion is down. 9153 */ 9154 static void balance_push(struct rq *rq) 9155 { 9156 struct task_struct *push_task = rq->curr; 9157 9158 lockdep_assert_rq_held(rq); 9159 9160 /* 9161 * Ensure the thing is persistent until balance_push_set(.on = false); 9162 */ 9163 rq->balance_callback = &balance_push_callback; 9164 9165 /* 9166 * Only active while going offline and when invoked on the outgoing 9167 * CPU. 
9168 */ 9169 if (!cpu_dying(rq->cpu) || rq != this_rq()) 9170 return; 9171 9172 /* 9173 * Both the cpu-hotplug and stop task are in this case and are 9174 * required to complete the hotplug process. 9175 */ 9176 if (kthread_is_per_cpu(push_task) || 9177 is_migration_disabled(push_task)) { 9178 9179 /* 9180 * If this is the idle task on the outgoing CPU try to wake 9181 * up the hotplug control thread which might wait for the 9182 * last task to vanish. The rcuwait_active() check is 9183 * accurate here because the waiter is pinned on this CPU 9184 * and can't obviously be running in parallel. 9185 * 9186 * On RT kernels this also has to check whether there are 9187 * pinned and scheduled out tasks on the runqueue. They 9188 * need to leave the migrate disabled section first. 9189 */ 9190 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && 9191 rcuwait_active(&rq->hotplug_wait)) { 9192 raw_spin_rq_unlock(rq); 9193 rcuwait_wake_up(&rq->hotplug_wait); 9194 raw_spin_rq_lock(rq); 9195 } 9196 return; 9197 } 9198 9199 get_task_struct(push_task); 9200 /* 9201 * Temporarily drop rq->lock such that we can wake-up the stop task. 9202 * Both preemption and IRQs are still disabled. 9203 */ 9204 raw_spin_rq_unlock(rq); 9205 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, 9206 this_cpu_ptr(&push_work)); 9207 /* 9208 * At this point need_resched() is true and we'll take the loop in 9209 * schedule(). The next pick is obviously going to be the stop task 9210 * which kthread_is_per_cpu() and will push this task away. 9211 */ 9212 raw_spin_rq_lock(rq); 9213 } 9214 9215 static void balance_push_set(int cpu, bool on) 9216 { 9217 struct rq *rq = cpu_rq(cpu); 9218 struct rq_flags rf; 9219 9220 rq_lock_irqsave(rq, &rf); 9221 if (on) { 9222 WARN_ON_ONCE(rq->balance_callback); 9223 rq->balance_callback = &balance_push_callback; 9224 } else if (rq->balance_callback == &balance_push_callback) { 9225 rq->balance_callback = NULL; 9226 } 9227 rq_unlock_irqrestore(rq, &rf); 9228 } 9229 9230 /* 9231 * Invoked from a CPUs hotplug control thread after the CPU has been marked 9232 * inactive. All tasks which are not per CPU kernel threads are either 9233 * pushed off this CPU now via balance_push() or placed on a different CPU 9234 * during wakeup. Wait until the CPU is quiescent. 
9235 */ 9236 static void balance_hotplug_wait(void) 9237 { 9238 struct rq *rq = this_rq(); 9239 9240 rcuwait_wait_event(&rq->hotplug_wait, 9241 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), 9242 TASK_UNINTERRUPTIBLE); 9243 } 9244 9245 #else 9246 9247 static inline void balance_push(struct rq *rq) 9248 { 9249 } 9250 9251 static inline void balance_push_set(int cpu, bool on) 9252 { 9253 } 9254 9255 static inline void balance_hotplug_wait(void) 9256 { 9257 } 9258 9259 #endif /* CONFIG_HOTPLUG_CPU */ 9260 9261 void set_rq_online(struct rq *rq) 9262 { 9263 if (!rq->online) { 9264 const struct sched_class *class; 9265 9266 cpumask_set_cpu(rq->cpu, rq->rd->online); 9267 rq->online = 1; 9268 9269 for_each_class(class) { 9270 if (class->rq_online) 9271 class->rq_online(rq); 9272 } 9273 } 9274 } 9275 9276 void set_rq_offline(struct rq *rq) 9277 { 9278 if (rq->online) { 9279 const struct sched_class *class; 9280 9281 for_each_class(class) { 9282 if (class->rq_offline) 9283 class->rq_offline(rq); 9284 } 9285 9286 cpumask_clear_cpu(rq->cpu, rq->rd->online); 9287 rq->online = 0; 9288 } 9289 } 9290 9291 /* 9292 * used to mark begin/end of suspend/resume: 9293 */ 9294 static int num_cpus_frozen; 9295 9296 /* 9297 * Update cpusets according to cpu_active mask. If cpusets are 9298 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 9299 * around partition_sched_domains(). 9300 * 9301 * If we come here as part of a suspend/resume, don't touch cpusets because we 9302 * want to restore it back to its original state upon resume anyway. 9303 */ 9304 static void cpuset_cpu_active(void) 9305 { 9306 if (cpuhp_tasks_frozen) { 9307 /* 9308 * num_cpus_frozen tracks how many CPUs are involved in suspend 9309 * resume sequence. As long as this is not the last online 9310 * operation in the resume sequence, just build a single sched 9311 * domain, ignoring cpusets. 9312 */ 9313 partition_sched_domains(1, NULL, NULL); 9314 if (--num_cpus_frozen) 9315 return; 9316 /* 9317 * This is the last CPU online operation. So fall through and 9318 * restore the original sched domains by considering the 9319 * cpuset configurations. 9320 */ 9321 cpuset_force_rebuild(); 9322 } 9323 cpuset_update_active_cpus(); 9324 } 9325 9326 static int cpuset_cpu_inactive(unsigned int cpu) 9327 { 9328 if (!cpuhp_tasks_frozen) { 9329 int ret = dl_cpu_busy(cpu, NULL); 9330 9331 if (ret) 9332 return ret; 9333 cpuset_update_active_cpus(); 9334 } else { 9335 num_cpus_frozen++; 9336 partition_sched_domains(1, NULL, NULL); 9337 } 9338 return 0; 9339 } 9340 9341 int sched_cpu_activate(unsigned int cpu) 9342 { 9343 struct rq *rq = cpu_rq(cpu); 9344 struct rq_flags rf; 9345 9346 /* 9347 * Clear the balance_push callback and prepare to schedule 9348 * regular tasks. 9349 */ 9350 balance_push_set(cpu, false); 9351 9352 #ifdef CONFIG_SCHED_SMT 9353 /* 9354 * When going up, increment the number of cores with SMT present. 9355 */ 9356 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 9357 static_branch_inc_cpuslocked(&sched_smt_present); 9358 #endif 9359 set_cpu_active(cpu, true); 9360 9361 if (sched_smp_initialized) { 9362 sched_update_numa(cpu, true); 9363 sched_domains_numa_masks_set(cpu); 9364 cpuset_cpu_active(); 9365 } 9366 9367 /* 9368 * Put the rq online, if not already. This happens: 9369 * 9370 * 1) In the early boot process, because we build the real domains 9371 * after all CPUs have been brought up. 9372 * 9373 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the 9374 * domains. 
9375 */ 9376 rq_lock_irqsave(rq, &rf); 9377 if (rq->rd) { 9378 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 9379 set_rq_online(rq); 9380 } 9381 rq_unlock_irqrestore(rq, &rf); 9382 9383 return 0; 9384 } 9385 9386 int sched_cpu_deactivate(unsigned int cpu) 9387 { 9388 struct rq *rq = cpu_rq(cpu); 9389 struct rq_flags rf; 9390 int ret; 9391 9392 /* 9393 * Remove CPU from nohz.idle_cpus_mask to prevent participating in 9394 * load balancing when not active 9395 */ 9396 nohz_balance_exit_idle(rq); 9397 9398 set_cpu_active(cpu, false); 9399 9400 /* 9401 * From this point forward, this CPU will refuse to run any task that 9402 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively 9403 * push those tasks away until this gets cleared, see 9404 * sched_cpu_dying(). 9405 */ 9406 balance_push_set(cpu, true); 9407 9408 /* 9409 * We've cleared cpu_active_mask / set balance_push, wait for all 9410 * preempt-disabled and RCU users of this state to go away such that 9411 * all new such users will observe it. 9412 * 9413 * Specifically, we rely on ttwu to no longer target this CPU, see 9414 * ttwu_queue_cond() and is_cpu_allowed(). 9415 * 9416 * Do sync before park smpboot threads to take care the rcu boost case. 9417 */ 9418 synchronize_rcu(); 9419 9420 rq_lock_irqsave(rq, &rf); 9421 if (rq->rd) { 9422 update_rq_clock(rq); 9423 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 9424 set_rq_offline(rq); 9425 } 9426 rq_unlock_irqrestore(rq, &rf); 9427 9428 #ifdef CONFIG_SCHED_SMT 9429 /* 9430 * When going down, decrement the number of cores with SMT present. 9431 */ 9432 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 9433 static_branch_dec_cpuslocked(&sched_smt_present); 9434 9435 sched_core_cpu_deactivate(cpu); 9436 #endif 9437 9438 if (!sched_smp_initialized) 9439 return 0; 9440 9441 sched_update_numa(cpu, false); 9442 ret = cpuset_cpu_inactive(cpu); 9443 if (ret) { 9444 balance_push_set(cpu, false); 9445 set_cpu_active(cpu, true); 9446 sched_update_numa(cpu, true); 9447 return ret; 9448 } 9449 sched_domains_numa_masks_clear(cpu); 9450 return 0; 9451 } 9452 9453 static void sched_rq_cpu_starting(unsigned int cpu) 9454 { 9455 struct rq *rq = cpu_rq(cpu); 9456 9457 rq->calc_load_update = calc_load_update; 9458 update_max_interval(); 9459 } 9460 9461 int sched_cpu_starting(unsigned int cpu) 9462 { 9463 sched_core_cpu_starting(cpu); 9464 sched_rq_cpu_starting(cpu); 9465 sched_tick_start(cpu); 9466 return 0; 9467 } 9468 9469 #ifdef CONFIG_HOTPLUG_CPU 9470 9471 /* 9472 * Invoked immediately before the stopper thread is invoked to bring the 9473 * CPU down completely. At this point all per CPU kthreads except the 9474 * hotplug thread (current) and the stopper thread (inactive) have been 9475 * either parked or have been unbound from the outgoing CPU. Ensure that 9476 * any of those which might be on the way out are gone. 9477 * 9478 * If after this point a bound task is being woken on this CPU then the 9479 * responsible hotplug callback has failed to do it's job. 9480 * sched_cpu_dying() will catch it with the appropriate fireworks. 9481 */ 9482 int sched_cpu_wait_empty(unsigned int cpu) 9483 { 9484 balance_hotplug_wait(); 9485 return 0; 9486 } 9487 9488 /* 9489 * Since this CPU is going 'away' for a while, fold any nr_active delta we 9490 * might have. Called from the CPU stopper task after ensuring that the 9491 * stopper is the last running task on the CPU, so nr_active count is 9492 * stable. 
We need to take the teardown thread which is calling this into 9493 * account, so we hand in adjust = 1 to the load calculation. 9494 * 9495 * Also see the comment "Global load-average calculations". 9496 */ 9497 static void calc_load_migrate(struct rq *rq) 9498 { 9499 long delta = calc_load_fold_active(rq, 1); 9500 9501 if (delta) 9502 atomic_long_add(delta, &calc_load_tasks); 9503 } 9504 9505 static void dump_rq_tasks(struct rq *rq, const char *loglvl) 9506 { 9507 struct task_struct *g, *p; 9508 int cpu = cpu_of(rq); 9509 9510 lockdep_assert_rq_held(rq); 9511 9512 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); 9513 for_each_process_thread(g, p) { 9514 if (task_cpu(p) != cpu) 9515 continue; 9516 9517 if (!task_on_rq_queued(p)) 9518 continue; 9519 9520 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); 9521 } 9522 } 9523 9524 int sched_cpu_dying(unsigned int cpu) 9525 { 9526 struct rq *rq = cpu_rq(cpu); 9527 struct rq_flags rf; 9528 9529 /* Handle pending wakeups and then migrate everything off */ 9530 sched_tick_stop(cpu); 9531 9532 rq_lock_irqsave(rq, &rf); 9533 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { 9534 WARN(true, "Dying CPU not properly vacated!"); 9535 dump_rq_tasks(rq, KERN_WARNING); 9536 } 9537 rq_unlock_irqrestore(rq, &rf); 9538 9539 calc_load_migrate(rq); 9540 update_max_interval(); 9541 hrtick_clear(rq); 9542 sched_core_cpu_dying(cpu); 9543 return 0; 9544 } 9545 #endif 9546 9547 void __init sched_init_smp(void) 9548 { 9549 sched_init_numa(NUMA_NO_NODE); 9550 9551 /* 9552 * There's no userspace yet to cause hotplug operations; hence all the 9553 * CPU masks are stable and all blatant races in the below code cannot 9554 * happen. 9555 */ 9556 mutex_lock(&sched_domains_mutex); 9557 sched_init_domains(cpu_active_mask); 9558 mutex_unlock(&sched_domains_mutex); 9559 9560 /* Move init over to a non-isolated CPU */ 9561 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0) 9562 BUG(); 9563 current->flags &= ~PF_NO_SETAFFINITY; 9564 sched_init_granularity(); 9565 9566 init_sched_rt_class(); 9567 init_sched_dl_class(); 9568 9569 sched_smp_initialized = true; 9570 } 9571 9572 static int __init migration_init(void) 9573 { 9574 sched_cpu_starting(smp_processor_id()); 9575 return 0; 9576 } 9577 early_initcall(migration_init); 9578 9579 #else 9580 void __init sched_init_smp(void) 9581 { 9582 sched_init_granularity(); 9583 } 9584 #endif /* CONFIG_SMP */ 9585 9586 int in_sched_functions(unsigned long addr) 9587 { 9588 return in_lock_functions(addr) || 9589 (addr >= (unsigned long)__sched_text_start 9590 && addr < (unsigned long)__sched_text_end); 9591 } 9592 9593 #ifdef CONFIG_CGROUP_SCHED 9594 /* 9595 * Default task group. 9596 * Every task in system belongs to this group at bootup. 
9597 */ 9598 struct task_group root_task_group; 9599 LIST_HEAD(task_groups); 9600 9601 /* Cacheline aligned slab cache for task_group */ 9602 static struct kmem_cache *task_group_cache __read_mostly; 9603 #endif 9604 9605 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 9606 DECLARE_PER_CPU(cpumask_var_t, select_rq_mask); 9607 9608 void __init sched_init(void) 9609 { 9610 unsigned long ptr = 0; 9611 int i; 9612 9613 /* Make sure the linker didn't screw up */ 9614 BUG_ON(&idle_sched_class != &fair_sched_class + 1 || 9615 &fair_sched_class != &rt_sched_class + 1 || 9616 &rt_sched_class != &dl_sched_class + 1); 9617 #ifdef CONFIG_SMP 9618 BUG_ON(&dl_sched_class != &stop_sched_class + 1); 9619 #endif 9620 9621 wait_bit_init(); 9622 9623 #ifdef CONFIG_FAIR_GROUP_SCHED 9624 ptr += 2 * nr_cpu_ids * sizeof(void **); 9625 #endif 9626 #ifdef CONFIG_RT_GROUP_SCHED 9627 ptr += 2 * nr_cpu_ids * sizeof(void **); 9628 #endif 9629 if (ptr) { 9630 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); 9631 9632 #ifdef CONFIG_FAIR_GROUP_SCHED 9633 root_task_group.se = (struct sched_entity **)ptr; 9634 ptr += nr_cpu_ids * sizeof(void **); 9635 9636 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 9637 ptr += nr_cpu_ids * sizeof(void **); 9638 9639 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 9640 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 9641 #endif /* CONFIG_FAIR_GROUP_SCHED */ 9642 #ifdef CONFIG_RT_GROUP_SCHED 9643 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 9644 ptr += nr_cpu_ids * sizeof(void **); 9645 9646 root_task_group.rt_rq = (struct rt_rq **)ptr; 9647 ptr += nr_cpu_ids * sizeof(void **); 9648 9649 #endif /* CONFIG_RT_GROUP_SCHED */ 9650 } 9651 #ifdef CONFIG_CPUMASK_OFFSTACK 9652 for_each_possible_cpu(i) { 9653 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 9654 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 9655 per_cpu(select_rq_mask, i) = (cpumask_var_t)kzalloc_node( 9656 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 9657 } 9658 #endif /* CONFIG_CPUMASK_OFFSTACK */ 9659 9660 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); 9661 9662 #ifdef CONFIG_SMP 9663 init_defrootdomain(); 9664 #endif 9665 9666 #ifdef CONFIG_RT_GROUP_SCHED 9667 init_rt_bandwidth(&root_task_group.rt_bandwidth, 9668 global_rt_period(), global_rt_runtime()); 9669 #endif /* CONFIG_RT_GROUP_SCHED */ 9670 9671 #ifdef CONFIG_CGROUP_SCHED 9672 task_group_cache = KMEM_CACHE(task_group, 0); 9673 9674 list_add(&root_task_group.list, &task_groups); 9675 INIT_LIST_HEAD(&root_task_group.children); 9676 INIT_LIST_HEAD(&root_task_group.siblings); 9677 autogroup_init(&init_task); 9678 #endif /* CONFIG_CGROUP_SCHED */ 9679 9680 for_each_possible_cpu(i) { 9681 struct rq *rq; 9682 9683 rq = cpu_rq(i); 9684 raw_spin_lock_init(&rq->__lock); 9685 rq->nr_running = 0; 9686 rq->calc_load_active = 0; 9687 rq->calc_load_update = jiffies + LOAD_FREQ; 9688 init_cfs_rq(&rq->cfs); 9689 init_rt_rq(&rq->rt); 9690 init_dl_rq(&rq->dl); 9691 #ifdef CONFIG_FAIR_GROUP_SCHED 9692 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 9693 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 9694 /* 9695 * How much CPU bandwidth does root_task_group get? 9696 * 9697 * In case of task-groups formed thr' the cgroup filesystem, it 9698 * gets 100% of the CPU resources in the system. This overall 9699 * system CPU resource is divided among the tasks of 9700 * root_task_group and its child task-groups in a fair manner, 9701 * based on each entity's (task or task-group's) weight 9702 * (se->load.weight). 
9703 * 9704 * In other words, if root_task_group has 10 tasks of weight 9705 * 1024) and two child groups A0 and A1 (of weight 1024 each), 9706 * then A0's share of the CPU resource is: 9707 * 9708 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 9709 * 9710 * We achieve this by letting root_task_group's tasks sit 9711 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 9712 */ 9713 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 9714 #endif /* CONFIG_FAIR_GROUP_SCHED */ 9715 9716 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 9717 #ifdef CONFIG_RT_GROUP_SCHED 9718 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 9719 #endif 9720 #ifdef CONFIG_SMP 9721 rq->sd = NULL; 9722 rq->rd = NULL; 9723 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 9724 rq->balance_callback = &balance_push_callback; 9725 rq->active_balance = 0; 9726 rq->next_balance = jiffies; 9727 rq->push_cpu = 0; 9728 rq->cpu = i; 9729 rq->online = 0; 9730 rq->idle_stamp = 0; 9731 rq->avg_idle = 2*sysctl_sched_migration_cost; 9732 rq->wake_stamp = jiffies; 9733 rq->wake_avg_idle = rq->avg_idle; 9734 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 9735 9736 INIT_LIST_HEAD(&rq->cfs_tasks); 9737 9738 rq_attach_root(rq, &def_root_domain); 9739 #ifdef CONFIG_NO_HZ_COMMON 9740 rq->last_blocked_load_update_tick = jiffies; 9741 atomic_set(&rq->nohz_flags, 0); 9742 9743 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); 9744 #endif 9745 #ifdef CONFIG_HOTPLUG_CPU 9746 rcuwait_init(&rq->hotplug_wait); 9747 #endif 9748 #endif /* CONFIG_SMP */ 9749 hrtick_rq_init(rq); 9750 atomic_set(&rq->nr_iowait, 0); 9751 9752 #ifdef CONFIG_SCHED_CORE 9753 rq->core = rq; 9754 rq->core_pick = NULL; 9755 rq->core_enabled = 0; 9756 rq->core_tree = RB_ROOT; 9757 rq->core_forceidle_count = 0; 9758 rq->core_forceidle_occupation = 0; 9759 rq->core_forceidle_start = 0; 9760 9761 rq->core_cookie = 0UL; 9762 #endif 9763 } 9764 9765 set_load_weight(&init_task, false); 9766 9767 /* 9768 * The boot idle thread does lazy MMU switching as well: 9769 */ 9770 mmgrab(&init_mm); 9771 enter_lazy_tlb(&init_mm, current); 9772 9773 /* 9774 * The idle task doesn't need the kthread struct to function, but it 9775 * is dressed up as a per-CPU kthread and thus needs to play the part 9776 * if we want to avoid special-casing it in code that deals with per-CPU 9777 * kthreads. 9778 */ 9779 WARN_ON(!set_kthread_struct(current)); 9780 9781 /* 9782 * Make us the idle thread. Technically, schedule() should not be 9783 * called from this thread, however somewhere below it might be, 9784 * but because we are the idle thread, we just pick up running again 9785 * when this runqueue becomes "idle". 9786 */ 9787 init_idle(current, smp_processor_id()); 9788 9789 calc_load_update = jiffies + LOAD_FREQ; 9790 9791 #ifdef CONFIG_SMP 9792 idle_thread_set_boot_cpu(); 9793 balance_push_set(smp_processor_id(), false); 9794 #endif 9795 init_sched_fair_class(); 9796 9797 psi_init(); 9798 9799 init_uclamp(); 9800 9801 preempt_dynamic_init(); 9802 9803 scheduler_running = 1; 9804 } 9805 9806 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 9807 9808 void __might_sleep(const char *file, int line) 9809 { 9810 unsigned int state = get_current_state(); 9811 /* 9812 * Blocking primitives will set (and therefore destroy) current->state, 9813 * since we will exit with TASK_RUNNING make sure we enter with it, 9814 * otherwise we will destroy state. 
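 *
 * The kind of bug this catches looks roughly like the following
 * hypothetical sequence (not from this file), where a sleeping
 * allocation clobbers the state set up for the later schedule():
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	schedule();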
9815 */ 9816 WARN_ONCE(state != TASK_RUNNING && current->task_state_change, 9817 "do not call blocking ops when !TASK_RUNNING; " 9818 "state=%x set at [<%p>] %pS\n", state, 9819 (void *)current->task_state_change, 9820 (void *)current->task_state_change); 9821 9822 __might_resched(file, line, 0); 9823 } 9824 EXPORT_SYMBOL(__might_sleep); 9825 9826 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip) 9827 { 9828 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT)) 9829 return; 9830 9831 if (preempt_count() == preempt_offset) 9832 return; 9833 9834 pr_err("Preemption disabled at:"); 9835 print_ip_sym(KERN_ERR, ip); 9836 } 9837 9838 static inline bool resched_offsets_ok(unsigned int offsets) 9839 { 9840 unsigned int nested = preempt_count(); 9841 9842 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT; 9843 9844 return nested == offsets; 9845 } 9846 9847 void __might_resched(const char *file, int line, unsigned int offsets) 9848 { 9849 /* Ratelimiting timestamp: */ 9850 static unsigned long prev_jiffy; 9851 9852 unsigned long preempt_disable_ip; 9853 9854 /* WARN_ON_ONCE() by default, no rate limit required: */ 9855 rcu_sleep_check(); 9856 9857 if ((resched_offsets_ok(offsets) && !irqs_disabled() && 9858 !is_idle_task(current) && !current->non_block_count) || 9859 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 9860 oops_in_progress) 9861 return; 9862 9863 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 9864 return; 9865 prev_jiffy = jiffies; 9866 9867 /* Save this before calling printk(), since that will clobber it: */ 9868 preempt_disable_ip = get_preempt_disable_ip(current); 9869 9870 pr_err("BUG: sleeping function called from invalid context at %s:%d\n", 9871 file, line); 9872 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", 9873 in_atomic(), irqs_disabled(), current->non_block_count, 9874 current->pid, current->comm); 9875 pr_err("preempt_count: %x, expected: %x\n", preempt_count(), 9876 offsets & MIGHT_RESCHED_PREEMPT_MASK); 9877 9878 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) { 9879 pr_err("RCU nest depth: %d, expected: %u\n", 9880 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT); 9881 } 9882 9883 if (task_stack_end_corrupted(current)) 9884 pr_emerg("Thread overran stack, or stack corrupted\n"); 9885 9886 debug_show_held_locks(current); 9887 if (irqs_disabled()) 9888 print_irqtrace_events(current); 9889 9890 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK, 9891 preempt_disable_ip); 9892 9893 dump_stack(); 9894 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 9895 } 9896 EXPORT_SYMBOL(__might_resched); 9897 9898 void __cant_sleep(const char *file, int line, int preempt_offset) 9899 { 9900 static unsigned long prev_jiffy; 9901 9902 if (irqs_disabled()) 9903 return; 9904 9905 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 9906 return; 9907 9908 if (preempt_count() > preempt_offset) 9909 return; 9910 9911 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 9912 return; 9913 prev_jiffy = jiffies; 9914 9915 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 9916 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 9917 in_atomic(), irqs_disabled(), 9918 current->pid, current->comm); 9919 9920 debug_show_held_locks(current); 9921 dump_stack(); 9922 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 9923 } 9924 EXPORT_SYMBOL_GPL(__cant_sleep); 9925 9926 #ifdef CONFIG_SMP 9927 void __cant_migrate(const char *file, int line) 9928 { 9929 static unsigned long prev_jiffy; 9930 9931 if 
(irqs_disabled())
		return;

	if (is_migration_disabled(current))
		return;

	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return;

	if (preempt_count() > 0)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
	       current->pid, current->comm);

	debug_show_held_locks(current);
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_migrate);
#endif
#endif

#ifdef CONFIG_MAGIC_SYSRQ
void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
	};

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
		if (p->flags & PF_KTHREAD)
			continue;

		p->se.exec_start = 0;
		schedstat_set(p->stats.wait_start, 0);
		schedstat_set(p->stats.sleep_start, 0);
		schedstat_set(p->stats.block_start, 0);

		if (!dl_task(p) && !rt_task(p)) {
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0:
			 */
			if (task_nice(p) < 0)
				set_user_nice(p, 0);
			continue;
		}

		__sched_setscheduler(p, &attr, false, false);
	}
	read_unlock(&tasklist_lock);
}

#endif /* CONFIG_MAGIC_SYSRQ */

#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
 * These functions are only useful for the IA64 MCA handling, or kdb.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given CPU.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 *
 * Return: The current task for @cpu.
 */
struct task_struct *curr_task(int cpu)
{
	return cpu_curr(cpu);
}

#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */

#ifdef CONFIG_IA64
/**
 * ia64_set_curr_task - set the current task for a given CPU.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a CPU in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled, and the
 * caller must save the original value of the current task (see curr_task()
 * above) and restore that value before re-enabling interrupts and restarting
 * the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
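 *
 * A sketch of the save/restore pattern described above (illustrative only,
 * not taken from any in-tree caller; handler_task is a placeholder):
 *
 *	struct task_struct *orig = curr_task(cpu);
 *
 *	ia64_set_curr_task(cpu, handler_task);
 *	... MCA/kdb processing runs with the substituted task ...
 *	ia64_set_curr_task(cpu, orig);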
10039 */ 10040 void ia64_set_curr_task(int cpu, struct task_struct *p) 10041 { 10042 cpu_curr(cpu) = p; 10043 } 10044 10045 #endif 10046 10047 #ifdef CONFIG_CGROUP_SCHED 10048 /* task_group_lock serializes the addition/removal of task groups */ 10049 static DEFINE_SPINLOCK(task_group_lock); 10050 10051 static inline void alloc_uclamp_sched_group(struct task_group *tg, 10052 struct task_group *parent) 10053 { 10054 #ifdef CONFIG_UCLAMP_TASK_GROUP 10055 enum uclamp_id clamp_id; 10056 10057 for_each_clamp_id(clamp_id) { 10058 uclamp_se_set(&tg->uclamp_req[clamp_id], 10059 uclamp_none(clamp_id), false); 10060 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; 10061 } 10062 #endif 10063 } 10064 10065 static void sched_free_group(struct task_group *tg) 10066 { 10067 free_fair_sched_group(tg); 10068 free_rt_sched_group(tg); 10069 autogroup_free(tg); 10070 kmem_cache_free(task_group_cache, tg); 10071 } 10072 10073 static void sched_free_group_rcu(struct rcu_head *rcu) 10074 { 10075 sched_free_group(container_of(rcu, struct task_group, rcu)); 10076 } 10077 10078 static void sched_unregister_group(struct task_group *tg) 10079 { 10080 unregister_fair_sched_group(tg); 10081 unregister_rt_sched_group(tg); 10082 /* 10083 * We have to wait for yet another RCU grace period to expire, as 10084 * print_cfs_stats() might run concurrently. 10085 */ 10086 call_rcu(&tg->rcu, sched_free_group_rcu); 10087 } 10088 10089 /* allocate runqueue etc for a new task group */ 10090 struct task_group *sched_create_group(struct task_group *parent) 10091 { 10092 struct task_group *tg; 10093 10094 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 10095 if (!tg) 10096 return ERR_PTR(-ENOMEM); 10097 10098 if (!alloc_fair_sched_group(tg, parent)) 10099 goto err; 10100 10101 if (!alloc_rt_sched_group(tg, parent)) 10102 goto err; 10103 10104 alloc_uclamp_sched_group(tg, parent); 10105 10106 return tg; 10107 10108 err: 10109 sched_free_group(tg); 10110 return ERR_PTR(-ENOMEM); 10111 } 10112 10113 void sched_online_group(struct task_group *tg, struct task_group *parent) 10114 { 10115 unsigned long flags; 10116 10117 spin_lock_irqsave(&task_group_lock, flags); 10118 list_add_rcu(&tg->list, &task_groups); 10119 10120 /* Root should already exist: */ 10121 WARN_ON(!parent); 10122 10123 tg->parent = parent; 10124 INIT_LIST_HEAD(&tg->children); 10125 list_add_rcu(&tg->siblings, &parent->children); 10126 spin_unlock_irqrestore(&task_group_lock, flags); 10127 10128 online_fair_sched_group(tg); 10129 } 10130 10131 /* rcu callback to free various structures associated with a task group */ 10132 static void sched_unregister_group_rcu(struct rcu_head *rhp) 10133 { 10134 /* Now it should be safe to free those cfs_rqs: */ 10135 sched_unregister_group(container_of(rhp, struct task_group, rcu)); 10136 } 10137 10138 void sched_destroy_group(struct task_group *tg) 10139 { 10140 /* Wait for possible concurrent references to cfs_rqs complete: */ 10141 call_rcu(&tg->rcu, sched_unregister_group_rcu); 10142 } 10143 10144 void sched_release_group(struct task_group *tg) 10145 { 10146 unsigned long flags; 10147 10148 /* 10149 * Unlink first, to avoid walk_tg_tree_from() from finding us (via 10150 * sched_cfs_period_timer()). 10151 * 10152 * For this to be effective, we have to wait for all pending users of 10153 * this task group to leave their RCU critical section to ensure no new 10154 * user will see our dying task group any more. Specifically ensure 10155 * that tg_unthrottle_up() won't add decayed cfs_rq's to it. 
	 *
	 * We therefore defer calling unregister_fair_sched_group() to
	 * sched_unregister_group(), which is guaranteed to get called only
	 * after the current RCU grace period has expired.
	 */
	spin_lock_irqsave(&task_group_lock, flags);
	list_del_rcu(&tg->list);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);
}

static void sched_change_group(struct task_struct *tsk, int type)
{
	struct task_group *tg;

	/*
	 * All callers are synchronized by task_rq_lock(); we do not use RCU
	 * here, which would be pointless. Thus, we pass "true" to
	 * task_css_check() to prevent lockdep warnings.
	 */
	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
			  struct task_group, css);
	tg = autogroup_task_group(tsk, tg);
	tsk->sched_task_group = tg;

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->task_change_group)
		tsk->sched_class->task_change_group(tsk, type);
	else
#endif
		set_task_rq(tsk, task_cpu(tsk));
}

/*
 * Change task's runqueue when it moves between groups.
 *
 * The caller of this function should have put the task in its new group by
 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
 * its new group.
 */
void sched_move_task(struct task_struct *tsk)
{
	int queued, running, queue_flags =
		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(tsk, &rf);
	update_rq_clock(rq);

	running = task_current(rq, tsk);
	queued = task_on_rq_queued(tsk);

	if (queued)
		dequeue_task(rq, tsk, queue_flags);
	if (running)
		put_prev_task(rq, tsk);

	sched_change_group(tsk, TASK_MOVE_GROUP);

	if (queued)
		enqueue_task(rq, tsk, queue_flags);
	if (running) {
		set_next_task(rq, tsk);
		/*
		 * After changing group, the running task may have joined a
		 * throttled one but it's still the running task. Trigger a
		 * resched to make sure that task can still run.
		 */
		resched_curr(rq);
	}

	task_rq_unlock(rq, tsk, &rf);
}

static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
	return css ?
container_of(css, struct task_group, css) : NULL; 10234 } 10235 10236 static struct cgroup_subsys_state * 10237 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 10238 { 10239 struct task_group *parent = css_tg(parent_css); 10240 struct task_group *tg; 10241 10242 if (!parent) { 10243 /* This is early initialization for the top cgroup */ 10244 return &root_task_group.css; 10245 } 10246 10247 tg = sched_create_group(parent); 10248 if (IS_ERR(tg)) 10249 return ERR_PTR(-ENOMEM); 10250 10251 return &tg->css; 10252 } 10253 10254 /* Expose task group only after completing cgroup initialization */ 10255 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 10256 { 10257 struct task_group *tg = css_tg(css); 10258 struct task_group *parent = css_tg(css->parent); 10259 10260 if (parent) 10261 sched_online_group(tg, parent); 10262 10263 #ifdef CONFIG_UCLAMP_TASK_GROUP 10264 /* Propagate the effective uclamp value for the new group */ 10265 mutex_lock(&uclamp_mutex); 10266 rcu_read_lock(); 10267 cpu_util_update_eff(css); 10268 rcu_read_unlock(); 10269 mutex_unlock(&uclamp_mutex); 10270 #endif 10271 10272 return 0; 10273 } 10274 10275 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 10276 { 10277 struct task_group *tg = css_tg(css); 10278 10279 sched_release_group(tg); 10280 } 10281 10282 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 10283 { 10284 struct task_group *tg = css_tg(css); 10285 10286 /* 10287 * Relies on the RCU grace period between css_released() and this. 10288 */ 10289 sched_unregister_group(tg); 10290 } 10291 10292 /* 10293 * This is called before wake_up_new_task(), therefore we really only 10294 * have to set its group bits, all the other stuff does not apply. 10295 */ 10296 static void cpu_cgroup_fork(struct task_struct *task) 10297 { 10298 struct rq_flags rf; 10299 struct rq *rq; 10300 10301 rq = task_rq_lock(task, &rf); 10302 10303 update_rq_clock(rq); 10304 sched_change_group(task, TASK_SET_GROUP); 10305 10306 task_rq_unlock(rq, task, &rf); 10307 } 10308 10309 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 10310 { 10311 struct task_struct *task; 10312 struct cgroup_subsys_state *css; 10313 int ret = 0; 10314 10315 cgroup_taskset_for_each(task, css, tset) { 10316 #ifdef CONFIG_RT_GROUP_SCHED 10317 if (!sched_rt_can_attach(css_tg(css), task)) 10318 return -EINVAL; 10319 #endif 10320 /* 10321 * Serialize against wake_up_new_task() such that if it's 10322 * running, we're sure to observe its full state. 10323 */ 10324 raw_spin_lock_irq(&task->pi_lock); 10325 /* 10326 * Avoid calling sched_move_task() before wake_up_new_task() 10327 * has happened. This would lead to problems with PELT, due to 10328 * move wanting to detach+attach while we're not attached yet. 
10329 */ 10330 if (READ_ONCE(task->__state) == TASK_NEW) 10331 ret = -EINVAL; 10332 raw_spin_unlock_irq(&task->pi_lock); 10333 10334 if (ret) 10335 break; 10336 } 10337 return ret; 10338 } 10339 10340 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 10341 { 10342 struct task_struct *task; 10343 struct cgroup_subsys_state *css; 10344 10345 cgroup_taskset_for_each(task, css, tset) 10346 sched_move_task(task); 10347 } 10348 10349 #ifdef CONFIG_UCLAMP_TASK_GROUP 10350 static void cpu_util_update_eff(struct cgroup_subsys_state *css) 10351 { 10352 struct cgroup_subsys_state *top_css = css; 10353 struct uclamp_se *uc_parent = NULL; 10354 struct uclamp_se *uc_se = NULL; 10355 unsigned int eff[UCLAMP_CNT]; 10356 enum uclamp_id clamp_id; 10357 unsigned int clamps; 10358 10359 lockdep_assert_held(&uclamp_mutex); 10360 SCHED_WARN_ON(!rcu_read_lock_held()); 10361 10362 css_for_each_descendant_pre(css, top_css) { 10363 uc_parent = css_tg(css)->parent 10364 ? css_tg(css)->parent->uclamp : NULL; 10365 10366 for_each_clamp_id(clamp_id) { 10367 /* Assume effective clamps matches requested clamps */ 10368 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; 10369 /* Cap effective clamps with parent's effective clamps */ 10370 if (uc_parent && 10371 eff[clamp_id] > uc_parent[clamp_id].value) { 10372 eff[clamp_id] = uc_parent[clamp_id].value; 10373 } 10374 } 10375 /* Ensure protection is always capped by limit */ 10376 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); 10377 10378 /* Propagate most restrictive effective clamps */ 10379 clamps = 0x0; 10380 uc_se = css_tg(css)->uclamp; 10381 for_each_clamp_id(clamp_id) { 10382 if (eff[clamp_id] == uc_se[clamp_id].value) 10383 continue; 10384 uc_se[clamp_id].value = eff[clamp_id]; 10385 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); 10386 clamps |= (0x1 << clamp_id); 10387 } 10388 if (!clamps) { 10389 css = css_rightmost_descendant(css); 10390 continue; 10391 } 10392 10393 /* Immediately update descendants RUNNABLE tasks */ 10394 uclamp_update_active_tasks(css); 10395 } 10396 } 10397 10398 /* 10399 * Integer 10^N with a given N exponent by casting to integer the literal "1eN" 10400 * C expression. Since there is no way to convert a macro argument (N) into a 10401 * character constant, use two levels of macros. 
10402 */ 10403 #define _POW10(exp) ((unsigned int)1e##exp) 10404 #define POW10(exp) _POW10(exp) 10405 10406 struct uclamp_request { 10407 #define UCLAMP_PERCENT_SHIFT 2 10408 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) 10409 s64 percent; 10410 u64 util; 10411 int ret; 10412 }; 10413 10414 static inline struct uclamp_request 10415 capacity_from_percent(char *buf) 10416 { 10417 struct uclamp_request req = { 10418 .percent = UCLAMP_PERCENT_SCALE, 10419 .util = SCHED_CAPACITY_SCALE, 10420 .ret = 0, 10421 }; 10422 10423 buf = strim(buf); 10424 if (strcmp(buf, "max")) { 10425 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, 10426 &req.percent); 10427 if (req.ret) 10428 return req; 10429 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { 10430 req.ret = -ERANGE; 10431 return req; 10432 } 10433 10434 req.util = req.percent << SCHED_CAPACITY_SHIFT; 10435 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); 10436 } 10437 10438 return req; 10439 } 10440 10441 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, 10442 size_t nbytes, loff_t off, 10443 enum uclamp_id clamp_id) 10444 { 10445 struct uclamp_request req; 10446 struct task_group *tg; 10447 10448 req = capacity_from_percent(buf); 10449 if (req.ret) 10450 return req.ret; 10451 10452 static_branch_enable(&sched_uclamp_used); 10453 10454 mutex_lock(&uclamp_mutex); 10455 rcu_read_lock(); 10456 10457 tg = css_tg(of_css(of)); 10458 if (tg->uclamp_req[clamp_id].value != req.util) 10459 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); 10460 10461 /* 10462 * Because of not recoverable conversion rounding we keep track of the 10463 * exact requested value 10464 */ 10465 tg->uclamp_pct[clamp_id] = req.percent; 10466 10467 /* Update effective clamps to track the most restrictive value */ 10468 cpu_util_update_eff(of_css(of)); 10469 10470 rcu_read_unlock(); 10471 mutex_unlock(&uclamp_mutex); 10472 10473 return nbytes; 10474 } 10475 10476 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, 10477 char *buf, size_t nbytes, 10478 loff_t off) 10479 { 10480 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); 10481 } 10482 10483 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of, 10484 char *buf, size_t nbytes, 10485 loff_t off) 10486 { 10487 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); 10488 } 10489 10490 static inline void cpu_uclamp_print(struct seq_file *sf, 10491 enum uclamp_id clamp_id) 10492 { 10493 struct task_group *tg; 10494 u64 util_clamp; 10495 u64 percent; 10496 u32 rem; 10497 10498 rcu_read_lock(); 10499 tg = css_tg(seq_css(sf)); 10500 util_clamp = tg->uclamp_req[clamp_id].value; 10501 rcu_read_unlock(); 10502 10503 if (util_clamp == SCHED_CAPACITY_SCALE) { 10504 seq_puts(sf, "max\n"); 10505 return; 10506 } 10507 10508 percent = tg->uclamp_pct[clamp_id]; 10509 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); 10510 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); 10511 } 10512 10513 static int cpu_uclamp_min_show(struct seq_file *sf, void *v) 10514 { 10515 cpu_uclamp_print(sf, UCLAMP_MIN); 10516 return 0; 10517 } 10518 10519 static int cpu_uclamp_max_show(struct seq_file *sf, void *v) 10520 { 10521 cpu_uclamp_print(sf, UCLAMP_MAX); 10522 return 0; 10523 } 10524 #endif /* CONFIG_UCLAMP_TASK_GROUP */ 10525 10526 #ifdef CONFIG_FAIR_GROUP_SCHED 10527 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 10528 struct cftype *cftype, u64 shareval) 10529 { 10530 if (shareval > 
scale_load_down(ULONG_MAX))
		shareval = MAX_SHARES;
	return sched_group_set_shares(css_tg(css), scale_load(shareval));
}

static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct task_group *tg = css_tg(css);

	return (u64) scale_load_down(tg->shares);
}

#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);

const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
/* More than 203 days if BW_SHIFT equals 20. */
static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
				u64 burst)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	if (tg == &root_task_group)
		return -EINVAL;

	/*
	 * Ensure we have some amount of bandwidth every period. This is to
	 * prevent reaching a state of large arrears when throttled via
	 * entity_tick(), resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;

	/*
	 * Likewise, bound things on the other side by preventing insane quota
	 * periods. This also allows us to normalize in computing quota
	 * feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;

	/*
	 * Bound quota to defend against overflow during the bandwidth shift.
	 */
	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
		return -EINVAL;

	if (quota != RUNTIME_INF && (burst > quota ||
				     burst + quota > max_cfs_runtime))
		return -EINVAL;

	/*
	 * Prevent a race between setting of cfs_rq->runtime_enabled and
	 * unthrottle_offline_cfs_rqs().
10591 */ 10592 cpus_read_lock(); 10593 mutex_lock(&cfs_constraints_mutex); 10594 ret = __cfs_schedulable(tg, period, quota); 10595 if (ret) 10596 goto out_unlock; 10597 10598 runtime_enabled = quota != RUNTIME_INF; 10599 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 10600 /* 10601 * If we need to toggle cfs_bandwidth_used, off->on must occur 10602 * before making related changes, and on->off must occur afterwards 10603 */ 10604 if (runtime_enabled && !runtime_was_enabled) 10605 cfs_bandwidth_usage_inc(); 10606 raw_spin_lock_irq(&cfs_b->lock); 10607 cfs_b->period = ns_to_ktime(period); 10608 cfs_b->quota = quota; 10609 cfs_b->burst = burst; 10610 10611 __refill_cfs_bandwidth_runtime(cfs_b); 10612 10613 /* Restart the period timer (if active) to handle new period expiry: */ 10614 if (runtime_enabled) 10615 start_cfs_bandwidth(cfs_b); 10616 10617 raw_spin_unlock_irq(&cfs_b->lock); 10618 10619 for_each_online_cpu(i) { 10620 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 10621 struct rq *rq = cfs_rq->rq; 10622 struct rq_flags rf; 10623 10624 rq_lock_irq(rq, &rf); 10625 cfs_rq->runtime_enabled = runtime_enabled; 10626 cfs_rq->runtime_remaining = 0; 10627 10628 if (cfs_rq->throttled) 10629 unthrottle_cfs_rq(cfs_rq); 10630 rq_unlock_irq(rq, &rf); 10631 } 10632 if (runtime_was_enabled && !runtime_enabled) 10633 cfs_bandwidth_usage_dec(); 10634 out_unlock: 10635 mutex_unlock(&cfs_constraints_mutex); 10636 cpus_read_unlock(); 10637 10638 return ret; 10639 } 10640 10641 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 10642 { 10643 u64 quota, period, burst; 10644 10645 period = ktime_to_ns(tg->cfs_bandwidth.period); 10646 burst = tg->cfs_bandwidth.burst; 10647 if (cfs_quota_us < 0) 10648 quota = RUNTIME_INF; 10649 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) 10650 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 10651 else 10652 return -EINVAL; 10653 10654 return tg_set_cfs_bandwidth(tg, period, quota, burst); 10655 } 10656 10657 static long tg_get_cfs_quota(struct task_group *tg) 10658 { 10659 u64 quota_us; 10660 10661 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 10662 return -1; 10663 10664 quota_us = tg->cfs_bandwidth.quota; 10665 do_div(quota_us, NSEC_PER_USEC); 10666 10667 return quota_us; 10668 } 10669 10670 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 10671 { 10672 u64 quota, period, burst; 10673 10674 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 10675 return -EINVAL; 10676 10677 period = (u64)cfs_period_us * NSEC_PER_USEC; 10678 quota = tg->cfs_bandwidth.quota; 10679 burst = tg->cfs_bandwidth.burst; 10680 10681 return tg_set_cfs_bandwidth(tg, period, quota, burst); 10682 } 10683 10684 static long tg_get_cfs_period(struct task_group *tg) 10685 { 10686 u64 cfs_period_us; 10687 10688 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 10689 do_div(cfs_period_us, NSEC_PER_USEC); 10690 10691 return cfs_period_us; 10692 } 10693 10694 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) 10695 { 10696 u64 quota, period, burst; 10697 10698 if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC) 10699 return -EINVAL; 10700 10701 burst = (u64)cfs_burst_us * NSEC_PER_USEC; 10702 period = ktime_to_ns(tg->cfs_bandwidth.period); 10703 quota = tg->cfs_bandwidth.quota; 10704 10705 return tg_set_cfs_bandwidth(tg, period, quota, burst); 10706 } 10707 10708 static long tg_get_cfs_burst(struct task_group *tg) 10709 { 10710 u64 burst_us; 10711 10712 burst_us = tg->cfs_bandwidth.burst; 10713 do_div(burst_us, NSEC_PER_USEC); 10714 10715 return 
burst_us; 10716 } 10717 10718 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 10719 struct cftype *cft) 10720 { 10721 return tg_get_cfs_quota(css_tg(css)); 10722 } 10723 10724 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 10725 struct cftype *cftype, s64 cfs_quota_us) 10726 { 10727 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 10728 } 10729 10730 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 10731 struct cftype *cft) 10732 { 10733 return tg_get_cfs_period(css_tg(css)); 10734 } 10735 10736 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 10737 struct cftype *cftype, u64 cfs_period_us) 10738 { 10739 return tg_set_cfs_period(css_tg(css), cfs_period_us); 10740 } 10741 10742 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css, 10743 struct cftype *cft) 10744 { 10745 return tg_get_cfs_burst(css_tg(css)); 10746 } 10747 10748 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css, 10749 struct cftype *cftype, u64 cfs_burst_us) 10750 { 10751 return tg_set_cfs_burst(css_tg(css), cfs_burst_us); 10752 } 10753 10754 struct cfs_schedulable_data { 10755 struct task_group *tg; 10756 u64 period, quota; 10757 }; 10758 10759 /* 10760 * normalize group quota/period to be quota/max_period 10761 * note: units are usecs 10762 */ 10763 static u64 normalize_cfs_quota(struct task_group *tg, 10764 struct cfs_schedulable_data *d) 10765 { 10766 u64 quota, period; 10767 10768 if (tg == d->tg) { 10769 period = d->period; 10770 quota = d->quota; 10771 } else { 10772 period = tg_get_cfs_period(tg); 10773 quota = tg_get_cfs_quota(tg); 10774 } 10775 10776 /* note: these should typically be equivalent */ 10777 if (quota == RUNTIME_INF || quota == -1) 10778 return RUNTIME_INF; 10779 10780 return to_ratio(period, quota); 10781 } 10782 10783 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 10784 { 10785 struct cfs_schedulable_data *d = data; 10786 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 10787 s64 quota = 0, parent_quota = -1; 10788 10789 if (!tg->parent) { 10790 quota = RUNTIME_INF; 10791 } else { 10792 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 10793 10794 quota = normalize_cfs_quota(tg, d); 10795 parent_quota = parent_b->hierarchical_quota; 10796 10797 /* 10798 * Ensure max(child_quota) <= parent_quota. On cgroup2, 10799 * always take the min. 
On cgroup1, only inherit when no 10800 * limit is set: 10801 */ 10802 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 10803 quota = min(quota, parent_quota); 10804 } else { 10805 if (quota == RUNTIME_INF) 10806 quota = parent_quota; 10807 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 10808 return -EINVAL; 10809 } 10810 } 10811 cfs_b->hierarchical_quota = quota; 10812 10813 return 0; 10814 } 10815 10816 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 10817 { 10818 int ret; 10819 struct cfs_schedulable_data data = { 10820 .tg = tg, 10821 .period = period, 10822 .quota = quota, 10823 }; 10824 10825 if (quota != RUNTIME_INF) { 10826 do_div(data.period, NSEC_PER_USEC); 10827 do_div(data.quota, NSEC_PER_USEC); 10828 } 10829 10830 rcu_read_lock(); 10831 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 10832 rcu_read_unlock(); 10833 10834 return ret; 10835 } 10836 10837 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 10838 { 10839 struct task_group *tg = css_tg(seq_css(sf)); 10840 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 10841 10842 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 10843 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 10844 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 10845 10846 if (schedstat_enabled() && tg != &root_task_group) { 10847 struct sched_statistics *stats; 10848 u64 ws = 0; 10849 int i; 10850 10851 for_each_possible_cpu(i) { 10852 stats = __schedstats_from_se(tg->se[i]); 10853 ws += schedstat_val(stats->wait_sum); 10854 } 10855 10856 seq_printf(sf, "wait_sum %llu\n", ws); 10857 } 10858 10859 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); 10860 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); 10861 10862 return 0; 10863 } 10864 #endif /* CONFIG_CFS_BANDWIDTH */ 10865 #endif /* CONFIG_FAIR_GROUP_SCHED */ 10866 10867 #ifdef CONFIG_RT_GROUP_SCHED 10868 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 10869 struct cftype *cft, s64 val) 10870 { 10871 return sched_group_set_rt_runtime(css_tg(css), val); 10872 } 10873 10874 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 10875 struct cftype *cft) 10876 { 10877 return sched_group_rt_runtime(css_tg(css)); 10878 } 10879 10880 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 10881 struct cftype *cftype, u64 rt_period_us) 10882 { 10883 return sched_group_set_rt_period(css_tg(css), rt_period_us); 10884 } 10885 10886 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 10887 struct cftype *cft) 10888 { 10889 return sched_group_rt_period(css_tg(css)); 10890 } 10891 #endif /* CONFIG_RT_GROUP_SCHED */ 10892 10893 #ifdef CONFIG_FAIR_GROUP_SCHED 10894 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css, 10895 struct cftype *cft) 10896 { 10897 return css_tg(css)->idle; 10898 } 10899 10900 static int cpu_idle_write_s64(struct cgroup_subsys_state *css, 10901 struct cftype *cft, s64 idle) 10902 { 10903 return sched_group_set_idle(css_tg(css), idle); 10904 } 10905 #endif 10906 10907 static struct cftype cpu_legacy_files[] = { 10908 #ifdef CONFIG_FAIR_GROUP_SCHED 10909 { 10910 .name = "shares", 10911 .read_u64 = cpu_shares_read_u64, 10912 .write_u64 = cpu_shares_write_u64, 10913 }, 10914 { 10915 .name = "idle", 10916 .read_s64 = cpu_idle_read_s64, 10917 .write_s64 = cpu_idle_write_s64, 10918 }, 10919 #endif 10920 #ifdef CONFIG_CFS_BANDWIDTH 10921 { 10922 .name = "cfs_quota_us", 10923 .read_s64 = cpu_cfs_quota_read_s64, 10924 .write_s64 = 
cpu_cfs_quota_write_s64, 10925 }, 10926 { 10927 .name = "cfs_period_us", 10928 .read_u64 = cpu_cfs_period_read_u64, 10929 .write_u64 = cpu_cfs_period_write_u64, 10930 }, 10931 { 10932 .name = "cfs_burst_us", 10933 .read_u64 = cpu_cfs_burst_read_u64, 10934 .write_u64 = cpu_cfs_burst_write_u64, 10935 }, 10936 { 10937 .name = "stat", 10938 .seq_show = cpu_cfs_stat_show, 10939 }, 10940 #endif 10941 #ifdef CONFIG_RT_GROUP_SCHED 10942 { 10943 .name = "rt_runtime_us", 10944 .read_s64 = cpu_rt_runtime_read, 10945 .write_s64 = cpu_rt_runtime_write, 10946 }, 10947 { 10948 .name = "rt_period_us", 10949 .read_u64 = cpu_rt_period_read_uint, 10950 .write_u64 = cpu_rt_period_write_uint, 10951 }, 10952 #endif 10953 #ifdef CONFIG_UCLAMP_TASK_GROUP 10954 { 10955 .name = "uclamp.min", 10956 .flags = CFTYPE_NOT_ON_ROOT, 10957 .seq_show = cpu_uclamp_min_show, 10958 .write = cpu_uclamp_min_write, 10959 }, 10960 { 10961 .name = "uclamp.max", 10962 .flags = CFTYPE_NOT_ON_ROOT, 10963 .seq_show = cpu_uclamp_max_show, 10964 .write = cpu_uclamp_max_write, 10965 }, 10966 #endif 10967 { } /* Terminate */ 10968 }; 10969 10970 static int cpu_extra_stat_show(struct seq_file *sf, 10971 struct cgroup_subsys_state *css) 10972 { 10973 #ifdef CONFIG_CFS_BANDWIDTH 10974 { 10975 struct task_group *tg = css_tg(css); 10976 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 10977 u64 throttled_usec, burst_usec; 10978 10979 throttled_usec = cfs_b->throttled_time; 10980 do_div(throttled_usec, NSEC_PER_USEC); 10981 burst_usec = cfs_b->burst_time; 10982 do_div(burst_usec, NSEC_PER_USEC); 10983 10984 seq_printf(sf, "nr_periods %d\n" 10985 "nr_throttled %d\n" 10986 "throttled_usec %llu\n" 10987 "nr_bursts %d\n" 10988 "burst_usec %llu\n", 10989 cfs_b->nr_periods, cfs_b->nr_throttled, 10990 throttled_usec, cfs_b->nr_burst, burst_usec); 10991 } 10992 #endif 10993 return 0; 10994 } 10995 10996 #ifdef CONFIG_FAIR_GROUP_SCHED 10997 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 10998 struct cftype *cft) 10999 { 11000 struct task_group *tg = css_tg(css); 11001 u64 weight = scale_load_down(tg->shares); 11002 11003 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); 11004 } 11005 11006 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 11007 struct cftype *cft, u64 weight) 11008 { 11009 /* 11010 * cgroup weight knobs should use the common MIN, DFL and MAX 11011 * values which are 1, 100 and 10000 respectively. While it loses 11012 * a bit of range on both ends, it maps pretty well onto the shares 11013 * value used by scheduler and the round-trip conversions preserve 11014 * the original value over the entire range. 
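	 *
	 * A few example conversions (with CGROUP_WEIGHT_DFL == 100):
	 *
	 *	cgroup weight   100 -> shares DIV_ROUND_CLOSEST(100   * 1024, 100) ==   1024
	 *	cgroup weight     1 -> shares DIV_ROUND_CLOSEST(1     * 1024, 100) ==     10
	 *	cgroup weight 10000 -> shares DIV_ROUND_CLOSEST(10000 * 1024, 100) == 102400
	 *
	 * and cpu_weight_read_u64() above maps those shares back to 100, 1
	 * and 10000 respectively.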
11015 */ 11016 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) 11017 return -ERANGE; 11018 11019 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); 11020 11021 return sched_group_set_shares(css_tg(css), scale_load(weight)); 11022 } 11023 11024 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 11025 struct cftype *cft) 11026 { 11027 unsigned long weight = scale_load_down(css_tg(css)->shares); 11028 int last_delta = INT_MAX; 11029 int prio, delta; 11030 11031 /* find the closest nice value to the current weight */ 11032 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 11033 delta = abs(sched_prio_to_weight[prio] - weight); 11034 if (delta >= last_delta) 11035 break; 11036 last_delta = delta; 11037 } 11038 11039 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 11040 } 11041 11042 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 11043 struct cftype *cft, s64 nice) 11044 { 11045 unsigned long weight; 11046 int idx; 11047 11048 if (nice < MIN_NICE || nice > MAX_NICE) 11049 return -ERANGE; 11050 11051 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 11052 idx = array_index_nospec(idx, 40); 11053 weight = sched_prio_to_weight[idx]; 11054 11055 return sched_group_set_shares(css_tg(css), scale_load(weight)); 11056 } 11057 #endif 11058 11059 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 11060 long period, long quota) 11061 { 11062 if (quota < 0) 11063 seq_puts(sf, "max"); 11064 else 11065 seq_printf(sf, "%ld", quota); 11066 11067 seq_printf(sf, " %ld\n", period); 11068 } 11069 11070 /* caller should put the current value in *@periodp before calling */ 11071 static int __maybe_unused cpu_period_quota_parse(char *buf, 11072 u64 *periodp, u64 *quotap) 11073 { 11074 char tok[21]; /* U64_MAX */ 11075 11076 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 11077 return -EINVAL; 11078 11079 *periodp *= NSEC_PER_USEC; 11080 11081 if (sscanf(tok, "%llu", quotap)) 11082 *quotap *= NSEC_PER_USEC; 11083 else if (!strcmp(tok, "max")) 11084 *quotap = RUNTIME_INF; 11085 else 11086 return -EINVAL; 11087 11088 return 0; 11089 } 11090 11091 #ifdef CONFIG_CFS_BANDWIDTH 11092 static int cpu_max_show(struct seq_file *sf, void *v) 11093 { 11094 struct task_group *tg = css_tg(seq_css(sf)); 11095 11096 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); 11097 return 0; 11098 } 11099 11100 static ssize_t cpu_max_write(struct kernfs_open_file *of, 11101 char *buf, size_t nbytes, loff_t off) 11102 { 11103 struct task_group *tg = css_tg(of_css(of)); 11104 u64 period = tg_get_cfs_period(tg); 11105 u64 burst = tg_get_cfs_burst(tg); 11106 u64 quota; 11107 int ret; 11108 11109 ret = cpu_period_quota_parse(buf, &period, "a); 11110 if (!ret) 11111 ret = tg_set_cfs_bandwidth(tg, period, quota, burst); 11112 return ret ?: nbytes; 11113 } 11114 #endif 11115 11116 static struct cftype cpu_files[] = { 11117 #ifdef CONFIG_FAIR_GROUP_SCHED 11118 { 11119 .name = "weight", 11120 .flags = CFTYPE_NOT_ON_ROOT, 11121 .read_u64 = cpu_weight_read_u64, 11122 .write_u64 = cpu_weight_write_u64, 11123 }, 11124 { 11125 .name = "weight.nice", 11126 .flags = CFTYPE_NOT_ON_ROOT, 11127 .read_s64 = cpu_weight_nice_read_s64, 11128 .write_s64 = cpu_weight_nice_write_s64, 11129 }, 11130 { 11131 .name = "idle", 11132 .flags = CFTYPE_NOT_ON_ROOT, 11133 .read_s64 = cpu_idle_read_s64, 11134 .write_s64 = cpu_idle_write_s64, 11135 }, 11136 #endif 11137 #ifdef CONFIG_CFS_BANDWIDTH 11138 { 11139 .name = "max", 11140 .flags = CFTYPE_NOT_ON_ROOT, 11141 
.seq_show = cpu_max_show, 11142 .write = cpu_max_write, 11143 }, 11144 { 11145 .name = "max.burst", 11146 .flags = CFTYPE_NOT_ON_ROOT, 11147 .read_u64 = cpu_cfs_burst_read_u64, 11148 .write_u64 = cpu_cfs_burst_write_u64, 11149 }, 11150 #endif 11151 #ifdef CONFIG_UCLAMP_TASK_GROUP 11152 { 11153 .name = "uclamp.min", 11154 .flags = CFTYPE_NOT_ON_ROOT, 11155 .seq_show = cpu_uclamp_min_show, 11156 .write = cpu_uclamp_min_write, 11157 }, 11158 { 11159 .name = "uclamp.max", 11160 .flags = CFTYPE_NOT_ON_ROOT, 11161 .seq_show = cpu_uclamp_max_show, 11162 .write = cpu_uclamp_max_write, 11163 }, 11164 #endif 11165 { } /* terminate */ 11166 }; 11167 11168 struct cgroup_subsys cpu_cgrp_subsys = { 11169 .css_alloc = cpu_cgroup_css_alloc, 11170 .css_online = cpu_cgroup_css_online, 11171 .css_released = cpu_cgroup_css_released, 11172 .css_free = cpu_cgroup_css_free, 11173 .css_extra_stat_show = cpu_extra_stat_show, 11174 .fork = cpu_cgroup_fork, 11175 .can_attach = cpu_cgroup_can_attach, 11176 .attach = cpu_cgroup_attach, 11177 .legacy_cftypes = cpu_legacy_files, 11178 .dfl_cftypes = cpu_files, 11179 .early_init = true, 11180 .threaded = true, 11181 }; 11182 11183 #endif /* CONFIG_CGROUP_SCHED */ 11184 11185 void dump_cpu_task(int cpu) 11186 { 11187 if (cpu == smp_processor_id() && in_hardirq()) { 11188 struct pt_regs *regs; 11189 11190 regs = get_irq_regs(); 11191 if (regs) { 11192 show_regs(regs); 11193 return; 11194 } 11195 } 11196 11197 if (trigger_single_cpu_backtrace(cpu)) 11198 return; 11199 11200 pr_info("Task dump for CPU %d:\n", cpu); 11201 sched_show_task(cpu_curr(cpu)); 11202 } 11203 11204 /* 11205 * Nice levels are multiplicative, with a gentle 10% change for every 11206 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 11207 * nice 1, it will get ~10% less CPU time than another CPU-bound task 11208 * that remained on nice 0. 11209 * 11210 * The "10% effect" is relative and cumulative: from _any_ nice level, 11211 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 11212 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 11213 * If a task goes up by ~10% and another task goes down by ~10% then 11214 * the relative distance between them is ~25%.) 11215 */ 11216 const int sched_prio_to_weight[40] = { 11217 /* -20 */ 88761, 71755, 56483, 46273, 36291, 11218 /* -15 */ 29154, 23254, 18705, 14949, 11916, 11219 /* -10 */ 9548, 7620, 6100, 4904, 3906, 11220 /* -5 */ 3121, 2501, 1991, 1586, 1277, 11221 /* 0 */ 1024, 820, 655, 526, 423, 11222 /* 5 */ 335, 272, 215, 172, 137, 11223 /* 10 */ 110, 87, 70, 56, 45, 11224 /* 15 */ 36, 29, 23, 18, 15, 11225 }; 11226 11227 /* 11228 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. 
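 * (For instance, the nice-0 weight is 1024, and 2^32 / 1024 == 4194304,
 *  which is the first entry of the "0" row below.)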
11229 * 11230 * In cases where the weight does not change often, we can use the 11231 * precalculated inverse to speed up arithmetics by turning divisions 11232 * into multiplications: 11233 */ 11234 const u32 sched_prio_to_wmult[40] = { 11235 /* -20 */ 48388, 59856, 76040, 92818, 118348, 11236 /* -15 */ 147320, 184698, 229616, 287308, 360437, 11237 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 11238 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 11239 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 11240 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 11241 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 11242 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 11243 }; 11244 11245 void call_trace_sched_update_nr_running(struct rq *rq, int count) 11246 { 11247 trace_sched_update_nr_running_tp(rq, count); 11248 } 11249