1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * kernel/sched/core.c 4 * 5 * Core kernel scheduler code and related syscalls 6 * 7 * Copyright (C) 1991-2002 Linus Torvalds 8 */ 9 #include <linux/highmem.h> 10 #include <linux/hrtimer_api.h> 11 #include <linux/ktime_api.h> 12 #include <linux/sched/signal.h> 13 #include <linux/syscalls_api.h> 14 #include <linux/debug_locks.h> 15 #include <linux/prefetch.h> 16 #include <linux/capability.h> 17 #include <linux/pgtable_api.h> 18 #include <linux/wait_bit.h> 19 #include <linux/jiffies.h> 20 #include <linux/spinlock_api.h> 21 #include <linux/cpumask_api.h> 22 #include <linux/lockdep_api.h> 23 #include <linux/hardirq.h> 24 #include <linux/softirq.h> 25 #include <linux/refcount_api.h> 26 #include <linux/topology.h> 27 #include <linux/sched/clock.h> 28 #include <linux/sched/cond_resched.h> 29 #include <linux/sched/cputime.h> 30 #include <linux/sched/debug.h> 31 #include <linux/sched/hotplug.h> 32 #include <linux/sched/init.h> 33 #include <linux/sched/isolation.h> 34 #include <linux/sched/loadavg.h> 35 #include <linux/sched/mm.h> 36 #include <linux/sched/nohz.h> 37 #include <linux/sched/rseq_api.h> 38 #include <linux/sched/rt.h> 39 40 #include <linux/blkdev.h> 41 #include <linux/context_tracking.h> 42 #include <linux/cpuset.h> 43 #include <linux/delayacct.h> 44 #include <linux/init_task.h> 45 #include <linux/interrupt.h> 46 #include <linux/ioprio.h> 47 #include <linux/kallsyms.h> 48 #include <linux/kcov.h> 49 #include <linux/kprobes.h> 50 #include <linux/llist_api.h> 51 #include <linux/mmu_context.h> 52 #include <linux/mmzone.h> 53 #include <linux/mutex_api.h> 54 #include <linux/nmi.h> 55 #include <linux/nospec.h> 56 #include <linux/perf_event_api.h> 57 #include <linux/profile.h> 58 #include <linux/psi.h> 59 #include <linux/rcuwait_api.h> 60 #include <linux/sched/wake_q.h> 61 #include <linux/scs.h> 62 #include <linux/slab.h> 63 #include <linux/syscalls.h> 64 #include <linux/vtime.h> 65 #include <linux/wait_api.h> 66 #include <linux/workqueue_api.h> 67 68 #ifdef CONFIG_PREEMPT_DYNAMIC 69 # ifdef CONFIG_GENERIC_ENTRY 70 # include <linux/entry-common.h> 71 # endif 72 #endif 73 74 #include <uapi/linux/sched/types.h> 75 76 #include <asm/irq_regs.h> 77 #include <asm/switch_to.h> 78 #include <asm/tlb.h> 79 80 #define CREATE_TRACE_POINTS 81 #include <linux/sched/rseq_api.h> 82 #include <trace/events/sched.h> 83 #undef CREATE_TRACE_POINTS 84 85 #include "sched.h" 86 #include "stats.h" 87 #include "autogroup.h" 88 89 #include "autogroup.h" 90 #include "pelt.h" 91 #include "smp.h" 92 #include "stats.h" 93 94 #include "../workqueue_internal.h" 95 #include "../../io_uring/io-wq.h" 96 #include "../smpboot.h" 97 98 /* 99 * Export tracepoints that act as a bare tracehook (ie: have no trace event 100 * associated with them) to allow external modules to probe them. 
101 */ 102 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp); 103 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp); 104 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp); 105 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp); 106 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp); 107 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp); 108 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp); 109 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp); 110 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp); 111 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp); 112 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp); 113 114 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 115 116 #ifdef CONFIG_SCHED_DEBUG 117 /* 118 * Debugging: various feature bits 119 * 120 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of 121 * sysctl_sched_features, defined in sched.h, to allow constants propagation 122 * at compile time and compiler optimization based on features default. 123 */ 124 #define SCHED_FEAT(name, enabled) \ 125 (1UL << __SCHED_FEAT_##name) * enabled | 126 const_debug unsigned int sysctl_sched_features = 127 #include "features.h" 128 0; 129 #undef SCHED_FEAT 130 131 /* 132 * Print a warning if need_resched is set for the given duration (if 133 * LATENCY_WARN is enabled). 134 * 135 * If sysctl_resched_latency_warn_once is set, only one warning will be shown 136 * per boot. 137 */ 138 __read_mostly int sysctl_resched_latency_warn_ms = 100; 139 __read_mostly int sysctl_resched_latency_warn_once = 1; 140 #endif /* CONFIG_SCHED_DEBUG */ 141 142 /* 143 * Number of tasks to iterate in a single balance run. 144 * Limited because this is done with IRQs disabled. 145 */ 146 const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK; 147 148 __read_mostly int scheduler_running; 149 150 #ifdef CONFIG_SCHED_CORE 151 152 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled); 153 154 /* kernel prio, less is more */ 155 static inline int __task_prio(struct task_struct *p) 156 { 157 if (p->sched_class == &stop_sched_class) /* trumps deadline */ 158 return -2; 159 160 if (rt_prio(p->prio)) /* includes deadline */ 161 return p->prio; /* [-1, 99] */ 162 163 if (p->sched_class == &idle_sched_class) 164 return MAX_RT_PRIO + NICE_WIDTH; /* 140 */ 165 166 return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */ 167 } 168 169 /* 170 * l(a,b) 171 * le(a,b) := !l(b,a) 172 * g(a,b) := l(b,a) 173 * ge(a,b) := !l(a,b) 174 */ 175 176 /* real prio, less is less */ 177 static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi) 178 { 179 180 int pa = __task_prio(a), pb = __task_prio(b); 181 182 if (-pa < -pb) 183 return true; 184 185 if (-pb < -pa) 186 return false; 187 188 if (pa == -1) /* dl_prio() doesn't work because of stop_class above */ 189 return !dl_time_before(a->dl.deadline, b->dl.deadline); 190 191 if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */ 192 return cfs_prio_less(a, b, in_fi); 193 194 return false; 195 } 196 197 static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b) 198 { 199 if (a->core_cookie < b->core_cookie) 200 return true; 201 202 if (a->core_cookie > b->core_cookie) 203 return false; 204 205 /* flip prio, so high prio is leftmost */ 206 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count)) 207 return true; 208 209 return false; 210 } 211 212 #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node) 213 214 static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b) 215 { 216 return __sched_core_less(__node_2_sc(a), 
__node_2_sc(b)); 217 } 218 219 static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node) 220 { 221 const struct task_struct *p = __node_2_sc(node); 222 unsigned long cookie = (unsigned long)key; 223 224 if (cookie < p->core_cookie) 225 return -1; 226 227 if (cookie > p->core_cookie) 228 return 1; 229 230 return 0; 231 } 232 233 void sched_core_enqueue(struct rq *rq, struct task_struct *p) 234 { 235 rq->core->core_task_seq++; 236 237 if (!p->core_cookie) 238 return; 239 240 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); 241 } 242 243 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) 244 { 245 rq->core->core_task_seq++; 246 247 if (sched_core_enqueued(p)) { 248 rb_erase(&p->core_node, &rq->core_tree); 249 RB_CLEAR_NODE(&p->core_node); 250 } 251 252 /* 253 * Migrating the last task off the cpu, with the cpu in forced idle 254 * state. Reschedule to create an accounting edge for forced idle, 255 * and re-examine whether the core is still in forced idle state. 256 */ 257 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 && 258 rq->core->core_forceidle_count && rq->curr == rq->idle) 259 resched_curr(rq); 260 } 261 262 /* 263 * Find left-most (aka, highest priority) task matching @cookie. 264 */ 265 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie) 266 { 267 struct rb_node *node; 268 269 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp); 270 /* 271 * The idle task always matches any cookie! 272 */ 273 if (!node) 274 return idle_sched_class.pick_task(rq); 275 276 return __node_2_sc(node); 277 } 278 279 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie) 280 { 281 struct rb_node *node = &p->core_node; 282 283 node = rb_next(node); 284 if (!node) 285 return NULL; 286 287 p = container_of(node, struct task_struct, core_node); 288 if (p->core_cookie != cookie) 289 return NULL; 290 291 return p; 292 } 293 294 /* 295 * Magic required such that: 296 * 297 * raw_spin_rq_lock(rq); 298 * ... 299 * raw_spin_rq_unlock(rq); 300 * 301 * ends up locking and unlocking the _same_ lock, and all CPUs 302 * always agree on what rq has what lock. 303 * 304 * XXX entirely possible to selectively enable cores, don't bother for now. 305 */ 306 307 static DEFINE_MUTEX(sched_core_mutex); 308 static atomic_t sched_core_count; 309 static struct cpumask sched_core_mask; 310 311 static void sched_core_lock(int cpu, unsigned long *flags) 312 { 313 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 314 int t, i = 0; 315 316 local_irq_save(*flags); 317 for_each_cpu(t, smt_mask) 318 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++); 319 } 320 321 static void sched_core_unlock(int cpu, unsigned long *flags) 322 { 323 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 324 int t; 325 326 for_each_cpu(t, smt_mask) 327 raw_spin_unlock(&cpu_rq(t)->__lock); 328 local_irq_restore(*flags); 329 } 330 331 static void __sched_core_flip(bool enabled) 332 { 333 unsigned long flags; 334 int cpu, t; 335 336 cpus_read_lock(); 337 338 /* 339 * Toggle the online cores, one by one. 
340 */ 341 cpumask_copy(&sched_core_mask, cpu_online_mask); 342 for_each_cpu(cpu, &sched_core_mask) { 343 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 344 345 sched_core_lock(cpu, &flags); 346 347 for_each_cpu(t, smt_mask) 348 cpu_rq(t)->core_enabled = enabled; 349 350 cpu_rq(cpu)->core->core_forceidle_start = 0; 351 352 sched_core_unlock(cpu, &flags); 353 354 cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask); 355 } 356 357 /* 358 * Toggle the offline CPUs. 359 */ 360 for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask) 361 cpu_rq(cpu)->core_enabled = enabled; 362 363 cpus_read_unlock(); 364 } 365 366 static void sched_core_assert_empty(void) 367 { 368 int cpu; 369 370 for_each_possible_cpu(cpu) 371 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree)); 372 } 373 374 static void __sched_core_enable(void) 375 { 376 static_branch_enable(&__sched_core_enabled); 377 /* 378 * Ensure all previous instances of raw_spin_rq_*lock() have finished 379 * and future ones will observe !sched_core_disabled(). 380 */ 381 synchronize_rcu(); 382 __sched_core_flip(true); 383 sched_core_assert_empty(); 384 } 385 386 static void __sched_core_disable(void) 387 { 388 sched_core_assert_empty(); 389 __sched_core_flip(false); 390 static_branch_disable(&__sched_core_enabled); 391 } 392 393 void sched_core_get(void) 394 { 395 if (atomic_inc_not_zero(&sched_core_count)) 396 return; 397 398 mutex_lock(&sched_core_mutex); 399 if (!atomic_read(&sched_core_count)) 400 __sched_core_enable(); 401 402 smp_mb__before_atomic(); 403 atomic_inc(&sched_core_count); 404 mutex_unlock(&sched_core_mutex); 405 } 406 407 static void __sched_core_put(struct work_struct *work) 408 { 409 if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) { 410 __sched_core_disable(); 411 mutex_unlock(&sched_core_mutex); 412 } 413 } 414 415 void sched_core_put(void) 416 { 417 static DECLARE_WORK(_work, __sched_core_put); 418 419 /* 420 * "There can be only one" 421 * 422 * Either this is the last one, or we don't actually need to do any 423 * 'work'. If it is the last *again*, we rely on 424 * WORK_STRUCT_PENDING_BIT. 425 */ 426 if (!atomic_add_unless(&sched_core_count, -1, 1)) 427 schedule_work(&_work); 428 } 429 430 #else /* !CONFIG_SCHED_CORE */ 431 432 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { } 433 static inline void 434 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { } 435 436 #endif /* CONFIG_SCHED_CORE */ 437 438 /* 439 * Serialization rules: 440 * 441 * Lock order: 442 * 443 * p->pi_lock 444 * rq->lock 445 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls) 446 * 447 * rq1->lock 448 * rq2->lock where: rq1 < rq2 449 * 450 * Regular state: 451 * 452 * Normal scheduling state is serialized by rq->lock. __schedule() takes the 453 * local CPU's rq->lock, it optionally removes the task from the runqueue and 454 * always looks at the local rq data structures to find the most eligible task 455 * to run next. 456 * 457 * Task enqueue is also under rq->lock, possibly taken from another CPU. 458 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to 459 * the local CPU to avoid bouncing the runqueue state around [ see 460 * ttwu_queue_wakelist() ] 461 * 462 * Task wakeup, specifically wakeups that involve migration, are horribly 463 * complicated to avoid having to take two rq->locks. 
464 * 465 * Special state: 466 * 467 * System-calls and anything external will use task_rq_lock() which acquires 468 * both p->pi_lock and rq->lock. As a consequence the state they change is 469 * stable while holding either lock: 470 * 471 * - sched_setaffinity()/ 472 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed 473 * - set_user_nice(): p->se.load, p->*prio 474 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio, 475 * p->se.load, p->rt_priority, 476 * p->dl.dl_{runtime, deadline, period, flags, bw, density} 477 * - sched_setnuma(): p->numa_preferred_nid 478 * - sched_move_task(): p->sched_task_group 479 * - uclamp_update_active() p->uclamp* 480 * 481 * p->state <- TASK_*: 482 * 483 * is changed locklessly using set_current_state(), __set_current_state() or 484 * set_special_state(), see their respective comments, or by 485 * try_to_wake_up(). This latter uses p->pi_lock to serialize against 486 * concurrent self. 487 * 488 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }: 489 * 490 * is set by activate_task() and cleared by deactivate_task(), under 491 * rq->lock. Non-zero indicates the task is runnable, the special 492 * ON_RQ_MIGRATING state is used for migration without holding both 493 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock(). 494 * 495 * p->on_cpu <- { 0, 1 }: 496 * 497 * is set by prepare_task() and cleared by finish_task() such that it will be 498 * set before p is scheduled-in and cleared after p is scheduled-out, both 499 * under rq->lock. Non-zero indicates the task is running on its CPU. 500 * 501 * [ The astute reader will observe that it is possible for two tasks on one 502 * CPU to have ->on_cpu = 1 at the same time. ] 503 * 504 * task_cpu(p): is changed by set_task_cpu(), the rules are: 505 * 506 * - Don't call set_task_cpu() on a blocked task: 507 * 508 * We don't care what CPU we're not running on, this simplifies hotplug, 509 * the CPU assignment of blocked tasks isn't required to be valid. 510 * 511 * - for try_to_wake_up(), called under p->pi_lock: 512 * 513 * This allows try_to_wake_up() to only take one rq->lock, see its comment. 
514 * 515 * - for migration called under rq->lock: 516 * [ see task_on_rq_migrating() in task_rq_lock() ] 517 * 518 * o move_queued_task() 519 * o detach_task() 520 * 521 * - for migration called under double_rq_lock(): 522 * 523 * o __migrate_swap_task() 524 * o push_rt_task() / pull_rt_task() 525 * o push_dl_task() / pull_dl_task() 526 * o dl_task_offline_migration() 527 * 528 */ 529 530 void raw_spin_rq_lock_nested(struct rq *rq, int subclass) 531 { 532 raw_spinlock_t *lock; 533 534 /* Matches synchronize_rcu() in __sched_core_enable() */ 535 preempt_disable(); 536 if (sched_core_disabled()) { 537 raw_spin_lock_nested(&rq->__lock, subclass); 538 /* preempt_count *MUST* be > 1 */ 539 preempt_enable_no_resched(); 540 return; 541 } 542 543 for (;;) { 544 lock = __rq_lockp(rq); 545 raw_spin_lock_nested(lock, subclass); 546 if (likely(lock == __rq_lockp(rq))) { 547 /* preempt_count *MUST* be > 1 */ 548 preempt_enable_no_resched(); 549 return; 550 } 551 raw_spin_unlock(lock); 552 } 553 } 554 555 bool raw_spin_rq_trylock(struct rq *rq) 556 { 557 raw_spinlock_t *lock; 558 bool ret; 559 560 /* Matches synchronize_rcu() in __sched_core_enable() */ 561 preempt_disable(); 562 if (sched_core_disabled()) { 563 ret = raw_spin_trylock(&rq->__lock); 564 preempt_enable(); 565 return ret; 566 } 567 568 for (;;) { 569 lock = __rq_lockp(rq); 570 ret = raw_spin_trylock(lock); 571 if (!ret || (likely(lock == __rq_lockp(rq)))) { 572 preempt_enable(); 573 return ret; 574 } 575 raw_spin_unlock(lock); 576 } 577 } 578 579 void raw_spin_rq_unlock(struct rq *rq) 580 { 581 raw_spin_unlock(rq_lockp(rq)); 582 } 583 584 #ifdef CONFIG_SMP 585 /* 586 * double_rq_lock - safely lock two runqueues 587 */ 588 void double_rq_lock(struct rq *rq1, struct rq *rq2) 589 { 590 lockdep_assert_irqs_disabled(); 591 592 if (rq_order_less(rq2, rq1)) 593 swap(rq1, rq2); 594 595 raw_spin_rq_lock(rq1); 596 if (__rq_lockp(rq1) != __rq_lockp(rq2)) 597 raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING); 598 599 double_rq_clock_clear_update(rq1, rq2); 600 } 601 #endif 602 603 /* 604 * __task_rq_lock - lock the rq @p resides on. 605 */ 606 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 607 __acquires(rq->lock) 608 { 609 struct rq *rq; 610 611 lockdep_assert_held(&p->pi_lock); 612 613 for (;;) { 614 rq = task_rq(p); 615 raw_spin_rq_lock(rq); 616 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { 617 rq_pin_lock(rq, rf); 618 return rq; 619 } 620 raw_spin_rq_unlock(rq); 621 622 while (unlikely(task_on_rq_migrating(p))) 623 cpu_relax(); 624 } 625 } 626 627 /* 628 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. 629 */ 630 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 631 __acquires(p->pi_lock) 632 __acquires(rq->lock) 633 { 634 struct rq *rq; 635 636 for (;;) { 637 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); 638 rq = task_rq(p); 639 raw_spin_rq_lock(rq); 640 /* 641 * move_queued_task() task_rq_lock() 642 * 643 * ACQUIRE (rq->lock) 644 * [S] ->on_rq = MIGRATING [L] rq = task_rq() 645 * WMB (__set_task_cpu()) ACQUIRE (rq->lock); 646 * [S] ->cpu = new_cpu [L] task_rq() 647 * [L] ->on_rq 648 * RELEASE (rq->lock) 649 * 650 * If we observe the old CPU in task_rq_lock(), the acquire of 651 * the old rq->lock will fully serialize against the stores. 652 * 653 * If we observe the new CPU in task_rq_lock(), the address 654 * dependency headed by '[L] rq = task_rq()' and the acquire 655 * will pair with the WMB to ensure we then also see migrating. 
656 */ 657 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { 658 rq_pin_lock(rq, rf); 659 return rq; 660 } 661 raw_spin_rq_unlock(rq); 662 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 663 664 while (unlikely(task_on_rq_migrating(p))) 665 cpu_relax(); 666 } 667 } 668 669 /* 670 * RQ-clock updating methods: 671 */ 672 673 static void update_rq_clock_task(struct rq *rq, s64 delta) 674 { 675 /* 676 * In theory, the compile should just see 0 here, and optimize out the call 677 * to sched_rt_avg_update. But I don't trust it... 678 */ 679 s64 __maybe_unused steal = 0, irq_delta = 0; 680 681 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 682 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; 683 684 /* 685 * Since irq_time is only updated on {soft,}irq_exit, we might run into 686 * this case when a previous update_rq_clock() happened inside a 687 * {soft,}irq region. 688 * 689 * When this happens, we stop ->clock_task and only update the 690 * prev_irq_time stamp to account for the part that fit, so that a next 691 * update will consume the rest. This ensures ->clock_task is 692 * monotonic. 693 * 694 * It does however cause some slight miss-attribution of {soft,}irq 695 * time, a more accurate solution would be to update the irq_time using 696 * the current rq->clock timestamp, except that would require using 697 * atomic ops. 698 */ 699 if (irq_delta > delta) 700 irq_delta = delta; 701 702 rq->prev_irq_time += irq_delta; 703 delta -= irq_delta; 704 psi_account_irqtime(rq->curr, irq_delta); 705 #endif 706 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 707 if (static_key_false((¶virt_steal_rq_enabled))) { 708 steal = paravirt_steal_clock(cpu_of(rq)); 709 steal -= rq->prev_steal_time_rq; 710 711 if (unlikely(steal > delta)) 712 steal = delta; 713 714 rq->prev_steal_time_rq += steal; 715 delta -= steal; 716 } 717 #endif 718 719 rq->clock_task += delta; 720 721 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 722 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) 723 update_irq_load_avg(rq, irq_delta + steal); 724 #endif 725 update_rq_clock_pelt(rq, delta); 726 } 727 728 void update_rq_clock(struct rq *rq) 729 { 730 s64 delta; 731 732 lockdep_assert_rq_held(rq); 733 734 if (rq->clock_update_flags & RQCF_ACT_SKIP) 735 return; 736 737 #ifdef CONFIG_SCHED_DEBUG 738 if (sched_feat(WARN_DOUBLE_CLOCK)) 739 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); 740 rq->clock_update_flags |= RQCF_UPDATED; 741 #endif 742 743 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; 744 if (delta < 0) 745 return; 746 rq->clock += delta; 747 update_rq_clock_task(rq, delta); 748 } 749 750 #ifdef CONFIG_SCHED_HRTICK 751 /* 752 * Use HR-timers to deliver accurate preemption points. 753 */ 754 755 static void hrtick_clear(struct rq *rq) 756 { 757 if (hrtimer_active(&rq->hrtick_timer)) 758 hrtimer_cancel(&rq->hrtick_timer); 759 } 760 761 /* 762 * High-resolution timer tick. 763 * Runs from hardirq context with interrupts disabled. 
764 */ 765 static enum hrtimer_restart hrtick(struct hrtimer *timer) 766 { 767 struct rq *rq = container_of(timer, struct rq, hrtick_timer); 768 struct rq_flags rf; 769 770 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 771 772 rq_lock(rq, &rf); 773 update_rq_clock(rq); 774 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 775 rq_unlock(rq, &rf); 776 777 return HRTIMER_NORESTART; 778 } 779 780 #ifdef CONFIG_SMP 781 782 static void __hrtick_restart(struct rq *rq) 783 { 784 struct hrtimer *timer = &rq->hrtick_timer; 785 ktime_t time = rq->hrtick_time; 786 787 hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD); 788 } 789 790 /* 791 * called from hardirq (IPI) context 792 */ 793 static void __hrtick_start(void *arg) 794 { 795 struct rq *rq = arg; 796 struct rq_flags rf; 797 798 rq_lock(rq, &rf); 799 __hrtick_restart(rq); 800 rq_unlock(rq, &rf); 801 } 802 803 /* 804 * Called to set the hrtick timer state. 805 * 806 * called with rq->lock held and irqs disabled 807 */ 808 void hrtick_start(struct rq *rq, u64 delay) 809 { 810 struct hrtimer *timer = &rq->hrtick_timer; 811 s64 delta; 812 813 /* 814 * Don't schedule slices shorter than 10000ns, that just 815 * doesn't make sense and can cause timer DoS. 816 */ 817 delta = max_t(s64, delay, 10000LL); 818 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta); 819 820 if (rq == this_rq()) 821 __hrtick_restart(rq); 822 else 823 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); 824 } 825 826 #else 827 /* 828 * Called to set the hrtick timer state. 829 * 830 * called with rq->lock held and irqs disabled 831 */ 832 void hrtick_start(struct rq *rq, u64 delay) 833 { 834 /* 835 * Don't schedule slices shorter than 10000ns, that just 836 * doesn't make sense. Rely on vruntime for fairness. 837 */ 838 delay = max_t(u64, delay, 10000LL); 839 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), 840 HRTIMER_MODE_REL_PINNED_HARD); 841 } 842 843 #endif /* CONFIG_SMP */ 844 845 static void hrtick_rq_init(struct rq *rq) 846 { 847 #ifdef CONFIG_SMP 848 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq); 849 #endif 850 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); 851 rq->hrtick_timer.function = hrtick; 852 } 853 #else /* CONFIG_SCHED_HRTICK */ 854 static inline void hrtick_clear(struct rq *rq) 855 { 856 } 857 858 static inline void hrtick_rq_init(struct rq *rq) 859 { 860 } 861 #endif /* CONFIG_SCHED_HRTICK */ 862 863 /* 864 * cmpxchg based fetch_or, macro so it works for different integer types 865 */ 866 #define fetch_or(ptr, mask) \ 867 ({ \ 868 typeof(ptr) _ptr = (ptr); \ 869 typeof(mask) _mask = (mask); \ 870 typeof(*_ptr) _val = *_ptr; \ 871 \ 872 do { \ 873 } while (!try_cmpxchg(_ptr, &_val, _val | _mask)); \ 874 _val; \ 875 }) 876 877 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) 878 /* 879 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, 880 * this avoids any races wrt polling state changes and thereby avoids 881 * spurious IPIs. 882 */ 883 static inline bool set_nr_and_not_polling(struct task_struct *p) 884 { 885 struct thread_info *ti = task_thread_info(p); 886 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); 887 } 888 889 /* 890 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. 891 * 892 * If this returns true, then the idle task promises to call 893 * sched_ttwu_pending() and reschedule soon. 
894 */ 895 static bool set_nr_if_polling(struct task_struct *p) 896 { 897 struct thread_info *ti = task_thread_info(p); 898 typeof(ti->flags) val = READ_ONCE(ti->flags); 899 900 for (;;) { 901 if (!(val & _TIF_POLLING_NRFLAG)) 902 return false; 903 if (val & _TIF_NEED_RESCHED) 904 return true; 905 if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED)) 906 break; 907 } 908 return true; 909 } 910 911 #else 912 static inline bool set_nr_and_not_polling(struct task_struct *p) 913 { 914 set_tsk_need_resched(p); 915 return true; 916 } 917 918 #ifdef CONFIG_SMP 919 static inline bool set_nr_if_polling(struct task_struct *p) 920 { 921 return false; 922 } 923 #endif 924 #endif 925 926 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) 927 { 928 struct wake_q_node *node = &task->wake_q; 929 930 /* 931 * Atomically grab the task, if ->wake_q is !nil already it means 932 * it's already queued (either by us or someone else) and will get the 933 * wakeup due to that. 934 * 935 * In order to ensure that a pending wakeup will observe our pending 936 * state, even in the failed case, an explicit smp_mb() must be used. 937 */ 938 smp_mb__before_atomic(); 939 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) 940 return false; 941 942 /* 943 * The head is context local, there can be no concurrency. 944 */ 945 *head->lastp = node; 946 head->lastp = &node->next; 947 return true; 948 } 949 950 /** 951 * wake_q_add() - queue a wakeup for 'later' waking. 952 * @head: the wake_q_head to add @task to 953 * @task: the task to queue for 'later' wakeup 954 * 955 * Queue a task for later wakeup, most likely by the wake_up_q() call in the 956 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come 957 * instantly. 958 * 959 * This function must be used as-if it were wake_up_process(); IOW the task 960 * must be ready to be woken at this location. 961 */ 962 void wake_q_add(struct wake_q_head *head, struct task_struct *task) 963 { 964 if (__wake_q_add(head, task)) 965 get_task_struct(task); 966 } 967 968 /** 969 * wake_q_add_safe() - safely queue a wakeup for 'later' waking. 970 * @head: the wake_q_head to add @task to 971 * @task: the task to queue for 'later' wakeup 972 * 973 * Queue a task for later wakeup, most likely by the wake_up_q() call in the 974 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come 975 * instantly. 976 * 977 * This function must be used as-if it were wake_up_process(); IOW the task 978 * must be ready to be woken at this location. 979 * 980 * This function is essentially a task-safe equivalent to wake_q_add(). Callers 981 * that already hold reference to @task can call the 'safe' version and trust 982 * wake_q to do the right thing depending whether or not the @task is already 983 * queued for wakeup. 984 */ 985 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) 986 { 987 if (!__wake_q_add(head, task)) 988 put_task_struct(task); 989 } 990 991 void wake_up_q(struct wake_q_head *head) 992 { 993 struct wake_q_node *node = head->first; 994 995 while (node != WAKE_Q_TAIL) { 996 struct task_struct *task; 997 998 task = container_of(node, struct task_struct, wake_q); 999 /* Task can safely be re-inserted now: */ 1000 node = node->next; 1001 task->wake_q.next = NULL; 1002 1003 /* 1004 * wake_up_process() executes a full barrier, which pairs with 1005 * the queueing in wake_q_add() so as not to miss wakeups. 
1006 */ 1007 wake_up_process(task); 1008 put_task_struct(task); 1009 } 1010 } 1011 1012 /* 1013 * resched_curr - mark rq's current task 'to be rescheduled now'. 1014 * 1015 * On UP this means the setting of the need_resched flag, on SMP it 1016 * might also involve a cross-CPU call to trigger the scheduler on 1017 * the target CPU. 1018 */ 1019 void resched_curr(struct rq *rq) 1020 { 1021 struct task_struct *curr = rq->curr; 1022 int cpu; 1023 1024 lockdep_assert_rq_held(rq); 1025 1026 if (test_tsk_need_resched(curr)) 1027 return; 1028 1029 cpu = cpu_of(rq); 1030 1031 if (cpu == smp_processor_id()) { 1032 set_tsk_need_resched(curr); 1033 set_preempt_need_resched(); 1034 return; 1035 } 1036 1037 if (set_nr_and_not_polling(curr)) 1038 smp_send_reschedule(cpu); 1039 else 1040 trace_sched_wake_idle_without_ipi(cpu); 1041 } 1042 1043 void resched_cpu(int cpu) 1044 { 1045 struct rq *rq = cpu_rq(cpu); 1046 unsigned long flags; 1047 1048 raw_spin_rq_lock_irqsave(rq, flags); 1049 if (cpu_online(cpu) || cpu == smp_processor_id()) 1050 resched_curr(rq); 1051 raw_spin_rq_unlock_irqrestore(rq, flags); 1052 } 1053 1054 #ifdef CONFIG_SMP 1055 #ifdef CONFIG_NO_HZ_COMMON 1056 /* 1057 * In the semi idle case, use the nearest busy CPU for migrating timers 1058 * from an idle CPU. This is good for power-savings. 1059 * 1060 * We don't do similar optimization for completely idle system, as 1061 * selecting an idle CPU will add more delays to the timers than intended 1062 * (as that CPU's timer base may not be uptodate wrt jiffies etc). 1063 */ 1064 int get_nohz_timer_target(void) 1065 { 1066 int i, cpu = smp_processor_id(), default_cpu = -1; 1067 struct sched_domain *sd; 1068 const struct cpumask *hk_mask; 1069 1070 if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) { 1071 if (!idle_cpu(cpu)) 1072 return cpu; 1073 default_cpu = cpu; 1074 } 1075 1076 hk_mask = housekeeping_cpumask(HK_TYPE_TIMER); 1077 1078 rcu_read_lock(); 1079 for_each_domain(cpu, sd) { 1080 for_each_cpu_and(i, sched_domain_span(sd), hk_mask) { 1081 if (cpu == i) 1082 continue; 1083 1084 if (!idle_cpu(i)) { 1085 cpu = i; 1086 goto unlock; 1087 } 1088 } 1089 } 1090 1091 if (default_cpu == -1) 1092 default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER); 1093 cpu = default_cpu; 1094 unlock: 1095 rcu_read_unlock(); 1096 return cpu; 1097 } 1098 1099 /* 1100 * When add_timer_on() enqueues a timer into the timer wheel of an 1101 * idle CPU then this timer might expire before the next timer event 1102 * which is scheduled to wake up that CPU. In case of a completely 1103 * idle system the next event might even be infinite time into the 1104 * future. wake_up_idle_cpu() ensures that the CPU is woken up and 1105 * leaves the inner idle loop so the newly added timer is taken into 1106 * account when the CPU goes back to idle and evaluates the timer 1107 * wheel for the next timer event. 1108 */ 1109 static void wake_up_idle_cpu(int cpu) 1110 { 1111 struct rq *rq = cpu_rq(cpu); 1112 1113 if (cpu == smp_processor_id()) 1114 return; 1115 1116 if (set_nr_and_not_polling(rq->idle)) 1117 smp_send_reschedule(cpu); 1118 else 1119 trace_sched_wake_idle_without_ipi(cpu); 1120 } 1121 1122 static bool wake_up_full_nohz_cpu(int cpu) 1123 { 1124 /* 1125 * We just need the target to call irq_exit() and re-evaluate 1126 * the next tick. The nohz full kick at least implies that. 1127 * If needed we can still optimize that later with an 1128 * empty IRQ. 1129 */ 1130 if (cpu_is_offline(cpu)) 1131 return true; /* Don't try to wake offline CPUs. 
*/ 1132 if (tick_nohz_full_cpu(cpu)) { 1133 if (cpu != smp_processor_id() || 1134 tick_nohz_tick_stopped()) 1135 tick_nohz_full_kick_cpu(cpu); 1136 return true; 1137 } 1138 1139 return false; 1140 } 1141 1142 /* 1143 * Wake up the specified CPU. If the CPU is going offline, it is the 1144 * caller's responsibility to deal with the lost wakeup, for example, 1145 * by hooking into the CPU_DEAD notifier like timers and hrtimers do. 1146 */ 1147 void wake_up_nohz_cpu(int cpu) 1148 { 1149 if (!wake_up_full_nohz_cpu(cpu)) 1150 wake_up_idle_cpu(cpu); 1151 } 1152 1153 static void nohz_csd_func(void *info) 1154 { 1155 struct rq *rq = info; 1156 int cpu = cpu_of(rq); 1157 unsigned int flags; 1158 1159 /* 1160 * Release the rq::nohz_csd. 1161 */ 1162 flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu)); 1163 WARN_ON(!(flags & NOHZ_KICK_MASK)); 1164 1165 rq->idle_balance = idle_cpu(cpu); 1166 if (rq->idle_balance && !need_resched()) { 1167 rq->nohz_idle_balance = flags; 1168 raise_softirq_irqoff(SCHED_SOFTIRQ); 1169 } 1170 } 1171 1172 #endif /* CONFIG_NO_HZ_COMMON */ 1173 1174 #ifdef CONFIG_NO_HZ_FULL 1175 bool sched_can_stop_tick(struct rq *rq) 1176 { 1177 int fifo_nr_running; 1178 1179 /* Deadline tasks, even if single, need the tick */ 1180 if (rq->dl.dl_nr_running) 1181 return false; 1182 1183 /* 1184 * If there are more than one RR tasks, we need the tick to affect the 1185 * actual RR behaviour. 1186 */ 1187 if (rq->rt.rr_nr_running) { 1188 if (rq->rt.rr_nr_running == 1) 1189 return true; 1190 else 1191 return false; 1192 } 1193 1194 /* 1195 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no 1196 * forced preemption between FIFO tasks. 1197 */ 1198 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; 1199 if (fifo_nr_running) 1200 return true; 1201 1202 /* 1203 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left; 1204 * if there's more than one we need the tick for involuntary 1205 * preemption. 1206 */ 1207 if (rq->nr_running > 1) 1208 return false; 1209 1210 return true; 1211 } 1212 #endif /* CONFIG_NO_HZ_FULL */ 1213 #endif /* CONFIG_SMP */ 1214 1215 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ 1216 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) 1217 /* 1218 * Iterate task_group tree rooted at *from, calling @down when first entering a 1219 * node and @up when leaving it for the final time. 1220 * 1221 * Caller must hold rcu_lock or sufficient equivalent. 
1222 */ 1223 int walk_tg_tree_from(struct task_group *from, 1224 tg_visitor down, tg_visitor up, void *data) 1225 { 1226 struct task_group *parent, *child; 1227 int ret; 1228 1229 parent = from; 1230 1231 down: 1232 ret = (*down)(parent, data); 1233 if (ret) 1234 goto out; 1235 list_for_each_entry_rcu(child, &parent->children, siblings) { 1236 parent = child; 1237 goto down; 1238 1239 up: 1240 continue; 1241 } 1242 ret = (*up)(parent, data); 1243 if (ret || parent == from) 1244 goto out; 1245 1246 child = parent; 1247 parent = parent->parent; 1248 if (parent) 1249 goto up; 1250 out: 1251 return ret; 1252 } 1253 1254 int tg_nop(struct task_group *tg, void *data) 1255 { 1256 return 0; 1257 } 1258 #endif 1259 1260 static void set_load_weight(struct task_struct *p, bool update_load) 1261 { 1262 int prio = p->static_prio - MAX_RT_PRIO; 1263 struct load_weight *load = &p->se.load; 1264 1265 /* 1266 * SCHED_IDLE tasks get minimal weight: 1267 */ 1268 if (task_has_idle_policy(p)) { 1269 load->weight = scale_load(WEIGHT_IDLEPRIO); 1270 load->inv_weight = WMULT_IDLEPRIO; 1271 return; 1272 } 1273 1274 /* 1275 * SCHED_OTHER tasks have to update their load when changing their 1276 * weight 1277 */ 1278 if (update_load && p->sched_class == &fair_sched_class) { 1279 reweight_task(p, prio); 1280 } else { 1281 load->weight = scale_load(sched_prio_to_weight[prio]); 1282 load->inv_weight = sched_prio_to_wmult[prio]; 1283 } 1284 } 1285 1286 #ifdef CONFIG_UCLAMP_TASK 1287 /* 1288 * Serializes updates of utilization clamp values 1289 * 1290 * The (slow-path) user-space triggers utilization clamp value updates which 1291 * can require updates on (fast-path) scheduler's data structures used to 1292 * support enqueue/dequeue operations. 1293 * While the per-CPU rq lock protects fast-path update operations, user-space 1294 * requests are serialized using a mutex to reduce the risk of conflicting 1295 * updates or API abuses. 1296 */ 1297 static DEFINE_MUTEX(uclamp_mutex); 1298 1299 /* Max allowed minimum utilization */ 1300 static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE; 1301 1302 /* Max allowed maximum utilization */ 1303 static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE; 1304 1305 /* 1306 * By default RT tasks run at the maximum performance point/capacity of the 1307 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to 1308 * SCHED_CAPACITY_SCALE. 1309 * 1310 * This knob allows admins to change the default behavior when uclamp is being 1311 * used. In battery powered devices, particularly, running at the maximum 1312 * capacity and frequency will increase energy consumption and shorten the 1313 * battery life. 1314 * 1315 * This knob only affects RT tasks that their uclamp_se->user_defined == false. 1316 * 1317 * This knob will not override the system default sched_util_clamp_min defined 1318 * above. 1319 */ 1320 static unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE; 1321 1322 /* All clamps are required to be less or equal than these values */ 1323 static struct uclamp_se uclamp_default[UCLAMP_CNT]; 1324 1325 /* 1326 * This static key is used to reduce the uclamp overhead in the fast path. It 1327 * primarily disables the call to uclamp_rq_{inc, dec}() in 1328 * enqueue/dequeue_task(). 1329 * 1330 * This allows users to continue to enable uclamp in their kernel config with 1331 * minimum uclamp overhead in the fast path. 
1332 * 1333 * As soon as userspace modifies any of the uclamp knobs, the static key is 1334 * enabled, since we have an actual users that make use of uclamp 1335 * functionality. 1336 * 1337 * The knobs that would enable this static key are: 1338 * 1339 * * A task modifying its uclamp value with sched_setattr(). 1340 * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs. 1341 * * An admin modifying the cgroup cpu.uclamp.{min, max} 1342 */ 1343 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used); 1344 1345 /* Integer rounded range for each bucket */ 1346 #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS) 1347 1348 #define for_each_clamp_id(clamp_id) \ 1349 for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++) 1350 1351 static inline unsigned int uclamp_bucket_id(unsigned int clamp_value) 1352 { 1353 return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1); 1354 } 1355 1356 static inline unsigned int uclamp_none(enum uclamp_id clamp_id) 1357 { 1358 if (clamp_id == UCLAMP_MIN) 1359 return 0; 1360 return SCHED_CAPACITY_SCALE; 1361 } 1362 1363 static inline void uclamp_se_set(struct uclamp_se *uc_se, 1364 unsigned int value, bool user_defined) 1365 { 1366 uc_se->value = value; 1367 uc_se->bucket_id = uclamp_bucket_id(value); 1368 uc_se->user_defined = user_defined; 1369 } 1370 1371 static inline unsigned int 1372 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id, 1373 unsigned int clamp_value) 1374 { 1375 /* 1376 * Avoid blocked utilization pushing up the frequency when we go 1377 * idle (which drops the max-clamp) by retaining the last known 1378 * max-clamp. 1379 */ 1380 if (clamp_id == UCLAMP_MAX) { 1381 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; 1382 return clamp_value; 1383 } 1384 1385 return uclamp_none(UCLAMP_MIN); 1386 } 1387 1388 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id, 1389 unsigned int clamp_value) 1390 { 1391 /* Reset max-clamp retention only on idle exit */ 1392 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) 1393 return; 1394 1395 WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value); 1396 } 1397 1398 static inline 1399 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, 1400 unsigned int clamp_value) 1401 { 1402 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; 1403 int bucket_id = UCLAMP_BUCKETS - 1; 1404 1405 /* 1406 * Since both min and max clamps are max aggregated, find the 1407 * top most bucket with tasks in. 
1408 */ 1409 for ( ; bucket_id >= 0; bucket_id--) { 1410 if (!bucket[bucket_id].tasks) 1411 continue; 1412 return bucket[bucket_id].value; 1413 } 1414 1415 /* No tasks -- default clamp values */ 1416 return uclamp_idle_value(rq, clamp_id, clamp_value); 1417 } 1418 1419 static void __uclamp_update_util_min_rt_default(struct task_struct *p) 1420 { 1421 unsigned int default_util_min; 1422 struct uclamp_se *uc_se; 1423 1424 lockdep_assert_held(&p->pi_lock); 1425 1426 uc_se = &p->uclamp_req[UCLAMP_MIN]; 1427 1428 /* Only sync if user didn't override the default */ 1429 if (uc_se->user_defined) 1430 return; 1431 1432 default_util_min = sysctl_sched_uclamp_util_min_rt_default; 1433 uclamp_se_set(uc_se, default_util_min, false); 1434 } 1435 1436 static void uclamp_update_util_min_rt_default(struct task_struct *p) 1437 { 1438 struct rq_flags rf; 1439 struct rq *rq; 1440 1441 if (!rt_task(p)) 1442 return; 1443 1444 /* Protect updates to p->uclamp_* */ 1445 rq = task_rq_lock(p, &rf); 1446 __uclamp_update_util_min_rt_default(p); 1447 task_rq_unlock(rq, p, &rf); 1448 } 1449 1450 static inline struct uclamp_se 1451 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id) 1452 { 1453 /* Copy by value as we could modify it */ 1454 struct uclamp_se uc_req = p->uclamp_req[clamp_id]; 1455 #ifdef CONFIG_UCLAMP_TASK_GROUP 1456 unsigned int tg_min, tg_max, value; 1457 1458 /* 1459 * Tasks in autogroups or root task group will be 1460 * restricted by system defaults. 1461 */ 1462 if (task_group_is_autogroup(task_group(p))) 1463 return uc_req; 1464 if (task_group(p) == &root_task_group) 1465 return uc_req; 1466 1467 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value; 1468 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value; 1469 value = uc_req.value; 1470 value = clamp(value, tg_min, tg_max); 1471 uclamp_se_set(&uc_req, value, false); 1472 #endif 1473 1474 return uc_req; 1475 } 1476 1477 /* 1478 * The effective clamp bucket index of a task depends on, by increasing 1479 * priority: 1480 * - the task specific clamp value, when explicitly requested from userspace 1481 * - the task group effective clamp value, for tasks not either in the root 1482 * group or in an autogroup 1483 * - the system default clamp value, defined by the sysadmin 1484 */ 1485 static inline struct uclamp_se 1486 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id) 1487 { 1488 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id); 1489 struct uclamp_se uc_max = uclamp_default[clamp_id]; 1490 1491 /* System default restrictions always apply */ 1492 if (unlikely(uc_req.value > uc_max.value)) 1493 return uc_max; 1494 1495 return uc_req; 1496 } 1497 1498 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) 1499 { 1500 struct uclamp_se uc_eff; 1501 1502 /* Task currently refcounted: use back-annotated (effective) value */ 1503 if (p->uclamp[clamp_id].active) 1504 return (unsigned long)p->uclamp[clamp_id].value; 1505 1506 uc_eff = uclamp_eff_get(p, clamp_id); 1507 1508 return (unsigned long)uc_eff.value; 1509 } 1510 1511 /* 1512 * When a task is enqueued on a rq, the clamp bucket currently defined by the 1513 * task's uclamp::bucket_id is refcounted on that rq. This also immediately 1514 * updates the rq's clamp value if required. 1515 * 1516 * Tasks can have a task-specific value requested from user-space, track 1517 * within each bucket the maximum value for tasks refcounted in it. 
1518 * This "local max aggregation" allows to track the exact "requested" value 1519 * for each bucket when all its RUNNABLE tasks require the same clamp. 1520 */ 1521 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, 1522 enum uclamp_id clamp_id) 1523 { 1524 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; 1525 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; 1526 struct uclamp_bucket *bucket; 1527 1528 lockdep_assert_rq_held(rq); 1529 1530 /* Update task effective clamp */ 1531 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); 1532 1533 bucket = &uc_rq->bucket[uc_se->bucket_id]; 1534 bucket->tasks++; 1535 uc_se->active = true; 1536 1537 uclamp_idle_reset(rq, clamp_id, uc_se->value); 1538 1539 /* 1540 * Local max aggregation: rq buckets always track the max 1541 * "requested" clamp value of its RUNNABLE tasks. 1542 */ 1543 if (bucket->tasks == 1 || uc_se->value > bucket->value) 1544 bucket->value = uc_se->value; 1545 1546 if (uc_se->value > READ_ONCE(uc_rq->value)) 1547 WRITE_ONCE(uc_rq->value, uc_se->value); 1548 } 1549 1550 /* 1551 * When a task is dequeued from a rq, the clamp bucket refcounted by the task 1552 * is released. If this is the last task reference counting the rq's max 1553 * active clamp value, then the rq's clamp value is updated. 1554 * 1555 * Both refcounted tasks and rq's cached clamp values are expected to be 1556 * always valid. If it's detected they are not, as defensive programming, 1557 * enforce the expected state and warn. 1558 */ 1559 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, 1560 enum uclamp_id clamp_id) 1561 { 1562 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; 1563 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; 1564 struct uclamp_bucket *bucket; 1565 unsigned int bkt_clamp; 1566 unsigned int rq_clamp; 1567 1568 lockdep_assert_rq_held(rq); 1569 1570 /* 1571 * If sched_uclamp_used was enabled after task @p was enqueued, 1572 * we could end up with unbalanced call to uclamp_rq_dec_id(). 1573 * 1574 * In this case the uc_se->active flag should be false since no uclamp 1575 * accounting was performed at enqueue time and we can just return 1576 * here. 1577 * 1578 * Need to be careful of the following enqueue/dequeue ordering 1579 * problem too 1580 * 1581 * enqueue(taskA) 1582 * // sched_uclamp_used gets enabled 1583 * enqueue(taskB) 1584 * dequeue(taskA) 1585 * // Must not decrement bucket->tasks here 1586 * dequeue(taskB) 1587 * 1588 * where we could end up with stale data in uc_se and 1589 * bucket[uc_se->bucket_id]. 1590 * 1591 * The following check here eliminates the possibility of such race. 1592 */ 1593 if (unlikely(!uc_se->active)) 1594 return; 1595 1596 bucket = &uc_rq->bucket[uc_se->bucket_id]; 1597 1598 SCHED_WARN_ON(!bucket->tasks); 1599 if (likely(bucket->tasks)) 1600 bucket->tasks--; 1601 1602 uc_se->active = false; 1603 1604 /* 1605 * Keep "local max aggregation" simple and accept to (possibly) 1606 * overboost some RUNNABLE tasks in the same bucket. 1607 * The rq clamp bucket value is reset to its base value whenever 1608 * there are no more RUNNABLE tasks refcounting it. 1609 */ 1610 if (likely(bucket->tasks)) 1611 return; 1612 1613 rq_clamp = READ_ONCE(uc_rq->value); 1614 /* 1615 * Defensive programming: this should never happen. If it happens, 1616 * e.g. due to future modification, warn and fixup the expected value. 
1617 */ 1618 SCHED_WARN_ON(bucket->value > rq_clamp); 1619 if (bucket->value >= rq_clamp) { 1620 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); 1621 WRITE_ONCE(uc_rq->value, bkt_clamp); 1622 } 1623 } 1624 1625 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) 1626 { 1627 enum uclamp_id clamp_id; 1628 1629 /* 1630 * Avoid any overhead until uclamp is actually used by the userspace. 1631 * 1632 * The condition is constructed such that a NOP is generated when 1633 * sched_uclamp_used is disabled. 1634 */ 1635 if (!static_branch_unlikely(&sched_uclamp_used)) 1636 return; 1637 1638 if (unlikely(!p->sched_class->uclamp_enabled)) 1639 return; 1640 1641 for_each_clamp_id(clamp_id) 1642 uclamp_rq_inc_id(rq, p, clamp_id); 1643 1644 /* Reset clamp idle holding when there is one RUNNABLE task */ 1645 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) 1646 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; 1647 } 1648 1649 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) 1650 { 1651 enum uclamp_id clamp_id; 1652 1653 /* 1654 * Avoid any overhead until uclamp is actually used by the userspace. 1655 * 1656 * The condition is constructed such that a NOP is generated when 1657 * sched_uclamp_used is disabled. 1658 */ 1659 if (!static_branch_unlikely(&sched_uclamp_used)) 1660 return; 1661 1662 if (unlikely(!p->sched_class->uclamp_enabled)) 1663 return; 1664 1665 for_each_clamp_id(clamp_id) 1666 uclamp_rq_dec_id(rq, p, clamp_id); 1667 } 1668 1669 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p, 1670 enum uclamp_id clamp_id) 1671 { 1672 if (!p->uclamp[clamp_id].active) 1673 return; 1674 1675 uclamp_rq_dec_id(rq, p, clamp_id); 1676 uclamp_rq_inc_id(rq, p, clamp_id); 1677 1678 /* 1679 * Make sure to clear the idle flag if we've transiently reached 0 1680 * active tasks on rq. 1681 */ 1682 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE)) 1683 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; 1684 } 1685 1686 static inline void 1687 uclamp_update_active(struct task_struct *p) 1688 { 1689 enum uclamp_id clamp_id; 1690 struct rq_flags rf; 1691 struct rq *rq; 1692 1693 /* 1694 * Lock the task and the rq where the task is (or was) queued. 1695 * 1696 * We might lock the (previous) rq of a !RUNNABLE task, but that's the 1697 * price to pay to safely serialize util_{min,max} updates with 1698 * enqueues, dequeues and migration operations. 1699 * This is the same locking schema used by __set_cpus_allowed_ptr(). 1700 */ 1701 rq = task_rq_lock(p, &rf); 1702 1703 /* 1704 * Setting the clamp bucket is serialized by task_rq_lock(). 1705 * If the task is not yet RUNNABLE and its task_struct is not 1706 * affecting a valid clamp bucket, the next time it's enqueued, 1707 * it will already see the updated clamp bucket value. 
1708 */ 1709 for_each_clamp_id(clamp_id) 1710 uclamp_rq_reinc_id(rq, p, clamp_id); 1711 1712 task_rq_unlock(rq, p, &rf); 1713 } 1714 1715 #ifdef CONFIG_UCLAMP_TASK_GROUP 1716 static inline void 1717 uclamp_update_active_tasks(struct cgroup_subsys_state *css) 1718 { 1719 struct css_task_iter it; 1720 struct task_struct *p; 1721 1722 css_task_iter_start(css, 0, &it); 1723 while ((p = css_task_iter_next(&it))) 1724 uclamp_update_active(p); 1725 css_task_iter_end(&it); 1726 } 1727 1728 static void cpu_util_update_eff(struct cgroup_subsys_state *css); 1729 #endif 1730 1731 #ifdef CONFIG_SYSCTL 1732 #ifdef CONFIG_UCLAMP_TASK 1733 #ifdef CONFIG_UCLAMP_TASK_GROUP 1734 static void uclamp_update_root_tg(void) 1735 { 1736 struct task_group *tg = &root_task_group; 1737 1738 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], 1739 sysctl_sched_uclamp_util_min, false); 1740 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], 1741 sysctl_sched_uclamp_util_max, false); 1742 1743 rcu_read_lock(); 1744 cpu_util_update_eff(&root_task_group.css); 1745 rcu_read_unlock(); 1746 } 1747 #else 1748 static void uclamp_update_root_tg(void) { } 1749 #endif 1750 1751 static void uclamp_sync_util_min_rt_default(void) 1752 { 1753 struct task_struct *g, *p; 1754 1755 /* 1756 * copy_process() sysctl_uclamp 1757 * uclamp_min_rt = X; 1758 * write_lock(&tasklist_lock) read_lock(&tasklist_lock) 1759 * // link thread smp_mb__after_spinlock() 1760 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock); 1761 * sched_post_fork() for_each_process_thread() 1762 * __uclamp_sync_rt() __uclamp_sync_rt() 1763 * 1764 * Ensures that either sched_post_fork() will observe the new 1765 * uclamp_min_rt or for_each_process_thread() will observe the new 1766 * task. 1767 */ 1768 read_lock(&tasklist_lock); 1769 smp_mb__after_spinlock(); 1770 read_unlock(&tasklist_lock); 1771 1772 rcu_read_lock(); 1773 for_each_process_thread(g, p) 1774 uclamp_update_util_min_rt_default(p); 1775 rcu_read_unlock(); 1776 } 1777 1778 static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, 1779 void *buffer, size_t *lenp, loff_t *ppos) 1780 { 1781 bool update_root_tg = false; 1782 int old_min, old_max, old_min_rt; 1783 int result; 1784 1785 mutex_lock(&uclamp_mutex); 1786 old_min = sysctl_sched_uclamp_util_min; 1787 old_max = sysctl_sched_uclamp_util_max; 1788 old_min_rt = sysctl_sched_uclamp_util_min_rt_default; 1789 1790 result = proc_dointvec(table, write, buffer, lenp, ppos); 1791 if (result) 1792 goto undo; 1793 if (!write) 1794 goto done; 1795 1796 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max || 1797 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE || 1798 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) { 1799 1800 result = -EINVAL; 1801 goto undo; 1802 } 1803 1804 if (old_min != sysctl_sched_uclamp_util_min) { 1805 uclamp_se_set(&uclamp_default[UCLAMP_MIN], 1806 sysctl_sched_uclamp_util_min, false); 1807 update_root_tg = true; 1808 } 1809 if (old_max != sysctl_sched_uclamp_util_max) { 1810 uclamp_se_set(&uclamp_default[UCLAMP_MAX], 1811 sysctl_sched_uclamp_util_max, false); 1812 update_root_tg = true; 1813 } 1814 1815 if (update_root_tg) { 1816 static_branch_enable(&sched_uclamp_used); 1817 uclamp_update_root_tg(); 1818 } 1819 1820 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) { 1821 static_branch_enable(&sched_uclamp_used); 1822 uclamp_sync_util_min_rt_default(); 1823 } 1824 1825 /* 1826 * We update all RUNNABLE tasks only when task groups are in use. 
1827 * Otherwise, keep it simple and do just a lazy update at each next 1828 * task enqueue time. 1829 */ 1830 1831 goto done; 1832 1833 undo: 1834 sysctl_sched_uclamp_util_min = old_min; 1835 sysctl_sched_uclamp_util_max = old_max; 1836 sysctl_sched_uclamp_util_min_rt_default = old_min_rt; 1837 done: 1838 mutex_unlock(&uclamp_mutex); 1839 1840 return result; 1841 } 1842 #endif 1843 #endif 1844 1845 static int uclamp_validate(struct task_struct *p, 1846 const struct sched_attr *attr) 1847 { 1848 int util_min = p->uclamp_req[UCLAMP_MIN].value; 1849 int util_max = p->uclamp_req[UCLAMP_MAX].value; 1850 1851 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) { 1852 util_min = attr->sched_util_min; 1853 1854 if (util_min + 1 > SCHED_CAPACITY_SCALE + 1) 1855 return -EINVAL; 1856 } 1857 1858 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) { 1859 util_max = attr->sched_util_max; 1860 1861 if (util_max + 1 > SCHED_CAPACITY_SCALE + 1) 1862 return -EINVAL; 1863 } 1864 1865 if (util_min != -1 && util_max != -1 && util_min > util_max) 1866 return -EINVAL; 1867 1868 /* 1869 * We have valid uclamp attributes; make sure uclamp is enabled. 1870 * 1871 * We need to do that here, because enabling static branches is a 1872 * blocking operation which obviously cannot be done while holding 1873 * scheduler locks. 1874 */ 1875 static_branch_enable(&sched_uclamp_used); 1876 1877 return 0; 1878 } 1879 1880 static bool uclamp_reset(const struct sched_attr *attr, 1881 enum uclamp_id clamp_id, 1882 struct uclamp_se *uc_se) 1883 { 1884 /* Reset on sched class change for a non user-defined clamp value. */ 1885 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) && 1886 !uc_se->user_defined) 1887 return true; 1888 1889 /* Reset on sched_util_{min,max} == -1. */ 1890 if (clamp_id == UCLAMP_MIN && 1891 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN && 1892 attr->sched_util_min == -1) { 1893 return true; 1894 } 1895 1896 if (clamp_id == UCLAMP_MAX && 1897 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX && 1898 attr->sched_util_max == -1) { 1899 return true; 1900 } 1901 1902 return false; 1903 } 1904 1905 static void __setscheduler_uclamp(struct task_struct *p, 1906 const struct sched_attr *attr) 1907 { 1908 enum uclamp_id clamp_id; 1909 1910 for_each_clamp_id(clamp_id) { 1911 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id]; 1912 unsigned int value; 1913 1914 if (!uclamp_reset(attr, clamp_id, uc_se)) 1915 continue; 1916 1917 /* 1918 * RT by default have a 100% boost value that could be modified 1919 * at runtime. 1920 */ 1921 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN)) 1922 value = sysctl_sched_uclamp_util_min_rt_default; 1923 else 1924 value = uclamp_none(clamp_id); 1925 1926 uclamp_se_set(uc_se, value, false); 1927 1928 } 1929 1930 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP))) 1931 return; 1932 1933 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN && 1934 attr->sched_util_min != -1) { 1935 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN], 1936 attr->sched_util_min, true); 1937 } 1938 1939 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX && 1940 attr->sched_util_max != -1) { 1941 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX], 1942 attr->sched_util_max, true); 1943 } 1944 } 1945 1946 static void uclamp_fork(struct task_struct *p) 1947 { 1948 enum uclamp_id clamp_id; 1949 1950 /* 1951 * We don't need to hold task_rq_lock() when updating p->uclamp_* here 1952 * as the task is still at its early fork stages. 
1953 */ 1954 for_each_clamp_id(clamp_id) 1955 p->uclamp[clamp_id].active = false; 1956 1957 if (likely(!p->sched_reset_on_fork)) 1958 return; 1959 1960 for_each_clamp_id(clamp_id) { 1961 uclamp_se_set(&p->uclamp_req[clamp_id], 1962 uclamp_none(clamp_id), false); 1963 } 1964 } 1965 1966 static void uclamp_post_fork(struct task_struct *p) 1967 { 1968 uclamp_update_util_min_rt_default(p); 1969 } 1970 1971 static void __init init_uclamp_rq(struct rq *rq) 1972 { 1973 enum uclamp_id clamp_id; 1974 struct uclamp_rq *uc_rq = rq->uclamp; 1975 1976 for_each_clamp_id(clamp_id) { 1977 uc_rq[clamp_id] = (struct uclamp_rq) { 1978 .value = uclamp_none(clamp_id) 1979 }; 1980 } 1981 1982 rq->uclamp_flags = UCLAMP_FLAG_IDLE; 1983 } 1984 1985 static void __init init_uclamp(void) 1986 { 1987 struct uclamp_se uc_max = {}; 1988 enum uclamp_id clamp_id; 1989 int cpu; 1990 1991 for_each_possible_cpu(cpu) 1992 init_uclamp_rq(cpu_rq(cpu)); 1993 1994 for_each_clamp_id(clamp_id) { 1995 uclamp_se_set(&init_task.uclamp_req[clamp_id], 1996 uclamp_none(clamp_id), false); 1997 } 1998 1999 /* System defaults allow max clamp values for both indexes */ 2000 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false); 2001 for_each_clamp_id(clamp_id) { 2002 uclamp_default[clamp_id] = uc_max; 2003 #ifdef CONFIG_UCLAMP_TASK_GROUP 2004 root_task_group.uclamp_req[clamp_id] = uc_max; 2005 root_task_group.uclamp[clamp_id] = uc_max; 2006 #endif 2007 } 2008 } 2009 2010 #else /* CONFIG_UCLAMP_TASK */ 2011 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } 2012 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } 2013 static inline int uclamp_validate(struct task_struct *p, 2014 const struct sched_attr *attr) 2015 { 2016 return -EOPNOTSUPP; 2017 } 2018 static void __setscheduler_uclamp(struct task_struct *p, 2019 const struct sched_attr *attr) { } 2020 static inline void uclamp_fork(struct task_struct *p) { } 2021 static inline void uclamp_post_fork(struct task_struct *p) { } 2022 static inline void init_uclamp(void) { } 2023 #endif /* CONFIG_UCLAMP_TASK */ 2024 2025 bool sched_task_on_rq(struct task_struct *p) 2026 { 2027 return task_on_rq_queued(p); 2028 } 2029 2030 unsigned long get_wchan(struct task_struct *p) 2031 { 2032 unsigned long ip = 0; 2033 unsigned int state; 2034 2035 if (!p || p == current) 2036 return 0; 2037 2038 /* Only get wchan if task is blocked and we can keep it that way. 
*/ 2039 raw_spin_lock_irq(&p->pi_lock); 2040 state = READ_ONCE(p->__state); 2041 smp_rmb(); /* see try_to_wake_up() */ 2042 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) 2043 ip = __get_wchan(p); 2044 raw_spin_unlock_irq(&p->pi_lock); 2045 2046 return ip; 2047 } 2048 2049 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 2050 { 2051 if (!(flags & ENQUEUE_NOCLOCK)) 2052 update_rq_clock(rq); 2053 2054 if (!(flags & ENQUEUE_RESTORE)) { 2055 sched_info_enqueue(rq, p); 2056 psi_enqueue(p, flags & ENQUEUE_WAKEUP); 2057 } 2058 2059 uclamp_rq_inc(rq, p); 2060 p->sched_class->enqueue_task(rq, p, flags); 2061 2062 if (sched_core_enabled(rq)) 2063 sched_core_enqueue(rq, p); 2064 } 2065 2066 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 2067 { 2068 if (sched_core_enabled(rq)) 2069 sched_core_dequeue(rq, p, flags); 2070 2071 if (!(flags & DEQUEUE_NOCLOCK)) 2072 update_rq_clock(rq); 2073 2074 if (!(flags & DEQUEUE_SAVE)) { 2075 sched_info_dequeue(rq, p); 2076 psi_dequeue(p, flags & DEQUEUE_SLEEP); 2077 } 2078 2079 uclamp_rq_dec(rq, p); 2080 p->sched_class->dequeue_task(rq, p, flags); 2081 } 2082 2083 void activate_task(struct rq *rq, struct task_struct *p, int flags) 2084 { 2085 enqueue_task(rq, p, flags); 2086 2087 p->on_rq = TASK_ON_RQ_QUEUED; 2088 } 2089 2090 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 2091 { 2092 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING; 2093 2094 dequeue_task(rq, p, flags); 2095 } 2096 2097 static inline int __normal_prio(int policy, int rt_prio, int nice) 2098 { 2099 int prio; 2100 2101 if (dl_policy(policy)) 2102 prio = MAX_DL_PRIO - 1; 2103 else if (rt_policy(policy)) 2104 prio = MAX_RT_PRIO - 1 - rt_prio; 2105 else 2106 prio = NICE_TO_PRIO(nice); 2107 2108 return prio; 2109 } 2110 2111 /* 2112 * Calculate the expected normal priority: i.e. priority 2113 * without taking RT-inheritance into account. Might be 2114 * boosted by interactivity modifiers. Changes upon fork, 2115 * setprio syscalls, and whenever the interactivity 2116 * estimator recalculates. 2117 */ 2118 static inline int normal_prio(struct task_struct *p) 2119 { 2120 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio)); 2121 } 2122 2123 /* 2124 * Calculate the current priority, i.e. the priority 2125 * taken into account by the scheduler. This value might 2126 * be boosted by RT tasks, or might be boosted by 2127 * interactivity modifiers. Will be RT if the task got 2128 * RT-boosted. If not then it returns p->normal_prio. 2129 */ 2130 static int effective_prio(struct task_struct *p) 2131 { 2132 p->normal_prio = normal_prio(p); 2133 /* 2134 * If we are RT tasks or we were boosted to RT priority, 2135 * keep the priority unchanged. Otherwise, update priority 2136 * to the normal priority: 2137 */ 2138 if (!rt_prio(p->prio)) 2139 return p->normal_prio; 2140 return p->prio; 2141 } 2142 2143 /** 2144 * task_curr - is this task currently executing on a CPU? 2145 * @p: the task in question. 2146 * 2147 * Return: 1 if the task is currently executing. 0 otherwise. 2148 */ 2149 inline int task_curr(const struct task_struct *p) 2150 { 2151 return cpu_curr(task_cpu(p)) == p; 2152 } 2153 2154 /* 2155 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, 2156 * use the balance_callback list if you want balancing. 2157 * 2158 * this means any call to check_class_changed() must be followed by a call to 2159 * balance_callback(). 
2160 */ 2161 static inline void check_class_changed(struct rq *rq, struct task_struct *p, 2162 const struct sched_class *prev_class, 2163 int oldprio) 2164 { 2165 if (prev_class != p->sched_class) { 2166 if (prev_class->switched_from) 2167 prev_class->switched_from(rq, p); 2168 2169 p->sched_class->switched_to(rq, p); 2170 } else if (oldprio != p->prio || dl_task(p)) 2171 p->sched_class->prio_changed(rq, p, oldprio); 2172 } 2173 2174 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) 2175 { 2176 if (p->sched_class == rq->curr->sched_class) 2177 rq->curr->sched_class->check_preempt_curr(rq, p, flags); 2178 else if (sched_class_above(p->sched_class, rq->curr->sched_class)) 2179 resched_curr(rq); 2180 2181 /* 2182 * A queue event has occurred, and we're going to schedule. In 2183 * this case, we can save a useless back to back clock update. 2184 */ 2185 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) 2186 rq_clock_skip_update(rq); 2187 } 2188 2189 #ifdef CONFIG_SMP 2190 2191 static void 2192 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags); 2193 2194 static int __set_cpus_allowed_ptr(struct task_struct *p, 2195 const struct cpumask *new_mask, 2196 u32 flags); 2197 2198 static void migrate_disable_switch(struct rq *rq, struct task_struct *p) 2199 { 2200 if (likely(!p->migration_disabled)) 2201 return; 2202 2203 if (p->cpus_ptr != &p->cpus_mask) 2204 return; 2205 2206 /* 2207 * Violates locking rules! see comment in __do_set_cpus_allowed(). 2208 */ 2209 __do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE); 2210 } 2211 2212 void migrate_disable(void) 2213 { 2214 struct task_struct *p = current; 2215 2216 if (p->migration_disabled) { 2217 p->migration_disabled++; 2218 return; 2219 } 2220 2221 preempt_disable(); 2222 this_rq()->nr_pinned++; 2223 p->migration_disabled = 1; 2224 preempt_enable(); 2225 } 2226 EXPORT_SYMBOL_GPL(migrate_disable); 2227 2228 void migrate_enable(void) 2229 { 2230 struct task_struct *p = current; 2231 2232 if (p->migration_disabled > 1) { 2233 p->migration_disabled--; 2234 return; 2235 } 2236 2237 if (WARN_ON_ONCE(!p->migration_disabled)) 2238 return; 2239 2240 /* 2241 * Ensure stop_task runs either before or after this, and that 2242 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule(). 2243 */ 2244 preempt_disable(); 2245 if (p->cpus_ptr != &p->cpus_mask) 2246 __set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE); 2247 /* 2248 * Mustn't clear migration_disabled() until cpus_ptr points back at the 2249 * regular cpus_mask, otherwise things that race (eg. 2250 * select_fallback_rq) get confused. 2251 */ 2252 barrier(); 2253 p->migration_disabled = 0; 2254 this_rq()->nr_pinned--; 2255 preempt_enable(); 2256 } 2257 EXPORT_SYMBOL_GPL(migrate_enable); 2258 2259 static inline bool rq_has_pinned_tasks(struct rq *rq) 2260 { 2261 return rq->nr_pinned; 2262 } 2263 2264 /* 2265 * Per-CPU kthreads are allowed to run on !active && online CPUs, see 2266 * __set_cpus_allowed_ptr() and select_fallback_rq(). 2267 */ 2268 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) 2269 { 2270 /* When not in the task's cpumask, no point in looking further. */ 2271 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 2272 return false; 2273 2274 /* migrate_disabled() must be allowed to finish. */ 2275 if (is_migration_disabled(p)) 2276 return cpu_online(cpu); 2277 2278 /* Non kernel threads are not allowed during either online or offline. 
*/ 2279 if (!(p->flags & PF_KTHREAD)) 2280 return cpu_active(cpu) && task_cpu_possible(cpu, p); 2281 2282 /* KTHREAD_IS_PER_CPU is always allowed. */ 2283 if (kthread_is_per_cpu(p)) 2284 return cpu_online(cpu); 2285 2286 /* Regular kernel threads don't get to stay during offline. */ 2287 if (cpu_dying(cpu)) 2288 return false; 2289 2290 /* But are allowed during online. */ 2291 return cpu_online(cpu); 2292 } 2293 2294 /* 2295 * This is how migration works: 2296 * 2297 * 1) we invoke migration_cpu_stop() on the target CPU using 2298 * stop_one_cpu(). 2299 * 2) stopper starts to run (implicitly forcing the migrated thread 2300 * off the CPU) 2301 * 3) it checks whether the migrated task is still in the wrong runqueue. 2302 * 4) if it's in the wrong runqueue then the migration thread removes 2303 * it and puts it into the right queue. 2304 * 5) stopper completes and stop_one_cpu() returns and the migration 2305 * is done. 2306 */ 2307 2308 /* 2309 * move_queued_task - move a queued task to new rq. 2310 * 2311 * Returns (locked) new rq. Old rq's lock is released. 2312 */ 2313 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, 2314 struct task_struct *p, int new_cpu) 2315 { 2316 lockdep_assert_rq_held(rq); 2317 2318 deactivate_task(rq, p, DEQUEUE_NOCLOCK); 2319 set_task_cpu(p, new_cpu); 2320 rq_unlock(rq, rf); 2321 2322 rq = cpu_rq(new_cpu); 2323 2324 rq_lock(rq, rf); 2325 WARN_ON_ONCE(task_cpu(p) != new_cpu); 2326 activate_task(rq, p, 0); 2327 check_preempt_curr(rq, p, 0); 2328 2329 return rq; 2330 } 2331 2332 struct migration_arg { 2333 struct task_struct *task; 2334 int dest_cpu; 2335 struct set_affinity_pending *pending; 2336 }; 2337 2338 /* 2339 * @refs: number of wait_for_completion() 2340 * @stop_pending: is @stop_work in use 2341 */ 2342 struct set_affinity_pending { 2343 refcount_t refs; 2344 unsigned int stop_pending; 2345 struct completion done; 2346 struct cpu_stop_work stop_work; 2347 struct migration_arg arg; 2348 }; 2349 2350 /* 2351 * Move (not current) task off this CPU, onto the destination CPU. We're doing 2352 * this because either it can't run here any more (set_cpus_allowed() 2353 * away from this CPU, or CPU going down), or because we're 2354 * attempting to rebalance this task on exec (sched_exec). 2355 * 2356 * So we race with normal scheduler movements, but that's OK, as long 2357 * as the task is no longer on this CPU. 2358 */ 2359 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, 2360 struct task_struct *p, int dest_cpu) 2361 { 2362 /* Affinity changed (again). */ 2363 if (!is_cpu_allowed(p, dest_cpu)) 2364 return rq; 2365 2366 update_rq_clock(rq); 2367 rq = move_queued_task(rq, rf, p, dest_cpu); 2368 2369 return rq; 2370 } 2371 2372 /* 2373 * migration_cpu_stop - this will be executed by a highprio stopper thread 2374 * and performs thread migration by bumping thread off CPU then 2375 * 'pushing' onto another runqueue. 2376 */ 2377 static int migration_cpu_stop(void *data) 2378 { 2379 struct migration_arg *arg = data; 2380 struct set_affinity_pending *pending = arg->pending; 2381 struct task_struct *p = arg->task; 2382 struct rq *rq = this_rq(); 2383 bool complete = false; 2384 struct rq_flags rf; 2385 2386 /* 2387 * The original target CPU might have gone down and we might 2388 * be on another CPU but it doesn't matter. 
2389 */ 2390 local_irq_save(rf.flags); 2391 /* 2392 * We need to explicitly wake pending tasks before running 2393 * __migrate_task() such that we will not miss enforcing cpus_ptr 2394 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 2395 */ 2396 flush_smp_call_function_queue(); 2397 2398 raw_spin_lock(&p->pi_lock); 2399 rq_lock(rq, &rf); 2400 2401 /* 2402 * If we were passed a pending, then ->stop_pending was set, thus 2403 * p->migration_pending must have remained stable. 2404 */ 2405 WARN_ON_ONCE(pending && pending != p->migration_pending); 2406 2407 /* 2408 * If task_rq(p) != rq, it cannot be migrated here, because we're 2409 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because 2410 * we're holding p->pi_lock. 2411 */ 2412 if (task_rq(p) == rq) { 2413 if (is_migration_disabled(p)) 2414 goto out; 2415 2416 if (pending) { 2417 p->migration_pending = NULL; 2418 complete = true; 2419 2420 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) 2421 goto out; 2422 } 2423 2424 if (task_on_rq_queued(p)) 2425 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); 2426 else 2427 p->wake_cpu = arg->dest_cpu; 2428 2429 /* 2430 * XXX __migrate_task() can fail, at which point we might end 2431 * up running on a dodgy CPU, AFAICT this can only happen 2432 * during CPU hotplug, at which point we'll get pushed out 2433 * anyway, so it's probably not a big deal. 2434 */ 2435 2436 } else if (pending) { 2437 /* 2438 * This happens when we get migrated between migrate_enable()'s 2439 * preempt_enable() and scheduling the stopper task. At that 2440 * point we're a regular task again and not current anymore. 2441 * 2442 * A !PREEMPT kernel has a giant hole here, which makes it far 2443 * more likely. 2444 */ 2445 2446 /* 2447 * The task moved before the stopper got to run. We're holding 2448 * ->pi_lock, so the allowed mask is stable - if it got 2449 * somewhere allowed, we're done. 2450 */ 2451 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { 2452 p->migration_pending = NULL; 2453 complete = true; 2454 goto out; 2455 } 2456 2457 /* 2458 * When migrate_enable() hits a rq mis-match we can't reliably 2459 * determine is_migration_disabled() and so have to chase after 2460 * it. 
2461 */ 2462 WARN_ON_ONCE(!pending->stop_pending); 2463 task_rq_unlock(rq, p, &rf); 2464 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, 2465 &pending->arg, &pending->stop_work); 2466 return 0; 2467 } 2468 out: 2469 if (pending) 2470 pending->stop_pending = false; 2471 task_rq_unlock(rq, p, &rf); 2472 2473 if (complete) 2474 complete_all(&pending->done); 2475 2476 return 0; 2477 } 2478 2479 int push_cpu_stop(void *arg) 2480 { 2481 struct rq *lowest_rq = NULL, *rq = this_rq(); 2482 struct task_struct *p = arg; 2483 2484 raw_spin_lock_irq(&p->pi_lock); 2485 raw_spin_rq_lock(rq); 2486 2487 if (task_rq(p) != rq) 2488 goto out_unlock; 2489 2490 if (is_migration_disabled(p)) { 2491 p->migration_flags |= MDF_PUSH; 2492 goto out_unlock; 2493 } 2494 2495 p->migration_flags &= ~MDF_PUSH; 2496 2497 if (p->sched_class->find_lock_rq) 2498 lowest_rq = p->sched_class->find_lock_rq(p, rq); 2499 2500 if (!lowest_rq) 2501 goto out_unlock; 2502 2503 // XXX validate p is still the highest prio task 2504 if (task_rq(p) == rq) { 2505 deactivate_task(rq, p, 0); 2506 set_task_cpu(p, lowest_rq->cpu); 2507 activate_task(lowest_rq, p, 0); 2508 resched_curr(lowest_rq); 2509 } 2510 2511 double_unlock_balance(rq, lowest_rq); 2512 2513 out_unlock: 2514 rq->push_busy = false; 2515 raw_spin_rq_unlock(rq); 2516 raw_spin_unlock_irq(&p->pi_lock); 2517 2518 put_task_struct(p); 2519 return 0; 2520 } 2521 2522 /* 2523 * sched_class::set_cpus_allowed must do the below, but is not required to 2524 * actually call this function. 2525 */ 2526 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags) 2527 { 2528 if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { 2529 p->cpus_ptr = new_mask; 2530 return; 2531 } 2532 2533 cpumask_copy(&p->cpus_mask, new_mask); 2534 p->nr_cpus_allowed = cpumask_weight(new_mask); 2535 } 2536 2537 static void 2538 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags) 2539 { 2540 struct rq *rq = task_rq(p); 2541 bool queued, running; 2542 2543 /* 2544 * This here violates the locking rules for affinity, since we're only 2545 * supposed to change these variables while holding both rq->lock and 2546 * p->pi_lock. 2547 * 2548 * HOWEVER, it magically works, because ttwu() is the only code that 2549 * accesses these variables under p->pi_lock and only does so after 2550 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() 2551 * before finish_task(). 2552 * 2553 * XXX do further audits, this smells like something putrid. 2554 */ 2555 if (flags & SCA_MIGRATE_DISABLE) 2556 SCHED_WARN_ON(!p->on_cpu); 2557 else 2558 lockdep_assert_held(&p->pi_lock); 2559 2560 queued = task_on_rq_queued(p); 2561 running = task_current(rq, p); 2562 2563 if (queued) { 2564 /* 2565 * Because __kthread_bind() calls this on blocked tasks without 2566 * holding rq->lock. 
2567 */ 2568 lockdep_assert_rq_held(rq); 2569 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 2570 } 2571 if (running) 2572 put_prev_task(rq, p); 2573 2574 p->sched_class->set_cpus_allowed(p, new_mask, flags); 2575 2576 if (queued) 2577 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 2578 if (running) 2579 set_next_task(rq, p); 2580 } 2581 2582 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 2583 { 2584 __do_set_cpus_allowed(p, new_mask, 0); 2585 } 2586 2587 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, 2588 int node) 2589 { 2590 if (!src->user_cpus_ptr) 2591 return 0; 2592 2593 dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node); 2594 if (!dst->user_cpus_ptr) 2595 return -ENOMEM; 2596 2597 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); 2598 return 0; 2599 } 2600 2601 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p) 2602 { 2603 struct cpumask *user_mask = NULL; 2604 2605 swap(p->user_cpus_ptr, user_mask); 2606 2607 return user_mask; 2608 } 2609 2610 void release_user_cpus_ptr(struct task_struct *p) 2611 { 2612 kfree(clear_user_cpus_ptr(p)); 2613 } 2614 2615 /* 2616 * This function is wildly self concurrent; here be dragons. 2617 * 2618 * 2619 * When given a valid mask, __set_cpus_allowed_ptr() must block until the 2620 * designated task is enqueued on an allowed CPU. If that task is currently 2621 * running, we have to kick it out using the CPU stopper. 2622 * 2623 * Migrate-Disable comes along and tramples all over our nice sandcastle. 2624 * Consider: 2625 * 2626 * Initial conditions: P0->cpus_mask = [0, 1] 2627 * 2628 * P0@CPU0 P1 2629 * 2630 * migrate_disable(); 2631 * <preempted> 2632 * set_cpus_allowed_ptr(P0, [1]); 2633 * 2634 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes 2635 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region). 2636 * This means we need the following scheme: 2637 * 2638 * P0@CPU0 P1 2639 * 2640 * migrate_disable(); 2641 * <preempted> 2642 * set_cpus_allowed_ptr(P0, [1]); 2643 * <blocks> 2644 * <resumes> 2645 * migrate_enable(); 2646 * __set_cpus_allowed_ptr(); 2647 * <wakes local stopper> 2648 * `--> <woken on migration completion> 2649 * 2650 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple 2651 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any 2652 * task p are serialized by p->pi_lock, which we can leverage: the one that 2653 * should come into effect at the end of the Migrate-Disable region is the last 2654 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask), 2655 * but we still need to properly signal those waiting tasks at the appropriate 2656 * moment. 2657 * 2658 * This is implemented using struct set_affinity_pending. The first 2659 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will 2660 * setup an instance of that struct and install it on the targeted task_struct. 2661 * Any and all further callers will reuse that instance. Those then wait for 2662 * a completion signaled at the tail of the CPU stopper callback (1), triggered 2663 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()). 2664 * 2665 * 2666 * (1) In the cases covered above. There is one more where the completion is 2667 * signaled within affine_move_task() itself: when a subsequent affinity request 2668 * occurs after the stopper bailed out due to the targeted task still being 2669 * Migrate-Disable. 
Consider: 2670 * 2671 * Initial conditions: P0->cpus_mask = [0, 1] 2672 * 2673 * CPU0 P1 P2 2674 * <P0> 2675 * migrate_disable(); 2676 * <preempted> 2677 * set_cpus_allowed_ptr(P0, [1]); 2678 * <blocks> 2679 * <migration/0> 2680 * migration_cpu_stop() 2681 * is_migration_disabled() 2682 * <bails> 2683 * set_cpus_allowed_ptr(P0, [0, 1]); 2684 * <signal completion> 2685 * <awakes> 2686 * 2687 * Note that the above is safe vs a concurrent migrate_enable(), as any 2688 * pending affinity completion is preceded by an uninstallation of 2689 * p->migration_pending done with p->pi_lock held. 2690 */ 2691 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, 2692 int dest_cpu, unsigned int flags) 2693 { 2694 struct set_affinity_pending my_pending = { }, *pending = NULL; 2695 bool stop_pending, complete = false; 2696 2697 /* Can the task run on the task's current CPU? If so, we're done */ 2698 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { 2699 struct task_struct *push_task = NULL; 2700 2701 if ((flags & SCA_MIGRATE_ENABLE) && 2702 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { 2703 rq->push_busy = true; 2704 push_task = get_task_struct(p); 2705 } 2706 2707 /* 2708 * If there are pending waiters, but no pending stop_work, 2709 * then complete now. 2710 */ 2711 pending = p->migration_pending; 2712 if (pending && !pending->stop_pending) { 2713 p->migration_pending = NULL; 2714 complete = true; 2715 } 2716 2717 task_rq_unlock(rq, p, rf); 2718 2719 if (push_task) { 2720 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, 2721 p, &rq->push_work); 2722 } 2723 2724 if (complete) 2725 complete_all(&pending->done); 2726 2727 return 0; 2728 } 2729 2730 if (!(flags & SCA_MIGRATE_ENABLE)) { 2731 /* serialized by p->pi_lock */ 2732 if (!p->migration_pending) { 2733 /* Install the request */ 2734 refcount_set(&my_pending.refs, 1); 2735 init_completion(&my_pending.done); 2736 my_pending.arg = (struct migration_arg) { 2737 .task = p, 2738 .dest_cpu = dest_cpu, 2739 .pending = &my_pending, 2740 }; 2741 2742 p->migration_pending = &my_pending; 2743 } else { 2744 pending = p->migration_pending; 2745 refcount_inc(&pending->refs); 2746 /* 2747 * Affinity has changed, but we've already installed a 2748 * pending. migration_cpu_stop() *must* see this, else 2749 * we risk a completion of the pending despite having a 2750 * task on a disallowed CPU. 2751 * 2752 * Serialized by p->pi_lock, so this is safe. 2753 */ 2754 pending->arg.dest_cpu = dest_cpu; 2755 } 2756 } 2757 pending = p->migration_pending; 2758 /* 2759 * - !MIGRATE_ENABLE: 2760 * we'll have installed a pending if there wasn't one already. 2761 * 2762 * - MIGRATE_ENABLE: 2763 * we're here because the current CPU isn't matching anymore, 2764 * the only way that can happen is because of a concurrent 2765 * set_cpus_allowed_ptr() call, which should then still be 2766 * pending completion. 2767 * 2768 * Either way, we really should have a @pending here. 2769 */ 2770 if (WARN_ON_ONCE(!pending)) { 2771 task_rq_unlock(rq, p, rf); 2772 return -EINVAL; 2773 } 2774 2775 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { 2776 /* 2777 * MIGRATE_ENABLE gets here because 'p == current', but for 2778 * anything else we cannot do is_migration_disabled(), punt 2779 * and have the stopper function handle it all race-free. 
2780 */ 2781 stop_pending = pending->stop_pending; 2782 if (!stop_pending) 2783 pending->stop_pending = true; 2784 2785 if (flags & SCA_MIGRATE_ENABLE) 2786 p->migration_flags &= ~MDF_PUSH; 2787 2788 task_rq_unlock(rq, p, rf); 2789 2790 if (!stop_pending) { 2791 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, 2792 &pending->arg, &pending->stop_work); 2793 } 2794 2795 if (flags & SCA_MIGRATE_ENABLE) 2796 return 0; 2797 } else { 2798 2799 if (!is_migration_disabled(p)) { 2800 if (task_on_rq_queued(p)) 2801 rq = move_queued_task(rq, rf, p, dest_cpu); 2802 2803 if (!pending->stop_pending) { 2804 p->migration_pending = NULL; 2805 complete = true; 2806 } 2807 } 2808 task_rq_unlock(rq, p, rf); 2809 2810 if (complete) 2811 complete_all(&pending->done); 2812 } 2813 2814 wait_for_completion(&pending->done); 2815 2816 if (refcount_dec_and_test(&pending->refs)) 2817 wake_up_var(&pending->refs); /* No UaF, just an address */ 2818 2819 /* 2820 * Block the original owner of &pending until all subsequent callers 2821 * have seen the completion and decremented the refcount 2822 */ 2823 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs)); 2824 2825 /* ARGH */ 2826 WARN_ON_ONCE(my_pending.stop_pending); 2827 2828 return 0; 2829 } 2830 2831 /* 2832 * Called with both p->pi_lock and rq->lock held; drops both before returning. 2833 */ 2834 static int __set_cpus_allowed_ptr_locked(struct task_struct *p, 2835 const struct cpumask *new_mask, 2836 u32 flags, 2837 struct rq *rq, 2838 struct rq_flags *rf) 2839 __releases(rq->lock) 2840 __releases(p->pi_lock) 2841 { 2842 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); 2843 const struct cpumask *cpu_valid_mask = cpu_active_mask; 2844 bool kthread = p->flags & PF_KTHREAD; 2845 struct cpumask *user_mask = NULL; 2846 unsigned int dest_cpu; 2847 int ret = 0; 2848 2849 update_rq_clock(rq); 2850 2851 if (kthread || is_migration_disabled(p)) { 2852 /* 2853 * Kernel threads are allowed on online && !active CPUs, 2854 * however, during cpu-hot-unplug, even these might get pushed 2855 * away if not KTHREAD_IS_PER_CPU. 2856 * 2857 * Specifically, migration_disabled() tasks must not fail the 2858 * cpumask_any_and_distribute() pick below, esp. so on 2859 * SCA_MIGRATE_ENABLE, otherwise we'll not call 2860 * set_cpus_allowed_common() and actually reset p->cpus_ptr. 2861 */ 2862 cpu_valid_mask = cpu_online_mask; 2863 } 2864 2865 if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) { 2866 ret = -EINVAL; 2867 goto out; 2868 } 2869 2870 /* 2871 * Must re-check here, to close a race against __kthread_bind(), 2872 * sched_setaffinity() is not guaranteed to observe the flag. 2873 */ 2874 if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { 2875 ret = -EINVAL; 2876 goto out; 2877 } 2878 2879 if (!(flags & SCA_MIGRATE_ENABLE)) { 2880 if (cpumask_equal(&p->cpus_mask, new_mask)) 2881 goto out; 2882 2883 if (WARN_ON_ONCE(p == current && 2884 is_migration_disabled(p) && 2885 !cpumask_test_cpu(task_cpu(p), new_mask))) { 2886 ret = -EBUSY; 2887 goto out; 2888 } 2889 } 2890 2891 /* 2892 * Picking a ~random cpu helps in cases where we are changing affinity 2893 * for groups of tasks (ie. cpuset), so that load balancing is not 2894 * immediately required to distribute the tasks within their new mask. 
2895 */ 2896 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask); 2897 if (dest_cpu >= nr_cpu_ids) { 2898 ret = -EINVAL; 2899 goto out; 2900 } 2901 2902 __do_set_cpus_allowed(p, new_mask, flags); 2903 2904 if (flags & SCA_USER) 2905 user_mask = clear_user_cpus_ptr(p); 2906 2907 ret = affine_move_task(rq, p, rf, dest_cpu, flags); 2908 2909 kfree(user_mask); 2910 2911 return ret; 2912 2913 out: 2914 task_rq_unlock(rq, p, rf); 2915 2916 return ret; 2917 } 2918 2919 /* 2920 * Change a given task's CPU affinity. Migrate the thread to a 2921 * proper CPU and schedule it away if the CPU it's executing on 2922 * is removed from the allowed bitmask. 2923 * 2924 * NOTE: the caller must have a valid reference to the task, the 2925 * task must not exit() & deallocate itself prematurely. The 2926 * call is not atomic; no spinlocks may be held. 2927 */ 2928 static int __set_cpus_allowed_ptr(struct task_struct *p, 2929 const struct cpumask *new_mask, u32 flags) 2930 { 2931 struct rq_flags rf; 2932 struct rq *rq; 2933 2934 rq = task_rq_lock(p, &rf); 2935 return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf); 2936 } 2937 2938 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 2939 { 2940 return __set_cpus_allowed_ptr(p, new_mask, 0); 2941 } 2942 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 2943 2944 /* 2945 * Change a given task's CPU affinity to the intersection of its current 2946 * affinity mask and @subset_mask, writing the resulting mask to @new_mask 2947 * and pointing @p->user_cpus_ptr to a copy of the old mask. 2948 * If the resulting mask is empty, leave the affinity unchanged and return 2949 * -EINVAL. 2950 */ 2951 static int restrict_cpus_allowed_ptr(struct task_struct *p, 2952 struct cpumask *new_mask, 2953 const struct cpumask *subset_mask) 2954 { 2955 struct cpumask *user_mask = NULL; 2956 struct rq_flags rf; 2957 struct rq *rq; 2958 int err; 2959 2960 if (!p->user_cpus_ptr) { 2961 user_mask = kmalloc(cpumask_size(), GFP_KERNEL); 2962 if (!user_mask) 2963 return -ENOMEM; 2964 } 2965 2966 rq = task_rq_lock(p, &rf); 2967 2968 /* 2969 * Forcefully restricting the affinity of a deadline task is 2970 * likely to cause problems, so fail and noisily override the 2971 * mask entirely. 2972 */ 2973 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 2974 err = -EPERM; 2975 goto err_unlock; 2976 } 2977 2978 if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) { 2979 err = -EINVAL; 2980 goto err_unlock; 2981 } 2982 2983 /* 2984 * We're about to butcher the task affinity, so keep track of what 2985 * the user asked for in case we're able to restore it later on. 2986 */ 2987 if (user_mask) { 2988 cpumask_copy(user_mask, p->cpus_ptr); 2989 p->user_cpus_ptr = user_mask; 2990 } 2991 2992 return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf); 2993 2994 err_unlock: 2995 task_rq_unlock(rq, p, &rf); 2996 kfree(user_mask); 2997 return err; 2998 } 2999 3000 /* 3001 * Restrict the CPU affinity of task @p so that it is a subset of 3002 * task_cpu_possible_mask() and point @p->user_cpu_ptr to a copy of the 3003 * old affinity mask. If the resulting mask is empty, we warn and walk 3004 * up the cpuset hierarchy until we find a suitable mask. 
3005 */ 3006 void force_compatible_cpus_allowed_ptr(struct task_struct *p) 3007 { 3008 cpumask_var_t new_mask; 3009 const struct cpumask *override_mask = task_cpu_possible_mask(p); 3010 3011 alloc_cpumask_var(&new_mask, GFP_KERNEL); 3012 3013 /* 3014 * __migrate_task() can fail silently in the face of concurrent 3015 * offlining of the chosen destination CPU, so take the hotplug 3016 * lock to ensure that the migration succeeds. 3017 */ 3018 cpus_read_lock(); 3019 if (!cpumask_available(new_mask)) 3020 goto out_set_mask; 3021 3022 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask)) 3023 goto out_free_mask; 3024 3025 /* 3026 * We failed to find a valid subset of the affinity mask for the 3027 * task, so override it based on its cpuset hierarchy. 3028 */ 3029 cpuset_cpus_allowed(p, new_mask); 3030 override_mask = new_mask; 3031 3032 out_set_mask: 3033 if (printk_ratelimit()) { 3034 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n", 3035 task_pid_nr(p), p->comm, 3036 cpumask_pr_args(override_mask)); 3037 } 3038 3039 WARN_ON(set_cpus_allowed_ptr(p, override_mask)); 3040 out_free_mask: 3041 cpus_read_unlock(); 3042 free_cpumask_var(new_mask); 3043 } 3044 3045 static int 3046 __sched_setaffinity(struct task_struct *p, const struct cpumask *mask); 3047 3048 /* 3049 * Restore the affinity of a task @p which was previously restricted by a 3050 * call to force_compatible_cpus_allowed_ptr(). This will clear (and free) 3051 * @p->user_cpus_ptr. 3052 * 3053 * It is the caller's responsibility to serialise this with any calls to 3054 * force_compatible_cpus_allowed_ptr(@p). 3055 */ 3056 void relax_compatible_cpus_allowed_ptr(struct task_struct *p) 3057 { 3058 struct cpumask *user_mask = p->user_cpus_ptr; 3059 unsigned long flags; 3060 3061 /* 3062 * Try to restore the old affinity mask. If this fails, then 3063 * we free the mask explicitly to avoid it being inherited across 3064 * a subsequent fork(). 3065 */ 3066 if (!user_mask || !__sched_setaffinity(p, user_mask)) 3067 return; 3068 3069 raw_spin_lock_irqsave(&p->pi_lock, flags); 3070 user_mask = clear_user_cpus_ptr(p); 3071 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3072 3073 kfree(user_mask); 3074 } 3075 3076 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 3077 { 3078 #ifdef CONFIG_SCHED_DEBUG 3079 unsigned int state = READ_ONCE(p->__state); 3080 3081 /* 3082 * We should never call set_task_cpu() on a blocked task, 3083 * ttwu() will sort out the placement. 3084 */ 3085 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); 3086 3087 /* 3088 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, 3089 * because schedstat_wait_{start,end} rebase migrating task's wait_start 3090 * time relying on p->on_rq. 3091 */ 3092 WARN_ON_ONCE(state == TASK_RUNNING && 3093 p->sched_class == &fair_sched_class && 3094 (p->on_rq && !task_on_rq_migrating(p))); 3095 3096 #ifdef CONFIG_LOCKDEP 3097 /* 3098 * The caller should hold either p->pi_lock or rq->lock, when changing 3099 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 3100 * 3101 * sched_move_task() holds both and thus holding either pins the cgroup, 3102 * see task_group(). 3103 * 3104 * Furthermore, all task_rq users should acquire both locks, see 3105 * task_rq_lock(). 3106 */ 3107 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 3108 lockdep_is_held(__rq_lockp(task_rq(p))))); 3109 #endif 3110 /* 3111 * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 
3112 */ 3113 WARN_ON_ONCE(!cpu_online(new_cpu)); 3114 3115 WARN_ON_ONCE(is_migration_disabled(p)); 3116 #endif 3117 3118 trace_sched_migrate_task(p, new_cpu); 3119 3120 if (task_cpu(p) != new_cpu) { 3121 if (p->sched_class->migrate_task_rq) 3122 p->sched_class->migrate_task_rq(p, new_cpu); 3123 p->se.nr_migrations++; 3124 rseq_migrate(p); 3125 perf_event_task_migrate(p); 3126 } 3127 3128 __set_task_cpu(p, new_cpu); 3129 } 3130 3131 #ifdef CONFIG_NUMA_BALANCING 3132 static void __migrate_swap_task(struct task_struct *p, int cpu) 3133 { 3134 if (task_on_rq_queued(p)) { 3135 struct rq *src_rq, *dst_rq; 3136 struct rq_flags srf, drf; 3137 3138 src_rq = task_rq(p); 3139 dst_rq = cpu_rq(cpu); 3140 3141 rq_pin_lock(src_rq, &srf); 3142 rq_pin_lock(dst_rq, &drf); 3143 3144 deactivate_task(src_rq, p, 0); 3145 set_task_cpu(p, cpu); 3146 activate_task(dst_rq, p, 0); 3147 check_preempt_curr(dst_rq, p, 0); 3148 3149 rq_unpin_lock(dst_rq, &drf); 3150 rq_unpin_lock(src_rq, &srf); 3151 3152 } else { 3153 /* 3154 * Task isn't running anymore; make it appear like we migrated 3155 * it before it went to sleep. This means on wakeup we make the 3156 * previous CPU our target instead of where it really is. 3157 */ 3158 p->wake_cpu = cpu; 3159 } 3160 } 3161 3162 struct migration_swap_arg { 3163 struct task_struct *src_task, *dst_task; 3164 int src_cpu, dst_cpu; 3165 }; 3166 3167 static int migrate_swap_stop(void *data) 3168 { 3169 struct migration_swap_arg *arg = data; 3170 struct rq *src_rq, *dst_rq; 3171 int ret = -EAGAIN; 3172 3173 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) 3174 return -EAGAIN; 3175 3176 src_rq = cpu_rq(arg->src_cpu); 3177 dst_rq = cpu_rq(arg->dst_cpu); 3178 3179 double_raw_lock(&arg->src_task->pi_lock, 3180 &arg->dst_task->pi_lock); 3181 double_rq_lock(src_rq, dst_rq); 3182 3183 if (task_cpu(arg->dst_task) != arg->dst_cpu) 3184 goto unlock; 3185 3186 if (task_cpu(arg->src_task) != arg->src_cpu) 3187 goto unlock; 3188 3189 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) 3190 goto unlock; 3191 3192 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) 3193 goto unlock; 3194 3195 __migrate_swap_task(arg->src_task, arg->dst_cpu); 3196 __migrate_swap_task(arg->dst_task, arg->src_cpu); 3197 3198 ret = 0; 3199 3200 unlock: 3201 double_rq_unlock(src_rq, dst_rq); 3202 raw_spin_unlock(&arg->dst_task->pi_lock); 3203 raw_spin_unlock(&arg->src_task->pi_lock); 3204 3205 return ret; 3206 } 3207 3208 /* 3209 * Cross migrate two tasks 3210 */ 3211 int migrate_swap(struct task_struct *cur, struct task_struct *p, 3212 int target_cpu, int curr_cpu) 3213 { 3214 struct migration_swap_arg arg; 3215 int ret = -EINVAL; 3216 3217 arg = (struct migration_swap_arg){ 3218 .src_task = cur, 3219 .src_cpu = curr_cpu, 3220 .dst_task = p, 3221 .dst_cpu = target_cpu, 3222 }; 3223 3224 if (arg.src_cpu == arg.dst_cpu) 3225 goto out; 3226 3227 /* 3228 * These three tests are all lockless; this is OK since all of them 3229 * will be re-checked with proper locks held further down the line. 
3230 */ 3231 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 3232 goto out; 3233 3234 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) 3235 goto out; 3236 3237 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) 3238 goto out; 3239 3240 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 3241 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 3242 3243 out: 3244 return ret; 3245 } 3246 #endif /* CONFIG_NUMA_BALANCING */ 3247 3248 /* 3249 * wait_task_inactive - wait for a thread to unschedule. 3250 * 3251 * Wait for the thread to block in any of the states set in @match_state. 3252 * If it changes, i.e. @p might have woken up, then return zero. When we 3253 * succeed in waiting for @p to be off its CPU, we return a positive number 3254 * (its total switch count). If a second call a short while later returns the 3255 * same number, the caller can be sure that @p has remained unscheduled the 3256 * whole time. 3257 * 3258 * The caller must ensure that the task *will* unschedule sometime soon, 3259 * else this function might spin for a *long* time. This function can't 3260 * be called with interrupts off, or it may introduce deadlock with 3261 * smp_call_function() if an IPI is sent by the same process we are 3262 * waiting to become inactive. 3263 */ 3264 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) 3265 { 3266 int running, queued; 3267 struct rq_flags rf; 3268 unsigned long ncsw; 3269 struct rq *rq; 3270 3271 for (;;) { 3272 /* 3273 * We do the initial early heuristics without holding 3274 * any task-queue locks at all. We'll only try to get 3275 * the runqueue lock when things look like they will 3276 * work out! 3277 */ 3278 rq = task_rq(p); 3279 3280 /* 3281 * If the task is actively running on another CPU 3282 * still, just relax and busy-wait without holding 3283 * any locks. 3284 * 3285 * NOTE! Since we don't hold any locks, it's not 3286 * even sure that "rq" stays as the right runqueue! 3287 * But we don't care, since "task_on_cpu()" will 3288 * return false if the runqueue has changed and p 3289 * is actually now running somewhere else! 3290 */ 3291 while (task_on_cpu(rq, p)) { 3292 if (!(READ_ONCE(p->__state) & match_state)) 3293 return 0; 3294 cpu_relax(); 3295 } 3296 3297 /* 3298 * Ok, time to look more closely! We need the rq 3299 * lock now, to be *sure*. If we're wrong, we'll 3300 * just go back and repeat. 3301 */ 3302 rq = task_rq_lock(p, &rf); 3303 trace_sched_wait_task(p); 3304 running = task_on_cpu(rq, p); 3305 queued = task_on_rq_queued(p); 3306 ncsw = 0; 3307 if (READ_ONCE(p->__state) & match_state) 3308 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 3309 task_rq_unlock(rq, p, &rf); 3310 3311 /* 3312 * If it changed from the expected state, bail out now. 3313 */ 3314 if (unlikely(!ncsw)) 3315 break; 3316 3317 /* 3318 * Was it really running after all now that we 3319 * checked with the proper locks actually held? 3320 * 3321 * Oops. Go back and try again.. 3322 */ 3323 if (unlikely(running)) { 3324 cpu_relax(); 3325 continue; 3326 } 3327 3328 /* 3329 * It's not enough that it's not actively running, 3330 * it must be off the runqueue _entirely_, and not 3331 * preempted! 3332 * 3333 * So if it was still runnable (but just not actively 3334 * running right now), it's preempted, and we should 3335 * yield - it could be a while. 
3336 */ 3337 if (unlikely(queued)) { 3338 ktime_t to = NSEC_PER_SEC / HZ; 3339 3340 set_current_state(TASK_UNINTERRUPTIBLE); 3341 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD); 3342 continue; 3343 } 3344 3345 /* 3346 * Ahh, all good. It wasn't running, and it wasn't 3347 * runnable, which means that it will never become 3348 * running in the future either. We're all done! 3349 */ 3350 break; 3351 } 3352 3353 return ncsw; 3354 } 3355 3356 /*** 3357 * kick_process - kick a running thread to enter/exit the kernel 3358 * @p: the to-be-kicked thread 3359 * 3360 * Cause a process which is running on another CPU to enter 3361 * kernel-mode, without any delay. (to get signals handled.) 3362 * 3363 * NOTE: this function doesn't have to take the runqueue lock, 3364 * because all it wants to ensure is that the remote task enters 3365 * the kernel. If the IPI races and the task has been migrated 3366 * to another CPU then no harm is done and the purpose has been 3367 * achieved as well. 3368 */ 3369 void kick_process(struct task_struct *p) 3370 { 3371 int cpu; 3372 3373 preempt_disable(); 3374 cpu = task_cpu(p); 3375 if ((cpu != smp_processor_id()) && task_curr(p)) 3376 smp_send_reschedule(cpu); 3377 preempt_enable(); 3378 } 3379 EXPORT_SYMBOL_GPL(kick_process); 3380 3381 /* 3382 * ->cpus_ptr is protected by both rq->lock and p->pi_lock 3383 * 3384 * A few notes on cpu_active vs cpu_online: 3385 * 3386 * - cpu_active must be a subset of cpu_online 3387 * 3388 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, 3389 * see __set_cpus_allowed_ptr(). At this point the newly online 3390 * CPU isn't yet part of the sched domains, and balancing will not 3391 * see it. 3392 * 3393 * - on CPU-down we clear cpu_active() to mask the sched domains and 3394 * avoid the load balancer to place new tasks on the to be removed 3395 * CPU. Existing tasks will remain running there and will be taken 3396 * off. 3397 * 3398 * This means that fallback selection must not select !active CPUs. 3399 * And can assume that any active CPU must be online. Conversely 3400 * select_task_rq() below may allow selection of !active CPUs in order 3401 * to satisfy the above rules. 3402 */ 3403 static int select_fallback_rq(int cpu, struct task_struct *p) 3404 { 3405 int nid = cpu_to_node(cpu); 3406 const struct cpumask *nodemask = NULL; 3407 enum { cpuset, possible, fail } state = cpuset; 3408 int dest_cpu; 3409 3410 /* 3411 * If the node that the CPU is on has been offlined, cpu_to_node() 3412 * will return -1. There is no CPU on the node, and we should 3413 * select the CPU on the other node. 3414 */ 3415 if (nid != -1) { 3416 nodemask = cpumask_of_node(nid); 3417 3418 /* Look for allowed, online CPU in same node. */ 3419 for_each_cpu(dest_cpu, nodemask) { 3420 if (is_cpu_allowed(p, dest_cpu)) 3421 return dest_cpu; 3422 } 3423 } 3424 3425 for (;;) { 3426 /* Any allowed, online CPU? */ 3427 for_each_cpu(dest_cpu, p->cpus_ptr) { 3428 if (!is_cpu_allowed(p, dest_cpu)) 3429 continue; 3430 3431 goto out; 3432 } 3433 3434 /* No more Mr. Nice Guy. */ 3435 switch (state) { 3436 case cpuset: 3437 if (cpuset_cpus_allowed_fallback(p)) { 3438 state = possible; 3439 break; 3440 } 3441 fallthrough; 3442 case possible: 3443 /* 3444 * XXX When called from select_task_rq() we only 3445 * hold p->pi_lock and again violate locking order. 3446 * 3447 * More yuck to audit. 
3448 */ 3449 do_set_cpus_allowed(p, task_cpu_possible_mask(p)); 3450 state = fail; 3451 break; 3452 case fail: 3453 BUG(); 3454 break; 3455 } 3456 } 3457 3458 out: 3459 if (state != cpuset) { 3460 /* 3461 * Don't tell them about moving exiting tasks or 3462 * kernel threads (both mm NULL), since they never 3463 * leave the kernel. 3464 */ 3465 if (p->mm && printk_ratelimit()) { 3466 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 3467 task_pid_nr(p), p->comm, cpu); 3468 } 3469 } 3470 3471 return dest_cpu; 3472 } 3473 3474 /* 3475 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 3476 */ 3477 static inline 3478 int select_task_rq(struct task_struct *p, int cpu, int wake_flags) 3479 { 3480 lockdep_assert_held(&p->pi_lock); 3481 3482 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) 3483 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags); 3484 else 3485 cpu = cpumask_any(p->cpus_ptr); 3486 3487 /* 3488 * In order not to call set_task_cpu() on a blocking task we need 3489 * to rely on ttwu() to place the task on a valid ->cpus_ptr 3490 * CPU. 3491 * 3492 * Since this is common to all placement strategies, this lives here. 3493 * 3494 * [ this allows ->select_task() to simply return task_cpu(p) and 3495 * not worry about this generic constraint ] 3496 */ 3497 if (unlikely(!is_cpu_allowed(p, cpu))) 3498 cpu = select_fallback_rq(task_cpu(p), p); 3499 3500 return cpu; 3501 } 3502 3503 void sched_set_stop_task(int cpu, struct task_struct *stop) 3504 { 3505 static struct lock_class_key stop_pi_lock; 3506 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 3507 struct task_struct *old_stop = cpu_rq(cpu)->stop; 3508 3509 if (stop) { 3510 /* 3511 * Make it appear like a SCHED_FIFO task, it's something 3512 * userspace knows about and won't get confused about. 3513 * 3514 * Also, it will make PI more or less work without too 3515 * much confusion -- but then, stop work should not 3516 * rely on PI working anyway. 3517 */ 3518 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param); 3519 3520 stop->sched_class = &stop_sched_class; 3521 3522 /* 3523 * The PI code calls rt_mutex_setprio() with ->pi_lock held to 3524 * adjust the effective priority of a task. As a result, 3525 * rt_mutex_setprio() can trigger (RT) balancing operations, 3526 * which can then trigger wakeups of the stop thread to push 3527 * around the current task. 3528 * 3529 * The stop task itself will never be part of the PI-chain, it 3530 * never blocks, therefore that ->pi_lock recursion is safe. 3531 * Tell lockdep about this by placing the stop->pi_lock in its 3532 * own class. 3533 */ 3534 lockdep_set_class(&stop->pi_lock, &stop_pi_lock); 3535 } 3536 3537 cpu_rq(cpu)->stop = stop; 3538 3539 if (old_stop) { 3540 /* 3541 * Reset it back to a normal scheduling class so that 3542 * it can die in pieces.
3543 */ 3544 old_stop->sched_class = &rt_sched_class; 3545 } 3546 } 3547 3548 #else /* CONFIG_SMP */ 3549 3550 static inline int __set_cpus_allowed_ptr(struct task_struct *p, 3551 const struct cpumask *new_mask, 3552 u32 flags) 3553 { 3554 return set_cpus_allowed_ptr(p, new_mask); 3555 } 3556 3557 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } 3558 3559 static inline bool rq_has_pinned_tasks(struct rq *rq) 3560 { 3561 return false; 3562 } 3563 3564 #endif /* !CONFIG_SMP */ 3565 3566 static void 3567 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 3568 { 3569 struct rq *rq; 3570 3571 if (!schedstat_enabled()) 3572 return; 3573 3574 rq = this_rq(); 3575 3576 #ifdef CONFIG_SMP 3577 if (cpu == rq->cpu) { 3578 __schedstat_inc(rq->ttwu_local); 3579 __schedstat_inc(p->stats.nr_wakeups_local); 3580 } else { 3581 struct sched_domain *sd; 3582 3583 __schedstat_inc(p->stats.nr_wakeups_remote); 3584 rcu_read_lock(); 3585 for_each_domain(rq->cpu, sd) { 3586 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 3587 __schedstat_inc(sd->ttwu_wake_remote); 3588 break; 3589 } 3590 } 3591 rcu_read_unlock(); 3592 } 3593 3594 if (wake_flags & WF_MIGRATED) 3595 __schedstat_inc(p->stats.nr_wakeups_migrate); 3596 #endif /* CONFIG_SMP */ 3597 3598 __schedstat_inc(rq->ttwu_count); 3599 __schedstat_inc(p->stats.nr_wakeups); 3600 3601 if (wake_flags & WF_SYNC) 3602 __schedstat_inc(p->stats.nr_wakeups_sync); 3603 } 3604 3605 /* 3606 * Mark the task runnable and perform wakeup-preemption. 3607 */ 3608 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, 3609 struct rq_flags *rf) 3610 { 3611 check_preempt_curr(rq, p, wake_flags); 3612 WRITE_ONCE(p->__state, TASK_RUNNING); 3613 trace_sched_wakeup(p); 3614 3615 #ifdef CONFIG_SMP 3616 if (p->sched_class->task_woken) { 3617 /* 3618 * Our task @p is fully woken up and running; so it's safe to 3619 * drop the rq->lock, hereafter rq is only used for statistics. 3620 */ 3621 rq_unpin_lock(rq, rf); 3622 p->sched_class->task_woken(rq, p); 3623 rq_repin_lock(rq, rf); 3624 } 3625 3626 if (rq->idle_stamp) { 3627 u64 delta = rq_clock(rq) - rq->idle_stamp; 3628 u64 max = 2*rq->max_idle_balance_cost; 3629 3630 update_avg(&rq->avg_idle, delta); 3631 3632 if (rq->avg_idle > max) 3633 rq->avg_idle = max; 3634 3635 rq->wake_stamp = jiffies; 3636 rq->wake_avg_idle = rq->avg_idle / 2; 3637 3638 rq->idle_stamp = 0; 3639 } 3640 #endif 3641 } 3642 3643 static void 3644 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 3645 struct rq_flags *rf) 3646 { 3647 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 3648 3649 lockdep_assert_rq_held(rq); 3650 3651 if (p->sched_contributes_to_load) 3652 rq->nr_uninterruptible--; 3653 3654 #ifdef CONFIG_SMP 3655 if (wake_flags & WF_MIGRATED) 3656 en_flags |= ENQUEUE_MIGRATED; 3657 else 3658 #endif 3659 if (p->in_iowait) { 3660 delayacct_blkio_end(p); 3661 atomic_dec(&task_rq(p)->nr_iowait); 3662 } 3663 3664 activate_task(rq, p, en_flags); 3665 ttwu_do_wakeup(rq, p, wake_flags, rf); 3666 } 3667 3668 /* 3669 * Consider @p being inside a wait loop: 3670 * 3671 * for (;;) { 3672 * set_current_state(TASK_UNINTERRUPTIBLE); 3673 * 3674 * if (CONDITION) 3675 * break; 3676 * 3677 * schedule(); 3678 * } 3679 * __set_current_state(TASK_RUNNING); 3680 * 3681 * between set_current_state() and schedule(). In this case @p is still 3682 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in 3683 * an atomic manner. 
3684 * 3685 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq 3686 * then schedule() must still happen and p->state can be changed to 3687 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we 3688 * need to do a full wakeup with enqueue. 3689 * 3690 * Returns: %true when the wakeup is done, 3691 * %false otherwise. 3692 */ 3693 static int ttwu_runnable(struct task_struct *p, int wake_flags) 3694 { 3695 struct rq_flags rf; 3696 struct rq *rq; 3697 int ret = 0; 3698 3699 rq = __task_rq_lock(p, &rf); 3700 if (task_on_rq_queued(p)) { 3701 /* check_preempt_curr() may use rq clock */ 3702 update_rq_clock(rq); 3703 ttwu_do_wakeup(rq, p, wake_flags, &rf); 3704 ret = 1; 3705 } 3706 __task_rq_unlock(rq, &rf); 3707 3708 return ret; 3709 } 3710 3711 #ifdef CONFIG_SMP 3712 void sched_ttwu_pending(void *arg) 3713 { 3714 struct llist_node *llist = arg; 3715 struct rq *rq = this_rq(); 3716 struct task_struct *p, *t; 3717 struct rq_flags rf; 3718 3719 if (!llist) 3720 return; 3721 3722 /* 3723 * rq::ttwu_pending racy indication of out-standing wakeups. 3724 * Races such that false-negatives are possible, since they 3725 * are shorter lived than false-positives would be. 3726 */ 3727 WRITE_ONCE(rq->ttwu_pending, 0); 3728 3729 rq_lock_irqsave(rq, &rf); 3730 update_rq_clock(rq); 3731 3732 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { 3733 if (WARN_ON_ONCE(p->on_cpu)) 3734 smp_cond_load_acquire(&p->on_cpu, !VAL); 3735 3736 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) 3737 set_task_cpu(p, cpu_of(rq)); 3738 3739 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); 3740 } 3741 3742 rq_unlock_irqrestore(rq, &rf); 3743 } 3744 3745 void send_call_function_single_ipi(int cpu) 3746 { 3747 struct rq *rq = cpu_rq(cpu); 3748 3749 if (!set_nr_if_polling(rq->idle)) 3750 arch_send_call_function_single_ipi(cpu); 3751 else 3752 trace_sched_wake_idle_without_ipi(cpu); 3753 } 3754 3755 /* 3756 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if 3757 * necessary. The wakee CPU on receipt of the IPI will queue the task 3758 * via sched_ttwu_pending() for activation so the wakee incurs the cost 3759 * of the wakeup instead of the waker. 3760 */ 3761 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3762 { 3763 struct rq *rq = cpu_rq(cpu); 3764 3765 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 3766 3767 WRITE_ONCE(rq->ttwu_pending, 1); 3768 __smp_call_single_queue(cpu, &p->wake_entry.llist); 3769 } 3770 3771 void wake_up_if_idle(int cpu) 3772 { 3773 struct rq *rq = cpu_rq(cpu); 3774 struct rq_flags rf; 3775 3776 rcu_read_lock(); 3777 3778 if (!is_idle_task(rcu_dereference(rq->curr))) 3779 goto out; 3780 3781 rq_lock_irqsave(rq, &rf); 3782 if (is_idle_task(rq->curr)) 3783 resched_curr(rq); 3784 /* Else CPU is not idle, do nothing here: */ 3785 rq_unlock_irqrestore(rq, &rf); 3786 3787 out: 3788 rcu_read_unlock(); 3789 } 3790 3791 bool cpus_share_cache(int this_cpu, int that_cpu) 3792 { 3793 if (this_cpu == that_cpu) 3794 return true; 3795 3796 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 3797 } 3798 3799 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) 3800 { 3801 /* 3802 * Do not complicate things with the async wake_list while the CPU is 3803 * in hotplug state. 3804 */ 3805 if (!cpu_active(cpu)) 3806 return false; 3807 3808 /* Ensure the task will still be allowed to run on the CPU.
*/ 3809 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 3810 return false; 3811 3812 /* 3813 * If the CPU does not share cache, then queue the task on the 3814 * remote rqs wakelist to avoid accessing remote data. 3815 */ 3816 if (!cpus_share_cache(smp_processor_id(), cpu)) 3817 return true; 3818 3819 if (cpu == smp_processor_id()) 3820 return false; 3821 3822 /* 3823 * If the wakee cpu is idle, or the task is descheduling and the 3824 * only running task on the CPU, then use the wakelist to offload 3825 * the task activation to the idle (or soon-to-be-idle) CPU as 3826 * the current CPU is likely busy. nr_running is checked to 3827 * avoid unnecessary task stacking. 3828 * 3829 * Note that we can only get here with (wakee) p->on_rq=0, 3830 * p->on_cpu can be whatever, we've done the dequeue, so 3831 * the wakee has been accounted out of ->nr_running. 3832 */ 3833 if (!cpu_rq(cpu)->nr_running) 3834 return true; 3835 3836 return false; 3837 } 3838 3839 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3840 { 3841 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) { 3842 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ 3843 __ttwu_queue_wakelist(p, cpu, wake_flags); 3844 return true; 3845 } 3846 3847 return false; 3848 } 3849 3850 #else /* !CONFIG_SMP */ 3851 3852 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3853 { 3854 return false; 3855 } 3856 3857 #endif /* CONFIG_SMP */ 3858 3859 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 3860 { 3861 struct rq *rq = cpu_rq(cpu); 3862 struct rq_flags rf; 3863 3864 if (ttwu_queue_wakelist(p, cpu, wake_flags)) 3865 return; 3866 3867 rq_lock(rq, &rf); 3868 update_rq_clock(rq); 3869 ttwu_do_activate(rq, p, wake_flags, &rf); 3870 rq_unlock(rq, &rf); 3871 } 3872 3873 /* 3874 * Invoked from try_to_wake_up() to check whether the task can be woken up. 3875 * 3876 * The caller holds p::pi_lock if p != current or has preemption 3877 * disabled when p == current. 3878 * 3879 * The rules of PREEMPT_RT saved_state: 3880 * 3881 * The related locking code always holds p::pi_lock when updating 3882 * p::saved_state, which means the code is fully serialized in both cases. 3883 * 3884 * The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other 3885 * bits set. This allows to distinguish all wakeup scenarios. 3886 */ 3887 static __always_inline 3888 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) 3889 { 3890 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) { 3891 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) && 3892 state != TASK_RTLOCK_WAIT); 3893 } 3894 3895 if (READ_ONCE(p->__state) & state) { 3896 *success = 1; 3897 return true; 3898 } 3899 3900 #ifdef CONFIG_PREEMPT_RT 3901 /* 3902 * Saved state preserves the task state across blocking on 3903 * an RT lock. If the state matches, set p::saved_state to 3904 * TASK_RUNNING, but do not wake the task because it waits 3905 * for a lock wakeup. Also indicate success because from 3906 * the regular waker's point of view this has succeeded. 3907 * 3908 * After acquiring the lock the task will restore p::__state 3909 * from p::saved_state which ensures that the regular 3910 * wakeup is not lost. The restore will also set 3911 * p::saved_state to TASK_RUNNING so any further tests will 3912 * not result in false positives vs. 
@success 3913 */ 3914 if (p->saved_state & state) { 3915 p->saved_state = TASK_RUNNING; 3916 *success = 1; 3917 } 3918 #endif 3919 return false; 3920 } 3921 3922 /* 3923 * Notes on Program-Order guarantees on SMP systems. 3924 * 3925 * MIGRATION 3926 * 3927 * The basic program-order guarantee on SMP systems is that when a task [t] 3928 * migrates, all its activity on its old CPU [c0] happens-before any subsequent 3929 * execution on its new CPU [c1]. 3930 * 3931 * For migration (of runnable tasks) this is provided by the following means: 3932 * 3933 * A) UNLOCK of the rq(c0)->lock scheduling out task t 3934 * B) migration for t is required to synchronize *both* rq(c0)->lock and 3935 * rq(c1)->lock (if not at the same time, then in that order). 3936 * C) LOCK of the rq(c1)->lock scheduling in task 3937 * 3938 * Release/acquire chaining guarantees that B happens after A and C after B. 3939 * Note: the CPU doing B need not be c0 or c1 3940 * 3941 * Example: 3942 * 3943 * CPU0 CPU1 CPU2 3944 * 3945 * LOCK rq(0)->lock 3946 * sched-out X 3947 * sched-in Y 3948 * UNLOCK rq(0)->lock 3949 * 3950 * LOCK rq(0)->lock // orders against CPU0 3951 * dequeue X 3952 * UNLOCK rq(0)->lock 3953 * 3954 * LOCK rq(1)->lock 3955 * enqueue X 3956 * UNLOCK rq(1)->lock 3957 * 3958 * LOCK rq(1)->lock // orders against CPU2 3959 * sched-out Z 3960 * sched-in X 3961 * UNLOCK rq(1)->lock 3962 * 3963 * 3964 * BLOCKING -- aka. SLEEP + WAKEUP 3965 * 3966 * For blocking we (obviously) need to provide the same guarantee as for 3967 * migration. However the means are completely different as there is no lock 3968 * chain to provide order. Instead we do: 3969 * 3970 * 1) smp_store_release(X->on_cpu, 0) -- finish_task() 3971 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up() 3972 * 3973 * Example: 3974 * 3975 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 3976 * 3977 * LOCK rq(0)->lock LOCK X->pi_lock 3978 * dequeue X 3979 * sched-out X 3980 * smp_store_release(X->on_cpu, 0); 3981 * 3982 * smp_cond_load_acquire(&X->on_cpu, !VAL); 3983 * X->state = WAKING 3984 * set_task_cpu(X,2) 3985 * 3986 * LOCK rq(2)->lock 3987 * enqueue X 3988 * X->state = RUNNING 3989 * UNLOCK rq(2)->lock 3990 * 3991 * LOCK rq(2)->lock // orders against CPU1 3992 * sched-out Z 3993 * sched-in X 3994 * UNLOCK rq(2)->lock 3995 * 3996 * UNLOCK X->pi_lock 3997 * UNLOCK rq(0)->lock 3998 * 3999 * 4000 * However, for wakeups there is a second guarantee we must provide, namely we 4001 * must ensure that CONDITION=1 done by the caller can not be reordered with 4002 * accesses to the task state; see try_to_wake_up() and set_current_state(). 4003 */ 4004 4005 /** 4006 * try_to_wake_up - wake up a thread 4007 * @p: the thread to be awakened 4008 * @state: the mask of task states that can be woken 4009 * @wake_flags: wake modifier flags (WF_*) 4010 * 4011 * Conceptually does: 4012 * 4013 * If (@state & @p->state) @p->state = TASK_RUNNING. 4014 * 4015 * If the task was not queued/runnable, also place it back on a runqueue. 4016 * 4017 * This function is atomic against schedule() which would dequeue the task. 4018 * 4019 * It issues a full memory barrier before accessing @p->state, see the comment 4020 * with set_current_state(). 4021 * 4022 * Uses p->pi_lock to serialize against concurrent wake-ups. 4023 * 4024 * Relies on p->pi_lock stabilizing: 4025 * - p->sched_class 4026 * - p->cpus_ptr 4027 * - p->sched_task_group 4028 * in order to do migration, see its use of select_task_rq()/set_task_cpu(). 
4029 * 4030 * Tries really hard to only take one task_rq(p)->lock for performance. 4031 * Takes rq->lock in: 4032 * - ttwu_runnable() -- old rq, unavoidable, see comment there; 4033 * - ttwu_queue() -- new rq, for enqueue of the task; 4034 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us. 4035 * 4036 * As a consequence we race really badly with just about everything. See the 4037 * many memory barriers and their comments for details. 4038 * 4039 * Return: %true if @p->state changes (an actual wakeup was done), 4040 * %false otherwise. 4041 */ 4042 static int 4043 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 4044 { 4045 unsigned long flags; 4046 int cpu, success = 0; 4047 4048 preempt_disable(); 4049 if (p == current) { 4050 /* 4051 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) 4052 * == smp_processor_id()'. Together this means we can special 4053 * case the whole 'p->on_rq && ttwu_runnable()' case below 4054 * without taking any locks. 4055 * 4056 * In particular: 4057 * - we rely on Program-Order guarantees for all the ordering, 4058 * - we're serialized against set_special_state() by virtue of 4059 * it disabling IRQs (this allows not taking ->pi_lock). 4060 */ 4061 if (!ttwu_state_match(p, state, &success)) 4062 goto out; 4063 4064 trace_sched_waking(p); 4065 WRITE_ONCE(p->__state, TASK_RUNNING); 4066 trace_sched_wakeup(p); 4067 goto out; 4068 } 4069 4070 /* 4071 * If we are going to wake up a thread waiting for CONDITION we 4072 * need to ensure that CONDITION=1 done by the caller can not be 4073 * reordered with p->state check below. This pairs with smp_store_mb() 4074 * in set_current_state() that the waiting thread does. 4075 */ 4076 raw_spin_lock_irqsave(&p->pi_lock, flags); 4077 smp_mb__after_spinlock(); 4078 if (!ttwu_state_match(p, state, &success)) 4079 goto unlock; 4080 4081 trace_sched_waking(p); 4082 4083 /* 4084 * Ensure we load p->on_rq _after_ p->state, otherwise it would 4085 * be possible to, falsely, observe p->on_rq == 0 and get stuck 4086 * in smp_cond_load_acquire() below. 4087 * 4088 * sched_ttwu_pending() try_to_wake_up() 4089 * STORE p->on_rq = 1 LOAD p->state 4090 * UNLOCK rq->lock 4091 * 4092 * __schedule() (switch to task 'p') 4093 * LOCK rq->lock smp_rmb(); 4094 * smp_mb__after_spinlock(); 4095 * UNLOCK rq->lock 4096 * 4097 * [task p] 4098 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq 4099 * 4100 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 4101 * __schedule(). See the comment for smp_mb__after_spinlock(). 4102 * 4103 * A similar smb_rmb() lives in try_invoke_on_locked_down_task(). 4104 */ 4105 smp_rmb(); 4106 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) 4107 goto unlock; 4108 4109 #ifdef CONFIG_SMP 4110 /* 4111 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be 4112 * possible to, falsely, observe p->on_cpu == 0. 4113 * 4114 * One must be running (->on_cpu == 1) in order to remove oneself 4115 * from the runqueue. 4116 * 4117 * __schedule() (switch to task 'p') try_to_wake_up() 4118 * STORE p->on_cpu = 1 LOAD p->on_rq 4119 * UNLOCK rq->lock 4120 * 4121 * __schedule() (put 'p' to sleep) 4122 * LOCK rq->lock smp_rmb(); 4123 * smp_mb__after_spinlock(); 4124 * STORE p->on_rq = 0 LOAD p->on_cpu 4125 * 4126 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 4127 * __schedule(). See the comment for smp_mb__after_spinlock(). 
4128 * 4129 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure 4130 * schedule()'s deactivate_task() has 'happened' and p will no longer 4131 * care about it's own p->state. See the comment in __schedule(). 4132 */ 4133 smp_acquire__after_ctrl_dep(); 4134 4135 /* 4136 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq 4137 * == 0), which means we need to do an enqueue, change p->state to 4138 * TASK_WAKING such that we can unlock p->pi_lock before doing the 4139 * enqueue, such as ttwu_queue_wakelist(). 4140 */ 4141 WRITE_ONCE(p->__state, TASK_WAKING); 4142 4143 /* 4144 * If the owning (remote) CPU is still in the middle of schedule() with 4145 * this task as prev, considering queueing p on the remote CPUs wake_list 4146 * which potentially sends an IPI instead of spinning on p->on_cpu to 4147 * let the waker make forward progress. This is safe because IRQs are 4148 * disabled and the IPI will deliver after on_cpu is cleared. 4149 * 4150 * Ensure we load task_cpu(p) after p->on_cpu: 4151 * 4152 * set_task_cpu(p, cpu); 4153 * STORE p->cpu = @cpu 4154 * __schedule() (switch to task 'p') 4155 * LOCK rq->lock 4156 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu) 4157 * STORE p->on_cpu = 1 LOAD p->cpu 4158 * 4159 * to ensure we observe the correct CPU on which the task is currently 4160 * scheduling. 4161 */ 4162 if (smp_load_acquire(&p->on_cpu) && 4163 ttwu_queue_wakelist(p, task_cpu(p), wake_flags)) 4164 goto unlock; 4165 4166 /* 4167 * If the owning (remote) CPU is still in the middle of schedule() with 4168 * this task as prev, wait until it's done referencing the task. 4169 * 4170 * Pairs with the smp_store_release() in finish_task(). 4171 * 4172 * This ensures that tasks getting woken will be fully ordered against 4173 * their previous state and preserve Program Order. 4174 */ 4175 smp_cond_load_acquire(&p->on_cpu, !VAL); 4176 4177 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU); 4178 if (task_cpu(p) != cpu) { 4179 if (p->in_iowait) { 4180 delayacct_blkio_end(p); 4181 atomic_dec(&task_rq(p)->nr_iowait); 4182 } 4183 4184 wake_flags |= WF_MIGRATED; 4185 psi_ttwu_dequeue(p); 4186 set_task_cpu(p, cpu); 4187 } 4188 #else 4189 cpu = task_cpu(p); 4190 #endif /* CONFIG_SMP */ 4191 4192 ttwu_queue(p, cpu, wake_flags); 4193 unlock: 4194 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4195 out: 4196 if (success) 4197 ttwu_stat(p, task_cpu(p), wake_flags); 4198 preempt_enable(); 4199 4200 return success; 4201 } 4202 4203 static bool __task_needs_rq_lock(struct task_struct *p) 4204 { 4205 unsigned int state = READ_ONCE(p->__state); 4206 4207 /* 4208 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when 4209 * the task is blocked. Make sure to check @state since ttwu() can drop 4210 * locks at the end, see ttwu_queue_wakelist(). 4211 */ 4212 if (state == TASK_RUNNING || state == TASK_WAKING) 4213 return true; 4214 4215 /* 4216 * Ensure we load p->on_rq after p->__state, otherwise it would be 4217 * possible to, falsely, observe p->on_rq == 0. 4218 * 4219 * See try_to_wake_up() for a longer comment. 4220 */ 4221 smp_rmb(); 4222 if (p->on_rq) 4223 return true; 4224 4225 #ifdef CONFIG_SMP 4226 /* 4227 * Ensure the task has finished __schedule() and will not be referenced 4228 * anymore. Again, see try_to_wake_up() for a longer comment. 
4229 */ 4230 smp_rmb(); 4231 smp_cond_load_acquire(&p->on_cpu, !VAL); 4232 #endif 4233 4234 return false; 4235 } 4236 4237 /** 4238 * task_call_func - Invoke a function on task in fixed state 4239 * @p: Process for which the function is to be invoked, can be @current. 4240 * @func: Function to invoke. 4241 * @arg: Argument to function. 4242 * 4243 * Fix the task in it's current state by avoiding wakeups and or rq operations 4244 * and call @func(@arg) on it. This function can use ->on_rq and task_curr() 4245 * to work out what the state is, if required. Given that @func can be invoked 4246 * with a runqueue lock held, it had better be quite lightweight. 4247 * 4248 * Returns: 4249 * Whatever @func returns 4250 */ 4251 int task_call_func(struct task_struct *p, task_call_f func, void *arg) 4252 { 4253 struct rq *rq = NULL; 4254 struct rq_flags rf; 4255 int ret; 4256 4257 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 4258 4259 if (__task_needs_rq_lock(p)) 4260 rq = __task_rq_lock(p, &rf); 4261 4262 /* 4263 * At this point the task is pinned; either: 4264 * - blocked and we're holding off wakeups (pi->lock) 4265 * - woken, and we're holding off enqueue (rq->lock) 4266 * - queued, and we're holding off schedule (rq->lock) 4267 * - running, and we're holding off de-schedule (rq->lock) 4268 * 4269 * The called function (@func) can use: task_curr(), p->on_rq and 4270 * p->__state to differentiate between these states. 4271 */ 4272 ret = func(p, arg); 4273 4274 if (rq) 4275 rq_unlock(rq, &rf); 4276 4277 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); 4278 return ret; 4279 } 4280 4281 /** 4282 * cpu_curr_snapshot - Return a snapshot of the currently running task 4283 * @cpu: The CPU on which to snapshot the task. 4284 * 4285 * Returns the task_struct pointer of the task "currently" running on 4286 * the specified CPU. If the same task is running on that CPU throughout, 4287 * the return value will be a pointer to that task's task_struct structure. 4288 * If the CPU did any context switches even vaguely concurrently with the 4289 * execution of this function, the return value will be a pointer to the 4290 * task_struct structure of a randomly chosen task that was running on 4291 * that CPU somewhere around the time that this function was executing. 4292 * 4293 * If the specified CPU was offline, the return value is whatever it 4294 * is, perhaps a pointer to the task_struct structure of that CPU's idle 4295 * task, but there is no guarantee. Callers wishing a useful return 4296 * value must take some action to ensure that the specified CPU remains 4297 * online throughout. 4298 * 4299 * This function executes full memory barriers before and after fetching 4300 * the pointer, which permits the caller to confine this function's fetch 4301 * with respect to the caller's accesses to other shared variables. 4302 */ 4303 struct task_struct *cpu_curr_snapshot(int cpu) 4304 { 4305 struct task_struct *t; 4306 4307 smp_mb(); /* Pairing determined by caller's synchronization design. */ 4308 t = rcu_dereference(cpu_curr(cpu)); 4309 smp_mb(); /* Pairing determined by caller's synchronization design. */ 4310 return t; 4311 } 4312 4313 /** 4314 * wake_up_process - Wake up a specific process 4315 * @p: The process to be woken up. 4316 * 4317 * Attempt to wake up the nominated process and move it to the set of runnable 4318 * processes. 4319 * 4320 * Return: 1 if the process was woken up, 0 if it was already running. 4321 * 4322 * This function executes a full memory barrier before accessing the task state. 
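 *
 * A minimal sketch of the pairing this relies on ("cond" and "waiter_task"
 * are hypothetical, shown for illustration only):
 *
 *	// waiter					// waker
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!cond)					cond = true;
 *		schedule();				wake_up_process(waiter_task);
 *	__set_current_state(TASK_RUNNING);
 *
 * The store+barrier in set_current_state() and the barrier mentioned above
 * guarantee that either the waker observes the sleeping state (and wakes the
 * task) or the waiter observes cond == true (and never sleeps), so the wakeup
 * cannot be lost.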
4323 */ 4324 int wake_up_process(struct task_struct *p) 4325 { 4326 return try_to_wake_up(p, TASK_NORMAL, 0); 4327 } 4328 EXPORT_SYMBOL(wake_up_process); 4329 4330 int wake_up_state(struct task_struct *p, unsigned int state) 4331 { 4332 return try_to_wake_up(p, state, 0); 4333 } 4334 4335 /* 4336 * Perform scheduler related setup for a newly forked process p. 4337 * p is forked by current. 4338 * 4339 * __sched_fork() is basic setup used by init_idle() too: 4340 */ 4341 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 4342 { 4343 p->on_rq = 0; 4344 4345 p->se.on_rq = 0; 4346 p->se.exec_start = 0; 4347 p->se.sum_exec_runtime = 0; 4348 p->se.prev_sum_exec_runtime = 0; 4349 p->se.nr_migrations = 0; 4350 p->se.vruntime = 0; 4351 INIT_LIST_HEAD(&p->se.group_node); 4352 4353 #ifdef CONFIG_FAIR_GROUP_SCHED 4354 p->se.cfs_rq = NULL; 4355 #endif 4356 4357 #ifdef CONFIG_SCHEDSTATS 4358 /* Even if schedstat is disabled, there should not be garbage */ 4359 memset(&p->stats, 0, sizeof(p->stats)); 4360 #endif 4361 4362 RB_CLEAR_NODE(&p->dl.rb_node); 4363 init_dl_task_timer(&p->dl); 4364 init_dl_inactive_task_timer(&p->dl); 4365 __dl_clear_params(p); 4366 4367 INIT_LIST_HEAD(&p->rt.run_list); 4368 p->rt.timeout = 0; 4369 p->rt.time_slice = sched_rr_timeslice; 4370 p->rt.on_rq = 0; 4371 p->rt.on_list = 0; 4372 4373 #ifdef CONFIG_PREEMPT_NOTIFIERS 4374 INIT_HLIST_HEAD(&p->preempt_notifiers); 4375 #endif 4376 4377 #ifdef CONFIG_COMPACTION 4378 p->capture_control = NULL; 4379 #endif 4380 init_numa_balancing(clone_flags, p); 4381 #ifdef CONFIG_SMP 4382 p->wake_entry.u_flags = CSD_TYPE_TTWU; 4383 p->migration_pending = NULL; 4384 #endif 4385 } 4386 4387 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 4388 4389 #ifdef CONFIG_NUMA_BALANCING 4390 4391 int sysctl_numa_balancing_mode; 4392 4393 static void __set_numabalancing_state(bool enabled) 4394 { 4395 if (enabled) 4396 static_branch_enable(&sched_numa_balancing); 4397 else 4398 static_branch_disable(&sched_numa_balancing); 4399 } 4400 4401 void set_numabalancing_state(bool enabled) 4402 { 4403 if (enabled) 4404 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL; 4405 else 4406 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED; 4407 __set_numabalancing_state(enabled); 4408 } 4409 4410 #ifdef CONFIG_PROC_SYSCTL 4411 static void reset_memory_tiering(void) 4412 { 4413 struct pglist_data *pgdat; 4414 4415 for_each_online_pgdat(pgdat) { 4416 pgdat->nbp_threshold = 0; 4417 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); 4418 pgdat->nbp_th_start = jiffies_to_msecs(jiffies); 4419 } 4420 } 4421 4422 int sysctl_numa_balancing(struct ctl_table *table, int write, 4423 void *buffer, size_t *lenp, loff_t *ppos) 4424 { 4425 struct ctl_table t; 4426 int err; 4427 int state = sysctl_numa_balancing_mode; 4428 4429 if (write && !capable(CAP_SYS_ADMIN)) 4430 return -EPERM; 4431 4432 t = *table; 4433 t.data = &state; 4434 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 4435 if (err < 0) 4436 return err; 4437 if (write) { 4438 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && 4439 (state & NUMA_BALANCING_MEMORY_TIERING)) 4440 reset_memory_tiering(); 4441 sysctl_numa_balancing_mode = state; 4442 __set_numabalancing_state(state); 4443 } 4444 return err; 4445 } 4446 #endif 4447 #endif 4448 4449 #ifdef CONFIG_SCHEDSTATS 4450 4451 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 4452 4453 static void set_schedstats(bool enabled) 4454 { 4455 if (enabled) 4456 static_branch_enable(&sched_schedstats); 4457 else 4458 
static_branch_disable(&sched_schedstats); 4459 } 4460 4461 void force_schedstat_enabled(void) 4462 { 4463 if (!schedstat_enabled()) { 4464 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 4465 static_branch_enable(&sched_schedstats); 4466 } 4467 } 4468 4469 static int __init setup_schedstats(char *str) 4470 { 4471 int ret = 0; 4472 if (!str) 4473 goto out; 4474 4475 if (!strcmp(str, "enable")) { 4476 set_schedstats(true); 4477 ret = 1; 4478 } else if (!strcmp(str, "disable")) { 4479 set_schedstats(false); 4480 ret = 1; 4481 } 4482 out: 4483 if (!ret) 4484 pr_warn("Unable to parse schedstats=\n"); 4485 4486 return ret; 4487 } 4488 __setup("schedstats=", setup_schedstats); 4489 4490 #ifdef CONFIG_PROC_SYSCTL 4491 static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, 4492 size_t *lenp, loff_t *ppos) 4493 { 4494 struct ctl_table t; 4495 int err; 4496 int state = static_branch_likely(&sched_schedstats); 4497 4498 if (write && !capable(CAP_SYS_ADMIN)) 4499 return -EPERM; 4500 4501 t = *table; 4502 t.data = &state; 4503 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 4504 if (err < 0) 4505 return err; 4506 if (write) 4507 set_schedstats(state); 4508 return err; 4509 } 4510 #endif /* CONFIG_PROC_SYSCTL */ 4511 #endif /* CONFIG_SCHEDSTATS */ 4512 4513 #ifdef CONFIG_SYSCTL 4514 static struct ctl_table sched_core_sysctls[] = { 4515 #ifdef CONFIG_SCHEDSTATS 4516 { 4517 .procname = "sched_schedstats", 4518 .data = NULL, 4519 .maxlen = sizeof(unsigned int), 4520 .mode = 0644, 4521 .proc_handler = sysctl_schedstats, 4522 .extra1 = SYSCTL_ZERO, 4523 .extra2 = SYSCTL_ONE, 4524 }, 4525 #endif /* CONFIG_SCHEDSTATS */ 4526 #ifdef CONFIG_UCLAMP_TASK 4527 { 4528 .procname = "sched_util_clamp_min", 4529 .data = &sysctl_sched_uclamp_util_min, 4530 .maxlen = sizeof(unsigned int), 4531 .mode = 0644, 4532 .proc_handler = sysctl_sched_uclamp_handler, 4533 }, 4534 { 4535 .procname = "sched_util_clamp_max", 4536 .data = &sysctl_sched_uclamp_util_max, 4537 .maxlen = sizeof(unsigned int), 4538 .mode = 0644, 4539 .proc_handler = sysctl_sched_uclamp_handler, 4540 }, 4541 { 4542 .procname = "sched_util_clamp_min_rt_default", 4543 .data = &sysctl_sched_uclamp_util_min_rt_default, 4544 .maxlen = sizeof(unsigned int), 4545 .mode = 0644, 4546 .proc_handler = sysctl_sched_uclamp_handler, 4547 }, 4548 #endif /* CONFIG_UCLAMP_TASK */ 4549 {} 4550 }; 4551 static int __init sched_core_sysctl_init(void) 4552 { 4553 register_sysctl_init("kernel", sched_core_sysctls); 4554 return 0; 4555 } 4556 late_initcall(sched_core_sysctl_init); 4557 #endif /* CONFIG_SYSCTL */ 4558 4559 /* 4560 * fork()/clone()-time setup: 4561 */ 4562 int sched_fork(unsigned long clone_flags, struct task_struct *p) 4563 { 4564 __sched_fork(clone_flags, p); 4565 /* 4566 * We mark the process as NEW here. This guarantees that 4567 * nobody will actually run it, and a signal or other external 4568 * event cannot wake it up and insert it on the runqueue either. 4569 */ 4570 p->__state = TASK_NEW; 4571 4572 /* 4573 * Make sure we do not leak PI boosting priority to the child. 4574 */ 4575 p->prio = current->normal_prio; 4576 4577 uclamp_fork(p); 4578 4579 /* 4580 * Revert to default priority/policy on fork if requested. 
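 *
 * For example (illustrative): a SCHED_FIFO parent that called sched_setattr()
 * with SCHED_FLAG_RESET_ON_FORK (or sched_setscheduler() with
 * SCHED_RESET_ON_FORK OR-ed into the policy) will see its children start out
 * as plain SCHED_NORMAL tasks at nice 0, regardless of the parent's RT
 * priority, per the code below.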
4581 */ 4582 if (unlikely(p->sched_reset_on_fork)) { 4583 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 4584 p->policy = SCHED_NORMAL; 4585 p->static_prio = NICE_TO_PRIO(0); 4586 p->rt_priority = 0; 4587 } else if (PRIO_TO_NICE(p->static_prio) < 0) 4588 p->static_prio = NICE_TO_PRIO(0); 4589 4590 p->prio = p->normal_prio = p->static_prio; 4591 set_load_weight(p, false); 4592 4593 /* 4594 * We don't need the reset flag anymore after the fork. It has 4595 * fulfilled its duty: 4596 */ 4597 p->sched_reset_on_fork = 0; 4598 } 4599 4600 if (dl_prio(p->prio)) 4601 return -EAGAIN; 4602 else if (rt_prio(p->prio)) 4603 p->sched_class = &rt_sched_class; 4604 else 4605 p->sched_class = &fair_sched_class; 4606 4607 init_entity_runnable_average(&p->se); 4608 4609 4610 #ifdef CONFIG_SCHED_INFO 4611 if (likely(sched_info_on())) 4612 memset(&p->sched_info, 0, sizeof(p->sched_info)); 4613 #endif 4614 #if defined(CONFIG_SMP) 4615 p->on_cpu = 0; 4616 #endif 4617 init_task_preempt_count(p); 4618 #ifdef CONFIG_SMP 4619 plist_node_init(&p->pushable_tasks, MAX_PRIO); 4620 RB_CLEAR_NODE(&p->pushable_dl_tasks); 4621 #endif 4622 return 0; 4623 } 4624 4625 void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) 4626 { 4627 unsigned long flags; 4628 4629 /* 4630 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly 4631 * required yet, but lockdep gets upset if rules are violated. 4632 */ 4633 raw_spin_lock_irqsave(&p->pi_lock, flags); 4634 #ifdef CONFIG_CGROUP_SCHED 4635 if (1) { 4636 struct task_group *tg; 4637 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], 4638 struct task_group, css); 4639 tg = autogroup_task_group(p, tg); 4640 p->sched_task_group = tg; 4641 } 4642 #endif 4643 rseq_migrate(p); 4644 /* 4645 * We're setting the CPU for the first time, we don't migrate, 4646 * so use __set_task_cpu(). 4647 */ 4648 __set_task_cpu(p, smp_processor_id()); 4649 if (p->sched_class->task_fork) 4650 p->sched_class->task_fork(p); 4651 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4652 } 4653 4654 void sched_post_fork(struct task_struct *p) 4655 { 4656 uclamp_post_fork(p); 4657 } 4658 4659 unsigned long to_ratio(u64 period, u64 runtime) 4660 { 4661 if (runtime == RUNTIME_INF) 4662 return BW_UNIT; 4663 4664 /* 4665 * Doing this here saves a lot of checks in all 4666 * the calling paths, and returning zero seems 4667 * safe for them anyway. 4668 */ 4669 if (period == 0) 4670 return 0; 4671 4672 return div64_u64(runtime << BW_SHIFT, period); 4673 } 4674 4675 /* 4676 * wake_up_new_task - wake up a newly created task for the first time. 4677 * 4678 * This function will do some initial scheduler statistics housekeeping 4679 * that must be done for every newly created context, then puts the task 4680 * on the runqueue and wakes it. 4681 */ 4682 void wake_up_new_task(struct task_struct *p) 4683 { 4684 struct rq_flags rf; 4685 struct rq *rq; 4686 4687 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 4688 WRITE_ONCE(p->__state, TASK_RUNNING); 4689 #ifdef CONFIG_SMP 4690 /* 4691 * Fork balancing, do it here and not earlier because: 4692 * - cpus_ptr can change in the fork path 4693 * - any previously selected CPU might disappear through hotplug 4694 * 4695 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 4696 * as we're not fully set-up yet. 
4697 */ 4698 p->recent_used_cpu = task_cpu(p); 4699 rseq_migrate(p); 4700 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK)); 4701 #endif 4702 rq = __task_rq_lock(p, &rf); 4703 update_rq_clock(rq); 4704 post_init_entity_util_avg(p); 4705 4706 activate_task(rq, p, ENQUEUE_NOCLOCK); 4707 trace_sched_wakeup_new(p); 4708 check_preempt_curr(rq, p, WF_FORK); 4709 #ifdef CONFIG_SMP 4710 if (p->sched_class->task_woken) { 4711 /* 4712 * Nothing relies on rq->lock after this, so it's fine to 4713 * drop it. 4714 */ 4715 rq_unpin_lock(rq, &rf); 4716 p->sched_class->task_woken(rq, p); 4717 rq_repin_lock(rq, &rf); 4718 } 4719 #endif 4720 task_rq_unlock(rq, p, &rf); 4721 } 4722 4723 #ifdef CONFIG_PREEMPT_NOTIFIERS 4724 4725 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); 4726 4727 void preempt_notifier_inc(void) 4728 { 4729 static_branch_inc(&preempt_notifier_key); 4730 } 4731 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 4732 4733 void preempt_notifier_dec(void) 4734 { 4735 static_branch_dec(&preempt_notifier_key); 4736 } 4737 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 4738 4739 /** 4740 * preempt_notifier_register - tell me when current is being preempted & rescheduled 4741 * @notifier: notifier struct to register 4742 */ 4743 void preempt_notifier_register(struct preempt_notifier *notifier) 4744 { 4745 if (!static_branch_unlikely(&preempt_notifier_key)) 4746 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 4747 4748 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 4749 } 4750 EXPORT_SYMBOL_GPL(preempt_notifier_register); 4751 4752 /** 4753 * preempt_notifier_unregister - no longer interested in preemption notifications 4754 * @notifier: notifier struct to unregister 4755 * 4756 * This is *not* safe to call from within a preemption notifier. 
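 *
 * Typical usage, loosely modelled on KVM's vCPU load/put path ("my_ops" and
 * its callbacks are hypothetical, a sketch only):
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,	// current is being rescheduled
 *		.sched_out = my_sched_out,	// current is being preempted
 *	};
 *
 *	preempt_notifier_inc();			// enable the static key
 *	preempt_notifier_init(&notifier, &my_ops);
 *	preempt_notifier_register(&notifier);	// hooks 'current'
 *	...
 *	preempt_notifier_unregister(&notifier);
 *	preempt_notifier_dec();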
4757 */ 4758 void preempt_notifier_unregister(struct preempt_notifier *notifier) 4759 { 4760 hlist_del(¬ifier->link); 4761 } 4762 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 4763 4764 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 4765 { 4766 struct preempt_notifier *notifier; 4767 4768 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 4769 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 4770 } 4771 4772 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 4773 { 4774 if (static_branch_unlikely(&preempt_notifier_key)) 4775 __fire_sched_in_preempt_notifiers(curr); 4776 } 4777 4778 static void 4779 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 4780 struct task_struct *next) 4781 { 4782 struct preempt_notifier *notifier; 4783 4784 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 4785 notifier->ops->sched_out(notifier, next); 4786 } 4787 4788 static __always_inline void 4789 fire_sched_out_preempt_notifiers(struct task_struct *curr, 4790 struct task_struct *next) 4791 { 4792 if (static_branch_unlikely(&preempt_notifier_key)) 4793 __fire_sched_out_preempt_notifiers(curr, next); 4794 } 4795 4796 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 4797 4798 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 4799 { 4800 } 4801 4802 static inline void 4803 fire_sched_out_preempt_notifiers(struct task_struct *curr, 4804 struct task_struct *next) 4805 { 4806 } 4807 4808 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 4809 4810 static inline void prepare_task(struct task_struct *next) 4811 { 4812 #ifdef CONFIG_SMP 4813 /* 4814 * Claim the task as running, we do this before switching to it 4815 * such that any running task will have this set. 4816 * 4817 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and 4818 * its ordering comment. 4819 */ 4820 WRITE_ONCE(next->on_cpu, 1); 4821 #endif 4822 } 4823 4824 static inline void finish_task(struct task_struct *prev) 4825 { 4826 #ifdef CONFIG_SMP 4827 /* 4828 * This must be the very last reference to @prev from this CPU. After 4829 * p->on_cpu is cleared, the task can be moved to a different CPU. We 4830 * must ensure this doesn't happen until the switch is completely 4831 * finished. 4832 * 4833 * In particular, the load of prev->state in finish_task_switch() must 4834 * happen before this. 4835 * 4836 * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 4837 */ 4838 smp_store_release(&prev->on_cpu, 0); 4839 #endif 4840 } 4841 4842 #ifdef CONFIG_SMP 4843 4844 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head) 4845 { 4846 void (*func)(struct rq *rq); 4847 struct balance_callback *next; 4848 4849 lockdep_assert_rq_held(rq); 4850 4851 while (head) { 4852 func = (void (*)(struct rq *))head->func; 4853 next = head->next; 4854 head->next = NULL; 4855 head = next; 4856 4857 func(rq); 4858 } 4859 } 4860 4861 static void balance_push(struct rq *rq); 4862 4863 /* 4864 * balance_push_callback is a right abuse of the callback interface and plays 4865 * by significantly different rules. 4866 * 4867 * Where the normal balance_callback's purpose is to be ran in the same context 4868 * that queued it (only later, when it's safe to drop rq->lock again), 4869 * balance_push_callback is specifically targeted at __schedule(). 4870 * 4871 * This abuse is tolerated because it places all the unlikely/odd cases behind 4872 * a single test, namely: rq->balance_callback == NULL. 
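 *
 * For contrast, a regular balance callback is queued under rq->lock via
 * queue_balance_callback() and is run once it is safe to drop the lock again,
 * roughly (a sketch; "my_head" and "my_func" are hypothetical):
 *
 *	// with rq->lock held
 *	queue_balance_callback(rq, &per_cpu(my_head, rq->cpu), my_func);
 *	...
 *	// later: __balance_callbacks(rq) / balance_callbacks(rq, head)
 *	// splice the list and invoke my_func(rq) for each entry
 *
 * balance_push_callback instead stays installed on an outgoing CPU so that
 * __schedule() keeps invoking balance_push() there.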
4873 */ 4874 struct balance_callback balance_push_callback = { 4875 .next = NULL, 4876 .func = balance_push, 4877 }; 4878 4879 static inline struct balance_callback * 4880 __splice_balance_callbacks(struct rq *rq, bool split) 4881 { 4882 struct balance_callback *head = rq->balance_callback; 4883 4884 if (likely(!head)) 4885 return NULL; 4886 4887 lockdep_assert_rq_held(rq); 4888 /* 4889 * Must not take balance_push_callback off the list when 4890 * splice_balance_callbacks() and balance_callbacks() are not 4891 * in the same rq->lock section. 4892 * 4893 * In that case it would be possible for __schedule() to interleave 4894 * and observe the list empty. 4895 */ 4896 if (split && head == &balance_push_callback) 4897 head = NULL; 4898 else 4899 rq->balance_callback = NULL; 4900 4901 return head; 4902 } 4903 4904 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) 4905 { 4906 return __splice_balance_callbacks(rq, true); 4907 } 4908 4909 static void __balance_callbacks(struct rq *rq) 4910 { 4911 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false)); 4912 } 4913 4914 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) 4915 { 4916 unsigned long flags; 4917 4918 if (unlikely(head)) { 4919 raw_spin_rq_lock_irqsave(rq, flags); 4920 do_balance_callbacks(rq, head); 4921 raw_spin_rq_unlock_irqrestore(rq, flags); 4922 } 4923 } 4924 4925 #else 4926 4927 static inline void __balance_callbacks(struct rq *rq) 4928 { 4929 } 4930 4931 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) 4932 { 4933 return NULL; 4934 } 4935 4936 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) 4937 { 4938 } 4939 4940 #endif 4941 4942 static inline void 4943 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) 4944 { 4945 /* 4946 * Since the runqueue lock will be released by the next 4947 * task (which is an invalid locking op but in the case 4948 * of the scheduler it's an obvious special-case), so we 4949 * do an early lockdep release here: 4950 */ 4951 rq_unpin_lock(rq, rf); 4952 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); 4953 #ifdef CONFIG_DEBUG_SPINLOCK 4954 /* this is a valid case when another task releases the spinlock */ 4955 rq_lockp(rq)->owner = next; 4956 #endif 4957 } 4958 4959 static inline void finish_lock_switch(struct rq *rq) 4960 { 4961 /* 4962 * If we are tracking spinlock dependencies then we have to 4963 * fix up the runqueue lock - which gets 'carried over' from 4964 * prev into current: 4965 */ 4966 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); 4967 __balance_callbacks(rq); 4968 raw_spin_rq_unlock_irq(rq); 4969 } 4970 4971 /* 4972 * NOP if the arch has not defined these: 4973 */ 4974 4975 #ifndef prepare_arch_switch 4976 # define prepare_arch_switch(next) do { } while (0) 4977 #endif 4978 4979 #ifndef finish_arch_post_lock_switch 4980 # define finish_arch_post_lock_switch() do { } while (0) 4981 #endif 4982 4983 static inline void kmap_local_sched_out(void) 4984 { 4985 #ifdef CONFIG_KMAP_LOCAL 4986 if (unlikely(current->kmap_ctrl.idx)) 4987 __kmap_local_sched_out(); 4988 #endif 4989 } 4990 4991 static inline void kmap_local_sched_in(void) 4992 { 4993 #ifdef CONFIG_KMAP_LOCAL 4994 if (unlikely(current->kmap_ctrl.idx)) 4995 __kmap_local_sched_in(); 4996 #endif 4997 } 4998 4999 /** 5000 * prepare_task_switch - prepare to switch tasks 5001 * @rq: the runqueue preparing to switch 5002 * @prev: the current task that is being switched out 
5003 * @next: the task we are going to switch to. 5004 * 5005 * This is called with the rq lock held and interrupts off. It must 5006 * be paired with a subsequent finish_task_switch after the context 5007 * switch. 5008 * 5009 * prepare_task_switch sets up locking and calls architecture specific 5010 * hooks. 5011 */ 5012 static inline void 5013 prepare_task_switch(struct rq *rq, struct task_struct *prev, 5014 struct task_struct *next) 5015 { 5016 kcov_prepare_switch(prev); 5017 sched_info_switch(rq, prev, next); 5018 perf_event_task_sched_out(prev, next); 5019 rseq_preempt(prev); 5020 fire_sched_out_preempt_notifiers(prev, next); 5021 kmap_local_sched_out(); 5022 prepare_task(next); 5023 prepare_arch_switch(next); 5024 } 5025 5026 /** 5027 * finish_task_switch - clean up after a task-switch 5028 * @prev: the thread we just switched away from. 5029 * 5030 * finish_task_switch must be called after the context switch, paired 5031 * with a prepare_task_switch call before the context switch. 5032 * finish_task_switch will reconcile locking set up by prepare_task_switch, 5033 * and do any other architecture-specific cleanup actions. 5034 * 5035 * Note that we may have delayed dropping an mm in context_switch(). If 5036 * so, we finish that here outside of the runqueue lock. (Doing it 5037 * with the lock held can cause deadlocks; see schedule() for 5038 * details.) 5039 * 5040 * The context switch have flipped the stack from under us and restored the 5041 * local variables which were saved when this task called schedule() in the 5042 * past. prev == current is still correct but we need to recalculate this_rq 5043 * because prev may have moved to another CPU. 5044 */ 5045 static struct rq *finish_task_switch(struct task_struct *prev) 5046 __releases(rq->lock) 5047 { 5048 struct rq *rq = this_rq(); 5049 struct mm_struct *mm = rq->prev_mm; 5050 unsigned int prev_state; 5051 5052 /* 5053 * The previous task will have left us with a preempt_count of 2 5054 * because it left us after: 5055 * 5056 * schedule() 5057 * preempt_disable(); // 1 5058 * __schedule() 5059 * raw_spin_lock_irq(&rq->lock) // 2 5060 * 5061 * Also, see FORK_PREEMPT_COUNT. 5062 */ 5063 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, 5064 "corrupted preempt_count: %s/%d/0x%x\n", 5065 current->comm, current->pid, preempt_count())) 5066 preempt_count_set(FORK_PREEMPT_COUNT); 5067 5068 rq->prev_mm = NULL; 5069 5070 /* 5071 * A task struct has one reference for the use as "current". 5072 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 5073 * schedule one last time. The schedule call will never return, and 5074 * the scheduled task must drop that reference. 5075 * 5076 * We must observe prev->state before clearing prev->on_cpu (in 5077 * finish_task), otherwise a concurrent wakeup can get prev 5078 * running on another CPU and we could rave with its RUNNING -> DEAD 5079 * transition, resulting in a double drop. 5080 */ 5081 prev_state = READ_ONCE(prev->__state); 5082 vtime_task_switch(prev); 5083 perf_event_task_sched_in(prev, current); 5084 finish_task(prev); 5085 tick_nohz_task_switch(); 5086 finish_lock_switch(rq); 5087 finish_arch_post_lock_switch(); 5088 kcov_finish_switch(current); 5089 /* 5090 * kmap_local_sched_out() is invoked with rq::lock held and 5091 * interrupts disabled. There is no requirement for that, but the 5092 * sched out code does not have an interrupt enabled section. 5093 * Restoring the maps on sched in does not require interrupts being 5094 * disabled either. 
5095 */ 5096 kmap_local_sched_in(); 5097 5098 fire_sched_in_preempt_notifiers(current); 5099 /* 5100 * When switching through a kernel thread, the loop in 5101 * membarrier_{private,global}_expedited() may have observed that 5102 * kernel thread and not issued an IPI. It is therefore possible to 5103 * schedule between user->kernel->user threads without passing though 5104 * switch_mm(). Membarrier requires a barrier after storing to 5105 * rq->curr, before returning to userspace, so provide them here: 5106 * 5107 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly 5108 * provided by mmdrop(), 5109 * - a sync_core for SYNC_CORE. 5110 */ 5111 if (mm) { 5112 membarrier_mm_sync_core_before_usermode(mm); 5113 mmdrop_sched(mm); 5114 } 5115 if (unlikely(prev_state == TASK_DEAD)) { 5116 if (prev->sched_class->task_dead) 5117 prev->sched_class->task_dead(prev); 5118 5119 /* Task is done with its stack. */ 5120 put_task_stack(prev); 5121 5122 put_task_struct_rcu_user(prev); 5123 } 5124 5125 return rq; 5126 } 5127 5128 /** 5129 * schedule_tail - first thing a freshly forked thread must call. 5130 * @prev: the thread we just switched away from. 5131 */ 5132 asmlinkage __visible void schedule_tail(struct task_struct *prev) 5133 __releases(rq->lock) 5134 { 5135 /* 5136 * New tasks start with FORK_PREEMPT_COUNT, see there and 5137 * finish_task_switch() for details. 5138 * 5139 * finish_task_switch() will drop rq->lock() and lower preempt_count 5140 * and the preempt_enable() will end up enabling preemption (on 5141 * PREEMPT_COUNT kernels). 5142 */ 5143 5144 finish_task_switch(prev); 5145 preempt_enable(); 5146 5147 if (current->set_child_tid) 5148 put_user(task_pid_vnr(current), current->set_child_tid); 5149 5150 calculate_sigpending(); 5151 } 5152 5153 /* 5154 * context_switch - switch to the new MM and the new thread's register state. 5155 */ 5156 static __always_inline struct rq * 5157 context_switch(struct rq *rq, struct task_struct *prev, 5158 struct task_struct *next, struct rq_flags *rf) 5159 { 5160 prepare_task_switch(rq, prev, next); 5161 5162 /* 5163 * For paravirt, this is coupled with an exit in switch_to to 5164 * combine the page table reload and the switch backend into 5165 * one hypercall. 5166 */ 5167 arch_start_context_switch(prev); 5168 5169 /* 5170 * kernel -> kernel lazy + transfer active 5171 * user -> kernel lazy + mmgrab() active 5172 * 5173 * kernel -> user switch + mmdrop() active 5174 * user -> user switch 5175 */ 5176 if (!next->mm) { // to kernel 5177 enter_lazy_tlb(prev->active_mm, next); 5178 5179 next->active_mm = prev->active_mm; 5180 if (prev->mm) // from user 5181 mmgrab(prev->active_mm); 5182 else 5183 prev->active_mm = NULL; 5184 } else { // to user 5185 membarrier_switch_mm(rq, prev->active_mm, next->mm); 5186 /* 5187 * sys_membarrier() requires an smp_mb() between setting 5188 * rq->curr / membarrier_switch_mm() and returning to userspace. 5189 * 5190 * The below provides this either through switch_mm(), or in 5191 * case 'prev->active_mm == next->mm' through 5192 * finish_task_switch()'s mmdrop(). 5193 */ 5194 switch_mm_irqs_off(prev->active_mm, next->mm, next); 5195 lru_gen_use_mm(next->mm); 5196 5197 if (!prev->mm) { // from kernel 5198 /* will mmdrop() in finish_task_switch(). */ 5199 rq->prev_mm = prev->active_mm; 5200 prev->active_mm = NULL; 5201 } 5202 } 5203 5204 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 5205 5206 prepare_lock_switch(rq, next, rf); 5207 5208 /* Here we just switch the register state and the stack. 
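 *
 * Note that switch_to(prev, next, prev) passes @prev twice on purpose: when
 * this task is eventually switched back in, execution resumes here on the old
 * stack and the architecture code writes the task we are switching back
 * *from* into the third argument. That (possibly different) task is what
 * finish_task_switch() below gets to clean up after.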
*/ 5209 switch_to(prev, next, prev); 5210 barrier(); 5211 5212 return finish_task_switch(prev); 5213 } 5214 5215 /* 5216 * nr_running and nr_context_switches: 5217 * 5218 * externally visible scheduler statistics: current number of runnable 5219 * threads, total number of context switches performed since bootup. 5220 */ 5221 unsigned int nr_running(void) 5222 { 5223 unsigned int i, sum = 0; 5224 5225 for_each_online_cpu(i) 5226 sum += cpu_rq(i)->nr_running; 5227 5228 return sum; 5229 } 5230 5231 /* 5232 * Check if only the current task is running on the CPU. 5233 * 5234 * Caution: this function does not check that the caller has disabled 5235 * preemption, thus the result might have a time-of-check-to-time-of-use 5236 * race. The caller is responsible to use it correctly, for example: 5237 * 5238 * - from a non-preemptible section (of course) 5239 * 5240 * - from a thread that is bound to a single CPU 5241 * 5242 * - in a loop with very short iterations (e.g. a polling loop) 5243 */ 5244 bool single_task_running(void) 5245 { 5246 return raw_rq()->nr_running == 1; 5247 } 5248 EXPORT_SYMBOL(single_task_running); 5249 5250 unsigned long long nr_context_switches(void) 5251 { 5252 int i; 5253 unsigned long long sum = 0; 5254 5255 for_each_possible_cpu(i) 5256 sum += cpu_rq(i)->nr_switches; 5257 5258 return sum; 5259 } 5260 5261 /* 5262 * Consumers of these two interfaces, like for example the cpuidle menu 5263 * governor, are using nonsensical data. Preferring shallow idle state selection 5264 * for a CPU that has IO-wait which might not even end up running the task when 5265 * it does become runnable. 5266 */ 5267 5268 unsigned int nr_iowait_cpu(int cpu) 5269 { 5270 return atomic_read(&cpu_rq(cpu)->nr_iowait); 5271 } 5272 5273 /* 5274 * IO-wait accounting, and how it's mostly bollocks (on SMP). 5275 * 5276 * The idea behind IO-wait account is to account the idle time that we could 5277 * have spend running if it were not for IO. That is, if we were to improve the 5278 * storage performance, we'd have a proportional reduction in IO-wait time. 5279 * 5280 * This all works nicely on UP, where, when a task blocks on IO, we account 5281 * idle time as IO-wait, because if the storage were faster, it could've been 5282 * running and we'd not be idle. 5283 * 5284 * This has been extended to SMP, by doing the same for each CPU. This however 5285 * is broken. 5286 * 5287 * Imagine for instance the case where two tasks block on one CPU, only the one 5288 * CPU will have IO-wait accounted, while the other has regular idle. Even 5289 * though, if the storage were faster, both could've ran at the same time, 5290 * utilising both CPUs. 5291 * 5292 * This means, that when looking globally, the current IO-wait accounting on 5293 * SMP is a lower bound, by reason of under accounting. 5294 * 5295 * Worse, since the numbers are provided per CPU, they are sometimes 5296 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly 5297 * associated with any one particular CPU, it can wake to another CPU than it 5298 * blocked on. This means the per CPU IO-wait number is meaningless. 5299 * 5300 * Task CPU affinities can make all that even more 'interesting'. 
5301 */ 5302 5303 unsigned int nr_iowait(void) 5304 { 5305 unsigned int i, sum = 0; 5306 5307 for_each_possible_cpu(i) 5308 sum += nr_iowait_cpu(i); 5309 5310 return sum; 5311 } 5312 5313 #ifdef CONFIG_SMP 5314 5315 /* 5316 * sched_exec - execve() is a valuable balancing opportunity, because at 5317 * this point the task has the smallest effective memory and cache footprint. 5318 */ 5319 void sched_exec(void) 5320 { 5321 struct task_struct *p = current; 5322 unsigned long flags; 5323 int dest_cpu; 5324 5325 raw_spin_lock_irqsave(&p->pi_lock, flags); 5326 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); 5327 if (dest_cpu == smp_processor_id()) 5328 goto unlock; 5329 5330 if (likely(cpu_active(dest_cpu))) { 5331 struct migration_arg arg = { p, dest_cpu }; 5332 5333 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 5334 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 5335 return; 5336 } 5337 unlock: 5338 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 5339 } 5340 5341 #endif 5342 5343 DEFINE_PER_CPU(struct kernel_stat, kstat); 5344 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 5345 5346 EXPORT_PER_CPU_SYMBOL(kstat); 5347 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 5348 5349 /* 5350 * The function fair_sched_class.update_curr accesses the struct curr 5351 * and its field curr->exec_start; when called from task_sched_runtime(), 5352 * we observe a high rate of cache misses in practice. 5353 * Prefetching this data results in improved performance. 5354 */ 5355 static inline void prefetch_curr_exec_start(struct task_struct *p) 5356 { 5357 #ifdef CONFIG_FAIR_GROUP_SCHED 5358 struct sched_entity *curr = (&p->se)->cfs_rq->curr; 5359 #else 5360 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; 5361 #endif 5362 prefetch(curr); 5363 prefetch(&curr->exec_start); 5364 } 5365 5366 /* 5367 * Return accounted runtime for the task. 5368 * In case the task is currently running, return the runtime plus current's 5369 * pending runtime that have not been accounted yet. 5370 */ 5371 unsigned long long task_sched_runtime(struct task_struct *p) 5372 { 5373 struct rq_flags rf; 5374 struct rq *rq; 5375 u64 ns; 5376 5377 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 5378 /* 5379 * 64-bit doesn't need locks to atomically read a 64-bit value. 5380 * So we have a optimization chance when the task's delta_exec is 0. 5381 * Reading ->on_cpu is racy, but this is ok. 5382 * 5383 * If we race with it leaving CPU, we'll take a lock. So we're correct. 5384 * If we race with it entering CPU, unaccounted time is 0. This is 5385 * indistinguishable from the read occurring a few cycles earlier. 5386 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 5387 * been accounted, so we're correct here as well. 5388 */ 5389 if (!p->on_cpu || !task_on_rq_queued(p)) 5390 return p->se.sum_exec_runtime; 5391 #endif 5392 5393 rq = task_rq_lock(p, &rf); 5394 /* 5395 * Must be ->curr _and_ ->on_rq. If dequeued, we would 5396 * project cycles that may never be accounted to this 5397 * thread, breaking clock_gettime(). 
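 *
 * (The typical caller is the POSIX CPU-timer code, e.g.
 * clock_gettime(CLOCK_THREAD_CPUTIME_ID) sampling a live thread.)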
5398 */ 5399 if (task_current(rq, p) && task_on_rq_queued(p)) { 5400 prefetch_curr_exec_start(p); 5401 update_rq_clock(rq); 5402 p->sched_class->update_curr(rq); 5403 } 5404 ns = p->se.sum_exec_runtime; 5405 task_rq_unlock(rq, p, &rf); 5406 5407 return ns; 5408 } 5409 5410 #ifdef CONFIG_SCHED_DEBUG 5411 static u64 cpu_resched_latency(struct rq *rq) 5412 { 5413 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms); 5414 u64 resched_latency, now = rq_clock(rq); 5415 static bool warned_once; 5416 5417 if (sysctl_resched_latency_warn_once && warned_once) 5418 return 0; 5419 5420 if (!need_resched() || !latency_warn_ms) 5421 return 0; 5422 5423 if (system_state == SYSTEM_BOOTING) 5424 return 0; 5425 5426 if (!rq->last_seen_need_resched_ns) { 5427 rq->last_seen_need_resched_ns = now; 5428 rq->ticks_without_resched = 0; 5429 return 0; 5430 } 5431 5432 rq->ticks_without_resched++; 5433 resched_latency = now - rq->last_seen_need_resched_ns; 5434 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC) 5435 return 0; 5436 5437 warned_once = true; 5438 5439 return resched_latency; 5440 } 5441 5442 static int __init setup_resched_latency_warn_ms(char *str) 5443 { 5444 long val; 5445 5446 if ((kstrtol(str, 0, &val))) { 5447 pr_warn("Unable to set resched_latency_warn_ms\n"); 5448 return 1; 5449 } 5450 5451 sysctl_resched_latency_warn_ms = val; 5452 return 1; 5453 } 5454 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms); 5455 #else 5456 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; } 5457 #endif /* CONFIG_SCHED_DEBUG */ 5458 5459 /* 5460 * This function gets called by the timer code, with HZ frequency. 5461 * We call it with interrupts disabled. 5462 */ 5463 void scheduler_tick(void) 5464 { 5465 int cpu = smp_processor_id(); 5466 struct rq *rq = cpu_rq(cpu); 5467 struct task_struct *curr = rq->curr; 5468 struct rq_flags rf; 5469 unsigned long thermal_pressure; 5470 u64 resched_latency; 5471 5472 arch_scale_freq_tick(); 5473 sched_clock_tick(); 5474 5475 rq_lock(rq, &rf); 5476 5477 update_rq_clock(rq); 5478 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); 5479 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); 5480 curr->sched_class->task_tick(rq, curr, 0); 5481 if (sched_feat(LATENCY_WARN)) 5482 resched_latency = cpu_resched_latency(rq); 5483 calc_global_load_tick(rq); 5484 sched_core_tick(rq); 5485 5486 rq_unlock(rq, &rf); 5487 5488 if (sched_feat(LATENCY_WARN) && resched_latency) 5489 resched_latency_warn(cpu, resched_latency); 5490 5491 perf_event_task_tick(); 5492 5493 #ifdef CONFIG_SMP 5494 rq->idle_balance = idle_cpu(cpu); 5495 trigger_load_balance(rq); 5496 #endif 5497 } 5498 5499 #ifdef CONFIG_NO_HZ_FULL 5500 5501 struct tick_work { 5502 int cpu; 5503 atomic_t state; 5504 struct delayed_work work; 5505 }; 5506 /* Values for ->state, see diagram below. */ 5507 #define TICK_SCHED_REMOTE_OFFLINE 0 5508 #define TICK_SCHED_REMOTE_OFFLINING 1 5509 #define TICK_SCHED_REMOTE_RUNNING 2 5510 5511 /* 5512 * State diagram for ->state: 5513 * 5514 * 5515 * TICK_SCHED_REMOTE_OFFLINE 5516 * | ^ 5517 * | | 5518 * | | sched_tick_remote() 5519 * | | 5520 * | | 5521 * +--TICK_SCHED_REMOTE_OFFLINING 5522 * | ^ 5523 * | | 5524 * sched_tick_start() | | sched_tick_stop() 5525 * | | 5526 * V | 5527 * TICK_SCHED_REMOTE_RUNNING 5528 * 5529 * 5530 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() 5531 * and sched_tick_start() are happy to leave the state in RUNNING. 
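 *
 * In code terms (see below): sched_tick_start() and sched_tick_stop() move
 * the state with atomic_xchg(), while sched_tick_remote() uses
 * atomic_fetch_add_unless(&state, -1, TICK_SCHED_REMOTE_RUNNING) so that
 * OFFLINING decays to OFFLINE (and the work is not requeued) while RUNNING is
 * left alone and the tick is rearmed for the next second.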
5532 */ 5533 5534 static struct tick_work __percpu *tick_work_cpu; 5535 5536 static void sched_tick_remote(struct work_struct *work) 5537 { 5538 struct delayed_work *dwork = to_delayed_work(work); 5539 struct tick_work *twork = container_of(dwork, struct tick_work, work); 5540 int cpu = twork->cpu; 5541 struct rq *rq = cpu_rq(cpu); 5542 struct task_struct *curr; 5543 struct rq_flags rf; 5544 u64 delta; 5545 int os; 5546 5547 /* 5548 * Handle the tick only if it appears the remote CPU is running in full 5549 * dynticks mode. The check is racy by nature, but missing a tick or 5550 * having one too much is no big deal because the scheduler tick updates 5551 * statistics and checks timeslices in a time-independent way, regardless 5552 * of when exactly it is running. 5553 */ 5554 if (!tick_nohz_tick_stopped_cpu(cpu)) 5555 goto out_requeue; 5556 5557 rq_lock_irq(rq, &rf); 5558 curr = rq->curr; 5559 if (cpu_is_offline(cpu)) 5560 goto out_unlock; 5561 5562 update_rq_clock(rq); 5563 5564 if (!is_idle_task(curr)) { 5565 /* 5566 * Make sure the next tick runs within a reasonable 5567 * amount of time. 5568 */ 5569 delta = rq_clock_task(rq) - curr->se.exec_start; 5570 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); 5571 } 5572 curr->sched_class->task_tick(rq, curr, 0); 5573 5574 calc_load_nohz_remote(rq); 5575 out_unlock: 5576 rq_unlock_irq(rq, &rf); 5577 out_requeue: 5578 5579 /* 5580 * Run the remote tick once per second (1Hz). This arbitrary 5581 * frequency is large enough to avoid overload but short enough 5582 * to keep scheduler internal stats reasonably up to date. But 5583 * first update state to reflect hotplug activity if required. 5584 */ 5585 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); 5586 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); 5587 if (os == TICK_SCHED_REMOTE_RUNNING) 5588 queue_delayed_work(system_unbound_wq, dwork, HZ); 5589 } 5590 5591 static void sched_tick_start(int cpu) 5592 { 5593 int os; 5594 struct tick_work *twork; 5595 5596 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5597 return; 5598 5599 WARN_ON_ONCE(!tick_work_cpu); 5600 5601 twork = per_cpu_ptr(tick_work_cpu, cpu); 5602 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); 5603 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); 5604 if (os == TICK_SCHED_REMOTE_OFFLINE) { 5605 twork->cpu = cpu; 5606 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); 5607 queue_delayed_work(system_unbound_wq, &twork->work, HZ); 5608 } 5609 } 5610 5611 #ifdef CONFIG_HOTPLUG_CPU 5612 static void sched_tick_stop(int cpu) 5613 { 5614 struct tick_work *twork; 5615 int os; 5616 5617 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5618 return; 5619 5620 WARN_ON_ONCE(!tick_work_cpu); 5621 5622 twork = per_cpu_ptr(tick_work_cpu, cpu); 5623 /* There cannot be competing actions, but don't rely on stop-machine. */ 5624 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); 5625 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); 5626 /* Don't cancel, as this would mess up the state machine. 
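 * The already-queued work is simply left to run one last time:
 * sched_tick_remote() will observe OFFLINING, step the state down to OFFLINE
 * and not requeue itself.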
*/ 5627 } 5628 #endif /* CONFIG_HOTPLUG_CPU */ 5629 5630 int __init sched_tick_offload_init(void) 5631 { 5632 tick_work_cpu = alloc_percpu(struct tick_work); 5633 BUG_ON(!tick_work_cpu); 5634 return 0; 5635 } 5636 5637 #else /* !CONFIG_NO_HZ_FULL */ 5638 static inline void sched_tick_start(int cpu) { } 5639 static inline void sched_tick_stop(int cpu) { } 5640 #endif 5641 5642 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ 5643 defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 5644 /* 5645 * If the value passed in is equal to the current preempt count 5646 * then we just disabled preemption. Start timing the latency. 5647 */ 5648 static inline void preempt_latency_start(int val) 5649 { 5650 if (preempt_count() == val) { 5651 unsigned long ip = get_lock_parent_ip(); 5652 #ifdef CONFIG_DEBUG_PREEMPT 5653 current->preempt_disable_ip = ip; 5654 #endif 5655 trace_preempt_off(CALLER_ADDR0, ip); 5656 } 5657 } 5658 5659 void preempt_count_add(int val) 5660 { 5661 #ifdef CONFIG_DEBUG_PREEMPT 5662 /* 5663 * Underflow? 5664 */ 5665 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 5666 return; 5667 #endif 5668 __preempt_count_add(val); 5669 #ifdef CONFIG_DEBUG_PREEMPT 5670 /* 5671 * Spinlock count overflowing soon? 5672 */ 5673 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 5674 PREEMPT_MASK - 10); 5675 #endif 5676 preempt_latency_start(val); 5677 } 5678 EXPORT_SYMBOL(preempt_count_add); 5679 NOKPROBE_SYMBOL(preempt_count_add); 5680 5681 /* 5682 * If the value passed in equals to the current preempt count 5683 * then we just enabled preemption. Stop timing the latency. 5684 */ 5685 static inline void preempt_latency_stop(int val) 5686 { 5687 if (preempt_count() == val) 5688 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 5689 } 5690 5691 void preempt_count_sub(int val) 5692 { 5693 #ifdef CONFIG_DEBUG_PREEMPT 5694 /* 5695 * Underflow? 5696 */ 5697 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 5698 return; 5699 /* 5700 * Is the spinlock portion underflowing? 
5701 */ 5702 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 5703 !(preempt_count() & PREEMPT_MASK))) 5704 return; 5705 #endif 5706 5707 preempt_latency_stop(val); 5708 __preempt_count_sub(val); 5709 } 5710 EXPORT_SYMBOL(preempt_count_sub); 5711 NOKPROBE_SYMBOL(preempt_count_sub); 5712 5713 #else 5714 static inline void preempt_latency_start(int val) { } 5715 static inline void preempt_latency_stop(int val) { } 5716 #endif 5717 5718 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 5719 { 5720 #ifdef CONFIG_DEBUG_PREEMPT 5721 return p->preempt_disable_ip; 5722 #else 5723 return 0; 5724 #endif 5725 } 5726 5727 /* 5728 * Print scheduling while atomic bug: 5729 */ 5730 static noinline void __schedule_bug(struct task_struct *prev) 5731 { 5732 /* Save this before calling printk(), since that will clobber it */ 5733 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 5734 5735 if (oops_in_progress) 5736 return; 5737 5738 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 5739 prev->comm, prev->pid, preempt_count()); 5740 5741 debug_show_held_locks(prev); 5742 print_modules(); 5743 if (irqs_disabled()) 5744 print_irqtrace_events(prev); 5745 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 5746 && in_atomic_preempt_off()) { 5747 pr_err("Preemption disabled at:"); 5748 print_ip_sym(KERN_ERR, preempt_disable_ip); 5749 } 5750 if (panic_on_warn) 5751 panic("scheduling while atomic\n"); 5752 5753 dump_stack(); 5754 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 5755 } 5756 5757 /* 5758 * Various schedule()-time debugging checks and statistics: 5759 */ 5760 static inline void schedule_debug(struct task_struct *prev, bool preempt) 5761 { 5762 #ifdef CONFIG_SCHED_STACK_END_CHECK 5763 if (task_stack_end_corrupted(prev)) 5764 panic("corrupted stack end detected inside scheduler\n"); 5765 5766 if (task_scs_end_corrupted(prev)) 5767 panic("corrupted shadow stack detected inside scheduler\n"); 5768 #endif 5769 5770 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 5771 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { 5772 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", 5773 prev->comm, prev->pid, prev->non_block_count); 5774 dump_stack(); 5775 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 5776 } 5777 #endif 5778 5779 if (unlikely(in_atomic_preempt_off())) { 5780 __schedule_bug(prev); 5781 preempt_count_set(PREEMPT_DISABLED); 5782 } 5783 rcu_sleep_check(); 5784 SCHED_WARN_ON(ct_state() == CONTEXT_USER); 5785 5786 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 5787 5788 schedstat_inc(this_rq()->sched_count); 5789 } 5790 5791 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, 5792 struct rq_flags *rf) 5793 { 5794 #ifdef CONFIG_SMP 5795 const struct sched_class *class; 5796 /* 5797 * We must do the balancing pass before put_prev_task(), such 5798 * that when we release the rq->lock the task is in the same 5799 * state as before we took rq->lock. 5800 * 5801 * We can terminate the balance pass as soon as we know there is 5802 * a runnable task of @class priority or higher. 
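 *
 * For example: if @prev is an RT task, the loop starts at rt_sched_class; once
 * its ->balance() reports runnable work at that priority or above, there is no
 * point asking the fair class, so the pass stops early.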
5803 */ 5804 for_class_range(class, prev->sched_class, &idle_sched_class) { 5805 if (class->balance(rq, prev, rf)) 5806 break; 5807 } 5808 #endif 5809 5810 put_prev_task(rq, prev); 5811 } 5812 5813 /* 5814 * Pick up the highest-prio task: 5815 */ 5816 static inline struct task_struct * 5817 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 5818 { 5819 const struct sched_class *class; 5820 struct task_struct *p; 5821 5822 /* 5823 * Optimization: we know that if all tasks are in the fair class we can 5824 * call that function directly, but only if the @prev task wasn't of a 5825 * higher scheduling class, because otherwise those lose the 5826 * opportunity to pull in more work from other CPUs. 5827 */ 5828 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && 5829 rq->nr_running == rq->cfs.h_nr_running)) { 5830 5831 p = pick_next_task_fair(rq, prev, rf); 5832 if (unlikely(p == RETRY_TASK)) 5833 goto restart; 5834 5835 /* Assume the next prioritized class is idle_sched_class */ 5836 if (!p) { 5837 put_prev_task(rq, prev); 5838 p = pick_next_task_idle(rq); 5839 } 5840 5841 return p; 5842 } 5843 5844 restart: 5845 put_prev_task_balance(rq, prev, rf); 5846 5847 for_each_class(class) { 5848 p = class->pick_next_task(rq); 5849 if (p) 5850 return p; 5851 } 5852 5853 BUG(); /* The idle class should always have a runnable task. */ 5854 } 5855 5856 #ifdef CONFIG_SCHED_CORE 5857 static inline bool is_task_rq_idle(struct task_struct *t) 5858 { 5859 return (task_rq(t)->idle == t); 5860 } 5861 5862 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie) 5863 { 5864 return is_task_rq_idle(a) || (a->core_cookie == cookie); 5865 } 5866 5867 static inline bool cookie_match(struct task_struct *a, struct task_struct *b) 5868 { 5869 if (is_task_rq_idle(a) || is_task_rq_idle(b)) 5870 return true; 5871 5872 return a->core_cookie == b->core_cookie; 5873 } 5874 5875 static inline struct task_struct *pick_task(struct rq *rq) 5876 { 5877 const struct sched_class *class; 5878 struct task_struct *p; 5879 5880 for_each_class(class) { 5881 p = class->pick_task(rq); 5882 if (p) 5883 return p; 5884 } 5885 5886 BUG(); /* The idle class should always have a runnable task. */ 5887 } 5888 5889 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi); 5890 5891 static void queue_core_balance(struct rq *rq); 5892 5893 static struct task_struct * 5894 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 5895 { 5896 struct task_struct *next, *p, *max = NULL; 5897 const struct cpumask *smt_mask; 5898 bool fi_before = false; 5899 bool core_clock_updated = (rq == rq->core); 5900 unsigned long cookie; 5901 int i, cpu, occ = 0; 5902 struct rq *rq_i; 5903 bool need_sync; 5904 5905 if (!sched_core_enabled(rq)) 5906 return __pick_next_task(rq, prev, rf); 5907 5908 cpu = cpu_of(rq); 5909 5910 /* Stopper task is switching into idle, no need core-wide selection. */ 5911 if (cpu_is_offline(cpu)) { 5912 /* 5913 * Reset core_pick so that we don't enter the fastpath when 5914 * coming online. core_pick would already be migrated to 5915 * another cpu during offline. 5916 */ 5917 rq->core_pick = NULL; 5918 return __pick_next_task(rq, prev, rf); 5919 } 5920 5921 /* 5922 * If there were no {en,de}queues since we picked (IOW, the task 5923 * pointers are all still valid), and we haven't scheduled the last 5924 * pick yet, do so now. 
5925 * 5926 * rq->core_pick can be NULL if no selection was made for a CPU because 5927 * it was either offline or went offline during a sibling's core-wide 5928 * selection. In this case, do a core-wide selection. 5929 */ 5930 if (rq->core->core_pick_seq == rq->core->core_task_seq && 5931 rq->core->core_pick_seq != rq->core_sched_seq && 5932 rq->core_pick) { 5933 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); 5934 5935 next = rq->core_pick; 5936 if (next != prev) { 5937 put_prev_task(rq, prev); 5938 set_next_task(rq, next); 5939 } 5940 5941 rq->core_pick = NULL; 5942 goto out; 5943 } 5944 5945 put_prev_task_balance(rq, prev, rf); 5946 5947 smt_mask = cpu_smt_mask(cpu); 5948 need_sync = !!rq->core->core_cookie; 5949 5950 /* reset state */ 5951 rq->core->core_cookie = 0UL; 5952 if (rq->core->core_forceidle_count) { 5953 if (!core_clock_updated) { 5954 update_rq_clock(rq->core); 5955 core_clock_updated = true; 5956 } 5957 sched_core_account_forceidle(rq); 5958 /* reset after accounting force idle */ 5959 rq->core->core_forceidle_start = 0; 5960 rq->core->core_forceidle_count = 0; 5961 rq->core->core_forceidle_occupation = 0; 5962 need_sync = true; 5963 fi_before = true; 5964 } 5965 5966 /* 5967 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq 5968 * 5969 * @task_seq guards the task state ({en,de}queues) 5970 * @pick_seq is the @task_seq we did a selection on 5971 * @sched_seq is the @pick_seq we scheduled 5972 * 5973 * However, preemptions can cause multiple picks on the same task set. 5974 * 'Fix' this by also increasing @task_seq for every pick. 5975 */ 5976 rq->core->core_task_seq++; 5977 5978 /* 5979 * Optimize for common case where this CPU has no cookies 5980 * and there are no cookied tasks running on siblings. 5981 */ 5982 if (!need_sync) { 5983 next = pick_task(rq); 5984 if (!next->core_cookie) { 5985 rq->core_pick = NULL; 5986 /* 5987 * For robustness, update the min_vruntime_fi for 5988 * unconstrained picks as well. 5989 */ 5990 WARN_ON_ONCE(fi_before); 5991 task_vruntime_update(rq, next, false); 5992 goto out_set_next; 5993 } 5994 } 5995 5996 /* 5997 * For each thread: do the regular task pick and find the max prio task 5998 * amongst them. 5999 * 6000 * Tie-break prio towards the current CPU 6001 */ 6002 for_each_cpu_wrap(i, smt_mask, cpu) { 6003 rq_i = cpu_rq(i); 6004 6005 /* 6006 * Current cpu always has its clock updated on entrance to 6007 * pick_next_task(). If the current cpu is not the core, 6008 * the core may also have been updated above. 6009 */ 6010 if (i != cpu && (rq_i != rq->core || !core_clock_updated)) 6011 update_rq_clock(rq_i); 6012 6013 p = rq_i->core_pick = pick_task(rq_i); 6014 if (!max || prio_less(max, p, fi_before)) 6015 max = p; 6016 } 6017 6018 cookie = rq->core->core_cookie = max->core_cookie; 6019 6020 /* 6021 * For each thread: try and find a runnable task that matches @max or 6022 * force idle. 
6023 */ 6024 for_each_cpu(i, smt_mask) { 6025 rq_i = cpu_rq(i); 6026 p = rq_i->core_pick; 6027 6028 if (!cookie_equals(p, cookie)) { 6029 p = NULL; 6030 if (cookie) 6031 p = sched_core_find(rq_i, cookie); 6032 if (!p) 6033 p = idle_sched_class.pick_task(rq_i); 6034 } 6035 6036 rq_i->core_pick = p; 6037 6038 if (p == rq_i->idle) { 6039 if (rq_i->nr_running) { 6040 rq->core->core_forceidle_count++; 6041 if (!fi_before) 6042 rq->core->core_forceidle_seq++; 6043 } 6044 } else { 6045 occ++; 6046 } 6047 } 6048 6049 if (schedstat_enabled() && rq->core->core_forceidle_count) { 6050 rq->core->core_forceidle_start = rq_clock(rq->core); 6051 rq->core->core_forceidle_occupation = occ; 6052 } 6053 6054 rq->core->core_pick_seq = rq->core->core_task_seq; 6055 next = rq->core_pick; 6056 rq->core_sched_seq = rq->core->core_pick_seq; 6057 6058 /* Something should have been selected for current CPU */ 6059 WARN_ON_ONCE(!next); 6060 6061 /* 6062 * Reschedule siblings 6063 * 6064 * NOTE: L1TF -- at this point we're no longer running the old task and 6065 * sending an IPI (below) ensures the sibling will no longer be running 6066 * their task. This ensures there is no inter-sibling overlap between 6067 * non-matching user state. 6068 */ 6069 for_each_cpu(i, smt_mask) { 6070 rq_i = cpu_rq(i); 6071 6072 /* 6073 * An online sibling might have gone offline before a task 6074 * could be picked for it, or it might be offline but later 6075 * happen to come online, but its too late and nothing was 6076 * picked for it. That's Ok - it will pick tasks for itself, 6077 * so ignore it. 6078 */ 6079 if (!rq_i->core_pick) 6080 continue; 6081 6082 /* 6083 * Update for new !FI->FI transitions, or if continuing to be in !FI: 6084 * fi_before fi update? 6085 * 0 0 1 6086 * 0 1 1 6087 * 1 0 1 6088 * 1 1 0 6089 */ 6090 if (!(fi_before && rq->core->core_forceidle_count)) 6091 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); 6092 6093 rq_i->core_pick->core_occupation = occ; 6094 6095 if (i == cpu) { 6096 rq_i->core_pick = NULL; 6097 continue; 6098 } 6099 6100 /* Did we break L1TF mitigation requirements? 
*/ 6101 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick)); 6102 6103 if (rq_i->curr == rq_i->core_pick) { 6104 rq_i->core_pick = NULL; 6105 continue; 6106 } 6107 6108 resched_curr(rq_i); 6109 } 6110 6111 out_set_next: 6112 set_next_task(rq, next); 6113 out: 6114 if (rq->core->core_forceidle_count && next == rq->idle) 6115 queue_core_balance(rq); 6116 6117 return next; 6118 } 6119 6120 static bool try_steal_cookie(int this, int that) 6121 { 6122 struct rq *dst = cpu_rq(this), *src = cpu_rq(that); 6123 struct task_struct *p; 6124 unsigned long cookie; 6125 bool success = false; 6126 6127 local_irq_disable(); 6128 double_rq_lock(dst, src); 6129 6130 cookie = dst->core->core_cookie; 6131 if (!cookie) 6132 goto unlock; 6133 6134 if (dst->curr != dst->idle) 6135 goto unlock; 6136 6137 p = sched_core_find(src, cookie); 6138 if (p == src->idle) 6139 goto unlock; 6140 6141 do { 6142 if (p == src->core_pick || p == src->curr) 6143 goto next; 6144 6145 if (!is_cpu_allowed(p, this)) 6146 goto next; 6147 6148 if (p->core_occupation > dst->idle->core_occupation) 6149 goto next; 6150 6151 deactivate_task(src, p, 0); 6152 set_task_cpu(p, this); 6153 activate_task(dst, p, 0); 6154 6155 resched_curr(dst); 6156 6157 success = true; 6158 break; 6159 6160 next: 6161 p = sched_core_next(p, cookie); 6162 } while (p); 6163 6164 unlock: 6165 double_rq_unlock(dst, src); 6166 local_irq_enable(); 6167 6168 return success; 6169 } 6170 6171 static bool steal_cookie_task(int cpu, struct sched_domain *sd) 6172 { 6173 int i; 6174 6175 for_each_cpu_wrap(i, sched_domain_span(sd), cpu) { 6176 if (i == cpu) 6177 continue; 6178 6179 if (need_resched()) 6180 break; 6181 6182 if (try_steal_cookie(cpu, i)) 6183 return true; 6184 } 6185 6186 return false; 6187 } 6188 6189 static void sched_core_balance(struct rq *rq) 6190 { 6191 struct sched_domain *sd; 6192 int cpu = cpu_of(rq); 6193 6194 preempt_disable(); 6195 rcu_read_lock(); 6196 raw_spin_rq_unlock_irq(rq); 6197 for_each_domain(cpu, sd) { 6198 if (need_resched()) 6199 break; 6200 6201 if (steal_cookie_task(cpu, sd)) 6202 break; 6203 } 6204 raw_spin_rq_lock_irq(rq); 6205 rcu_read_unlock(); 6206 preempt_enable(); 6207 } 6208 6209 static DEFINE_PER_CPU(struct balance_callback, core_balance_head); 6210 6211 static void queue_core_balance(struct rq *rq) 6212 { 6213 if (!sched_core_enabled(rq)) 6214 return; 6215 6216 if (!rq->core->core_cookie) 6217 return; 6218 6219 if (!rq->nr_running) /* not forced idle */ 6220 return; 6221 6222 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); 6223 } 6224 6225 static void sched_core_cpu_starting(unsigned int cpu) 6226 { 6227 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 6228 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; 6229 unsigned long flags; 6230 int t; 6231 6232 sched_core_lock(cpu, &flags); 6233 6234 WARN_ON_ONCE(rq->core != rq); 6235 6236 /* if we're the first, we'll be our own leader */ 6237 if (cpumask_weight(smt_mask) == 1) 6238 goto unlock; 6239 6240 /* find the leader */ 6241 for_each_cpu(t, smt_mask) { 6242 if (t == cpu) 6243 continue; 6244 rq = cpu_rq(t); 6245 if (rq->core == rq) { 6246 core_rq = rq; 6247 break; 6248 } 6249 } 6250 6251 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */ 6252 goto unlock; 6253 6254 /* install and validate core_rq */ 6255 for_each_cpu(t, smt_mask) { 6256 rq = cpu_rq(t); 6257 6258 if (t == cpu) 6259 rq->core = core_rq; 6260 6261 WARN_ON_ONCE(rq->core != core_rq); 6262 } 6263 6264 unlock: 6265 sched_core_unlock(cpu, &flags); 6266 } 6267 6268 static void 
sched_core_cpu_deactivate(unsigned int cpu) 6269 { 6270 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 6271 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; 6272 unsigned long flags; 6273 int t; 6274 6275 sched_core_lock(cpu, &flags); 6276 6277 /* if we're the last man standing, nothing to do */ 6278 if (cpumask_weight(smt_mask) == 1) { 6279 WARN_ON_ONCE(rq->core != rq); 6280 goto unlock; 6281 } 6282 6283 /* if we're not the leader, nothing to do */ 6284 if (rq->core != rq) 6285 goto unlock; 6286 6287 /* find a new leader */ 6288 for_each_cpu(t, smt_mask) { 6289 if (t == cpu) 6290 continue; 6291 core_rq = cpu_rq(t); 6292 break; 6293 } 6294 6295 if (WARN_ON_ONCE(!core_rq)) /* impossible */ 6296 goto unlock; 6297 6298 /* copy the shared state to the new leader */ 6299 core_rq->core_task_seq = rq->core_task_seq; 6300 core_rq->core_pick_seq = rq->core_pick_seq; 6301 core_rq->core_cookie = rq->core_cookie; 6302 core_rq->core_forceidle_count = rq->core_forceidle_count; 6303 core_rq->core_forceidle_seq = rq->core_forceidle_seq; 6304 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; 6305 6306 /* 6307 * Accounting edge for forced idle is handled in pick_next_task(). 6308 * Don't need another one here, since the hotplug thread shouldn't 6309 * have a cookie. 6310 */ 6311 core_rq->core_forceidle_start = 0; 6312 6313 /* install new leader */ 6314 for_each_cpu(t, smt_mask) { 6315 rq = cpu_rq(t); 6316 rq->core = core_rq; 6317 } 6318 6319 unlock: 6320 sched_core_unlock(cpu, &flags); 6321 } 6322 6323 static inline void sched_core_cpu_dying(unsigned int cpu) 6324 { 6325 struct rq *rq = cpu_rq(cpu); 6326 6327 if (rq->core != rq) 6328 rq->core = rq; 6329 } 6330 6331 #else /* !CONFIG_SCHED_CORE */ 6332 6333 static inline void sched_core_cpu_starting(unsigned int cpu) {} 6334 static inline void sched_core_cpu_deactivate(unsigned int cpu) {} 6335 static inline void sched_core_cpu_dying(unsigned int cpu) {} 6336 6337 static struct task_struct * 6338 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6339 { 6340 return __pick_next_task(rq, prev, rf); 6341 } 6342 6343 #endif /* CONFIG_SCHED_CORE */ 6344 6345 /* 6346 * Constants for the sched_mode argument of __schedule(). 6347 * 6348 * The mode argument allows RT enabled kernels to differentiate a 6349 * preemption from blocking on an 'sleeping' spin/rwlock. Note that 6350 * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to 6351 * optimize the AND operation out and just check for zero. 6352 */ 6353 #define SM_NONE 0x0 6354 #define SM_PREEMPT 0x1 6355 #define SM_RTLOCK_WAIT 0x2 6356 6357 #ifndef CONFIG_PREEMPT_RT 6358 # define SM_MASK_PREEMPT (~0U) 6359 #else 6360 # define SM_MASK_PREEMPT SM_PREEMPT 6361 #endif 6362 6363 /* 6364 * __schedule() is the main scheduler function. 6365 * 6366 * The main means of driving the scheduler and thus entering this function are: 6367 * 6368 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 6369 * 6370 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 6371 * paths. For example, see arch/x86/entry_64.S. 6372 * 6373 * To drive preemption between tasks, the scheduler sets the flag in timer 6374 * interrupt handler scheduler_tick(). 6375 * 6376 * 3. Wakeups don't really cause entry into schedule(). They add a 6377 * task to the run-queue and that's it. 
6378 * 6379 * Now, if the new task added to the run-queue preempts the current 6380 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 6381 * called on the nearest possible occasion: 6382 * 6383 * - If the kernel is preemptible (CONFIG_PREEMPTION=y): 6384 * 6385 * - in syscall or exception context, at the next outmost 6386 * preempt_enable(). (this might be as soon as the wake_up()'s 6387 * spin_unlock()!) 6388 * 6389 * - in IRQ context, return from interrupt-handler to 6390 * preemptible context 6391 * 6392 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) 6393 * then at the next: 6394 * 6395 * - cond_resched() call 6396 * - explicit schedule() call 6397 * - return from syscall or exception to user-space 6398 * - return from interrupt-handler to user-space 6399 * 6400 * WARNING: must be called with preemption disabled! 6401 */ 6402 static void __sched notrace __schedule(unsigned int sched_mode) 6403 { 6404 struct task_struct *prev, *next; 6405 unsigned long *switch_count; 6406 unsigned long prev_state; 6407 struct rq_flags rf; 6408 struct rq *rq; 6409 int cpu; 6410 6411 cpu = smp_processor_id(); 6412 rq = cpu_rq(cpu); 6413 prev = rq->curr; 6414 6415 schedule_debug(prev, !!sched_mode); 6416 6417 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL)) 6418 hrtick_clear(rq); 6419 6420 local_irq_disable(); 6421 rcu_note_context_switch(!!sched_mode); 6422 6423 /* 6424 * Make sure that signal_pending_state()->signal_pending() below 6425 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 6426 * done by the caller to avoid the race with signal_wake_up(): 6427 * 6428 * __set_current_state(@state) signal_wake_up() 6429 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) 6430 * wake_up_state(p, state) 6431 * LOCK rq->lock LOCK p->pi_state 6432 * smp_mb__after_spinlock() smp_mb__after_spinlock() 6433 * if (signal_pending_state()) if (p->state & @state) 6434 * 6435 * Also, the membarrier system call requires a full memory barrier 6436 * after coming from user-space, before storing to rq->curr. 6437 */ 6438 rq_lock(rq, &rf); 6439 smp_mb__after_spinlock(); 6440 6441 /* Promote REQ to ACT */ 6442 rq->clock_update_flags <<= 1; 6443 update_rq_clock(rq); 6444 6445 switch_count = &prev->nivcsw; 6446 6447 /* 6448 * We must load prev->state once (task_struct::state is volatile), such 6449 * that we form a control dependency vs deactivate_task() below. 6450 */ 6451 prev_state = READ_ONCE(prev->__state); 6452 if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) { 6453 if (signal_pending_state(prev_state, prev)) { 6454 WRITE_ONCE(prev->__state, TASK_RUNNING); 6455 } else { 6456 prev->sched_contributes_to_load = 6457 (prev_state & TASK_UNINTERRUPTIBLE) && 6458 !(prev_state & TASK_NOLOAD) && 6459 !(prev_state & TASK_FROZEN); 6460 6461 if (prev->sched_contributes_to_load) 6462 rq->nr_uninterruptible++; 6463 6464 /* 6465 * __schedule() ttwu() 6466 * prev_state = prev->state; if (p->on_rq && ...) 6467 * if (prev_state) goto out; 6468 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); 6469 * p->state = TASK_WAKING 6470 * 6471 * Where __schedule() and ttwu() have matching control dependencies. 6472 * 6473 * After this, schedule() must not care about p->state any more. 
6474 */ 6475 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 6476 6477 if (prev->in_iowait) { 6478 atomic_inc(&rq->nr_iowait); 6479 delayacct_blkio_start(); 6480 } 6481 } 6482 switch_count = &prev->nvcsw; 6483 } 6484 6485 next = pick_next_task(rq, prev, &rf); 6486 clear_tsk_need_resched(prev); 6487 clear_preempt_need_resched(); 6488 #ifdef CONFIG_SCHED_DEBUG 6489 rq->last_seen_need_resched_ns = 0; 6490 #endif 6491 6492 if (likely(prev != next)) { 6493 rq->nr_switches++; 6494 /* 6495 * RCU users of rcu_dereference(rq->curr) may not see 6496 * changes to task_struct made by pick_next_task(). 6497 */ 6498 RCU_INIT_POINTER(rq->curr, next); 6499 /* 6500 * The membarrier system call requires each architecture 6501 * to have a full memory barrier after updating 6502 * rq->curr, before returning to user-space. 6503 * 6504 * Here are the schemes providing that barrier on the 6505 * various architectures: 6506 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. 6507 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 6508 * - finish_lock_switch() for weakly-ordered 6509 * architectures where spin_unlock is a full barrier, 6510 * - switch_to() for arm64 (weakly-ordered, spin_unlock 6511 * is a RELEASE barrier), 6512 */ 6513 ++*switch_count; 6514 6515 migrate_disable_switch(rq, prev); 6516 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); 6517 6518 trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state); 6519 6520 /* Also unlocks the rq: */ 6521 rq = context_switch(rq, prev, next, &rf); 6522 } else { 6523 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 6524 6525 rq_unpin_lock(rq, &rf); 6526 __balance_callbacks(rq); 6527 raw_spin_rq_unlock_irq(rq); 6528 } 6529 } 6530 6531 void __noreturn do_task_dead(void) 6532 { 6533 /* Causes final put_task_struct in finish_task_switch(): */ 6534 set_special_state(TASK_DEAD); 6535 6536 /* Tell freezer to ignore us: */ 6537 current->flags |= PF_NOFREEZE; 6538 6539 __schedule(SM_NONE); 6540 BUG(); 6541 6542 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 6543 for (;;) 6544 cpu_relax(); 6545 } 6546 6547 static inline void sched_submit_work(struct task_struct *tsk) 6548 { 6549 unsigned int task_flags; 6550 6551 if (task_is_running(tsk)) 6552 return; 6553 6554 task_flags = tsk->flags; 6555 /* 6556 * If a worker goes to sleep, notify and ask workqueue whether it 6557 * wants to wake up a task to maintain concurrency. 6558 */ 6559 if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 6560 if (task_flags & PF_WQ_WORKER) 6561 wq_worker_sleeping(tsk); 6562 else 6563 io_wq_worker_sleeping(tsk); 6564 } 6565 6566 /* 6567 * spinlock and rwlock must not flush block requests. This will 6568 * deadlock if the callback attempts to acquire a lock which is 6569 * already acquired. 6570 */ 6571 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT); 6572 6573 /* 6574 * If we are going to sleep and we have plugged IO queued, 6575 * make sure to submit it to avoid deadlocks. 
6576 */ 6577 blk_flush_plug(tsk->plug, true); 6578 } 6579 6580 static void sched_update_worker(struct task_struct *tsk) 6581 { 6582 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 6583 if (tsk->flags & PF_WQ_WORKER) 6584 wq_worker_running(tsk); 6585 else 6586 io_wq_worker_running(tsk); 6587 } 6588 } 6589 6590 asmlinkage __visible void __sched schedule(void) 6591 { 6592 struct task_struct *tsk = current; 6593 6594 sched_submit_work(tsk); 6595 do { 6596 preempt_disable(); 6597 __schedule(SM_NONE); 6598 sched_preempt_enable_no_resched(); 6599 } while (need_resched()); 6600 sched_update_worker(tsk); 6601 } 6602 EXPORT_SYMBOL(schedule); 6603 6604 /* 6605 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted 6606 * state (have scheduled out non-voluntarily) by making sure that all 6607 * tasks have either left the run queue or have gone into user space. 6608 * As idle tasks do not do either, they must not ever be preempted 6609 * (schedule out non-voluntarily). 6610 * 6611 * schedule_idle() is similar to schedule_preempt_disable() except that it 6612 * never enables preemption because it does not call sched_submit_work(). 6613 */ 6614 void __sched schedule_idle(void) 6615 { 6616 /* 6617 * As this skips calling sched_submit_work(), which the idle task does 6618 * regardless because that function is a nop when the task is in a 6619 * TASK_RUNNING state, make sure this isn't used someplace that the 6620 * current task can be in any other state. Note, idle is always in the 6621 * TASK_RUNNING state. 6622 */ 6623 WARN_ON_ONCE(current->__state); 6624 do { 6625 __schedule(SM_NONE); 6626 } while (need_resched()); 6627 } 6628 6629 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) 6630 asmlinkage __visible void __sched schedule_user(void) 6631 { 6632 /* 6633 * If we come here after a random call to set_need_resched(), 6634 * or we have been woken up remotely but the IPI has not yet arrived, 6635 * we haven't yet exited the RCU idle mode. Do it here manually until 6636 * we find a better solution. 6637 * 6638 * NB: There are buggy callers of this function. Ideally we 6639 * should warn if prev_state != CONTEXT_USER, but that will trigger 6640 * too frequently to make sense yet. 6641 */ 6642 enum ctx_state prev_state = exception_enter(); 6643 schedule(); 6644 exception_exit(prev_state); 6645 } 6646 #endif 6647 6648 /** 6649 * schedule_preempt_disabled - called with preemption disabled 6650 * 6651 * Returns with preemption disabled. Note: preempt_count must be 1 6652 */ 6653 void __sched schedule_preempt_disabled(void) 6654 { 6655 sched_preempt_enable_no_resched(); 6656 schedule(); 6657 preempt_disable(); 6658 } 6659 6660 #ifdef CONFIG_PREEMPT_RT 6661 void __sched notrace schedule_rtlock(void) 6662 { 6663 do { 6664 preempt_disable(); 6665 __schedule(SM_RTLOCK_WAIT); 6666 sched_preempt_enable_no_resched(); 6667 } while (need_resched()); 6668 } 6669 NOKPROBE_SYMBOL(schedule_rtlock); 6670 #endif 6671 6672 static void __sched notrace preempt_schedule_common(void) 6673 { 6674 do { 6675 /* 6676 * Because the function tracer can trace preempt_count_sub() 6677 * and it also uses preempt_enable/disable_notrace(), if 6678 * NEED_RESCHED is set, the preempt_enable_notrace() called 6679 * by the function tracer will call this function again and 6680 * cause infinite recursion. 6681 * 6682 * Preemption must be disabled here before the function 6683 * tracer can trace. Break up preempt_disable() into two 6684 * calls. 
One to disable preemption without fear of being 6685 * traced. The other to still record the preemption latency, 6686 * which can also be traced by the function tracer. 6687 */ 6688 preempt_disable_notrace(); 6689 preempt_latency_start(1); 6690 __schedule(SM_PREEMPT); 6691 preempt_latency_stop(1); 6692 preempt_enable_no_resched_notrace(); 6693 6694 /* 6695 * Check again in case we missed a preemption opportunity 6696 * between schedule and now. 6697 */ 6698 } while (need_resched()); 6699 } 6700 6701 #ifdef CONFIG_PREEMPTION 6702 /* 6703 * This is the entry point to schedule() from in-kernel preemption 6704 * off of preempt_enable. 6705 */ 6706 asmlinkage __visible void __sched notrace preempt_schedule(void) 6707 { 6708 /* 6709 * If there is a non-zero preempt_count or interrupts are disabled, 6710 * we do not want to preempt the current task. Just return.. 6711 */ 6712 if (likely(!preemptible())) 6713 return; 6714 preempt_schedule_common(); 6715 } 6716 NOKPROBE_SYMBOL(preempt_schedule); 6717 EXPORT_SYMBOL(preempt_schedule); 6718 6719 #ifdef CONFIG_PREEMPT_DYNAMIC 6720 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 6721 #ifndef preempt_schedule_dynamic_enabled 6722 #define preempt_schedule_dynamic_enabled preempt_schedule 6723 #define preempt_schedule_dynamic_disabled NULL 6724 #endif 6725 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled); 6726 EXPORT_STATIC_CALL_TRAMP(preempt_schedule); 6727 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 6728 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule); 6729 void __sched notrace dynamic_preempt_schedule(void) 6730 { 6731 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule)) 6732 return; 6733 preempt_schedule(); 6734 } 6735 NOKPROBE_SYMBOL(dynamic_preempt_schedule); 6736 EXPORT_SYMBOL(dynamic_preempt_schedule); 6737 #endif 6738 #endif 6739 6740 /** 6741 * preempt_schedule_notrace - preempt_schedule called by tracing 6742 * 6743 * The tracing infrastructure uses preempt_enable_notrace to prevent 6744 * recursion and tracing preempt enabling caused by the tracing 6745 * infrastructure itself. But as tracing can happen in areas coming 6746 * from userspace or just about to enter userspace, a preempt enable 6747 * can occur before user_exit() is called. This will cause the scheduler 6748 * to be called when the system is still in usermode. 6749 * 6750 * To prevent this, the preempt_enable_notrace will use this function 6751 * instead of preempt_schedule() to exit user context if needed before 6752 * calling the scheduler. 6753 */ 6754 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 6755 { 6756 enum ctx_state prev_ctx; 6757 6758 if (likely(!preemptible())) 6759 return; 6760 6761 do { 6762 /* 6763 * Because the function tracer can trace preempt_count_sub() 6764 * and it also uses preempt_enable/disable_notrace(), if 6765 * NEED_RESCHED is set, the preempt_enable_notrace() called 6766 * by the function tracer will call this function again and 6767 * cause infinite recursion. 6768 * 6769 * Preemption must be disabled here before the function 6770 * tracer can trace. Break up preempt_disable() into two 6771 * calls. One to disable preemption without fear of being 6772 * traced. The other to still record the preemption latency, 6773 * which can also be traced by the function tracer. 
6774 */ 6775 preempt_disable_notrace(); 6776 preempt_latency_start(1); 6777 /* 6778 * Needs preempt disabled in case user_exit() is traced 6779 * and the tracer calls preempt_enable_notrace() causing 6780 * an infinite recursion. 6781 */ 6782 prev_ctx = exception_enter(); 6783 __schedule(SM_PREEMPT); 6784 exception_exit(prev_ctx); 6785 6786 preempt_latency_stop(1); 6787 preempt_enable_no_resched_notrace(); 6788 } while (need_resched()); 6789 } 6790 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 6791 6792 #ifdef CONFIG_PREEMPT_DYNAMIC 6793 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 6794 #ifndef preempt_schedule_notrace_dynamic_enabled 6795 #define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace 6796 #define preempt_schedule_notrace_dynamic_disabled NULL 6797 #endif 6798 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled); 6799 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace); 6800 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 6801 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace); 6802 void __sched notrace dynamic_preempt_schedule_notrace(void) 6803 { 6804 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace)) 6805 return; 6806 preempt_schedule_notrace(); 6807 } 6808 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace); 6809 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace); 6810 #endif 6811 #endif 6812 6813 #endif /* CONFIG_PREEMPTION */ 6814 6815 /* 6816 * This is the entry point to schedule() from kernel preemption 6817 * off of irq context. 6818 * Note, that this is called and return with irqs disabled. This will 6819 * protect us against recursive calling from irq. 6820 */ 6821 asmlinkage __visible void __sched preempt_schedule_irq(void) 6822 { 6823 enum ctx_state prev_state; 6824 6825 /* Catch callers which need to be fixed */ 6826 BUG_ON(preempt_count() || !irqs_disabled()); 6827 6828 prev_state = exception_enter(); 6829 6830 do { 6831 preempt_disable(); 6832 local_irq_enable(); 6833 __schedule(SM_PREEMPT); 6834 local_irq_disable(); 6835 sched_preempt_enable_no_resched(); 6836 } while (need_resched()); 6837 6838 exception_exit(prev_state); 6839 } 6840 6841 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 6842 void *key) 6843 { 6844 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC); 6845 return try_to_wake_up(curr->private, mode, wake_flags); 6846 } 6847 EXPORT_SYMBOL(default_wake_function); 6848 6849 static void __setscheduler_prio(struct task_struct *p, int prio) 6850 { 6851 if (dl_prio(prio)) 6852 p->sched_class = &dl_sched_class; 6853 else if (rt_prio(prio)) 6854 p->sched_class = &rt_sched_class; 6855 else 6856 p->sched_class = &fair_sched_class; 6857 6858 p->prio = prio; 6859 } 6860 6861 #ifdef CONFIG_RT_MUTEXES 6862 6863 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) 6864 { 6865 if (pi_task) 6866 prio = min(prio, pi_task->prio); 6867 6868 return prio; 6869 } 6870 6871 static inline int rt_effective_prio(struct task_struct *p, int prio) 6872 { 6873 struct task_struct *pi_task = rt_mutex_get_top_task(p); 6874 6875 return __rt_effective_prio(pi_task, prio); 6876 } 6877 6878 /* 6879 * rt_mutex_setprio - set the current priority of a task 6880 * @p: task to boost 6881 * @pi_task: donor task 6882 * 6883 * This function changes the 'effective' priority of a task. It does 6884 * not touch ->normal_prio like __setscheduler(). 6885 * 6886 * Used by the rt_mutex code to implement priority inheritance 6887 * logic. 
Call site only calls if the priority of the task changed. 6888 */ 6889 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) 6890 { 6891 int prio, oldprio, queued, running, queue_flag = 6892 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 6893 const struct sched_class *prev_class; 6894 struct rq_flags rf; 6895 struct rq *rq; 6896 6897 /* XXX used to be waiter->prio, not waiter->task->prio */ 6898 prio = __rt_effective_prio(pi_task, p->normal_prio); 6899 6900 /* 6901 * If nothing changed, bail early. 6902 */ 6903 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) 6904 return; 6905 6906 rq = __task_rq_lock(p, &rf); 6907 update_rq_clock(rq); 6908 /* 6909 * Set under pi_lock && rq->lock, such that the value can be used under 6910 * either lock. 6911 * 6912 * Note that there is a lot of trickery required to make this pointer cache work 6913 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to 6914 * ensure a task is de-boosted (pi_task is set to NULL) before the 6915 * task is allowed to run again (and can exit). This ensures the pointer 6916 * points to a blocked task -- which guarantees the task is present. 6917 */ 6918 p->pi_top_task = pi_task; 6919 6920 /* 6921 * For FIFO/RR we only need to set prio; if that matches we're done. 6922 */ 6923 if (prio == p->prio && !dl_prio(prio)) 6924 goto out_unlock; 6925 6926 /* 6927 * Idle task boosting is a no-no in general. There is one 6928 * exception, when PREEMPT_RT and NOHZ are active: 6929 * 6930 * The idle task calls get_next_timer_interrupt() and holds 6931 * the timer wheel base->lock on the CPU and another CPU wants 6932 * to access the timer (probably to cancel it). We can safely 6933 * ignore the boosting request, as the idle CPU runs this code 6934 * with interrupts disabled and will complete the lock 6935 * protected section without being interrupted. So there is no 6936 * real need to boost. 6937 */ 6938 if (unlikely(p == rq->idle)) { 6939 WARN_ON(p != rq->curr); 6940 WARN_ON(p->pi_blocked_on); 6941 goto out_unlock; 6942 } 6943 6944 trace_sched_pi_setprio(p, pi_task); 6945 oldprio = p->prio; 6946 6947 if (oldprio == prio) 6948 queue_flag &= ~DEQUEUE_MOVE; 6949 6950 prev_class = p->sched_class; 6951 queued = task_on_rq_queued(p); 6952 running = task_current(rq, p); 6953 if (queued) 6954 dequeue_task(rq, p, queue_flag); 6955 if (running) 6956 put_prev_task(rq, p); 6957 6958 /* 6959 * The boosting conditions are: 6960 * 1. -rt task is running and holds mutex A 6961 * --> -dl task blocks on mutex A 6962 * 6963 * 2.
-dl task is running and holds mutex A 6964 * --> -dl task blocks on mutex A and could preempt the 6965 * running task 6966 */ 6967 if (dl_prio(prio)) { 6968 if (!dl_prio(p->normal_prio) || 6969 (pi_task && dl_prio(pi_task->prio) && 6970 dl_entity_preempt(&pi_task->dl, &p->dl))) { 6971 p->dl.pi_se = pi_task->dl.pi_se; 6972 queue_flag |= ENQUEUE_REPLENISH; 6973 } else { 6974 p->dl.pi_se = &p->dl; 6975 } 6976 } else if (rt_prio(prio)) { 6977 if (dl_prio(oldprio)) 6978 p->dl.pi_se = &p->dl; 6979 if (oldprio < prio) 6980 queue_flag |= ENQUEUE_HEAD; 6981 } else { 6982 if (dl_prio(oldprio)) 6983 p->dl.pi_se = &p->dl; 6984 if (rt_prio(oldprio)) 6985 p->rt.timeout = 0; 6986 } 6987 6988 __setscheduler_prio(p, prio); 6989 6990 if (queued) 6991 enqueue_task(rq, p, queue_flag); 6992 if (running) 6993 set_next_task(rq, p); 6994 6995 check_class_changed(rq, p, prev_class, oldprio); 6996 out_unlock: 6997 /* Avoid rq from going away on us: */ 6998 preempt_disable(); 6999 7000 rq_unpin_lock(rq, &rf); 7001 __balance_callbacks(rq); 7002 raw_spin_rq_unlock(rq); 7003 7004 preempt_enable(); 7005 } 7006 #else 7007 static inline int rt_effective_prio(struct task_struct *p, int prio) 7008 { 7009 return prio; 7010 } 7011 #endif 7012 7013 void set_user_nice(struct task_struct *p, long nice) 7014 { 7015 bool queued, running; 7016 int old_prio; 7017 struct rq_flags rf; 7018 struct rq *rq; 7019 7020 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 7021 return; 7022 /* 7023 * We have to be careful: if called from sys_setpriority(), 7024 * the task might be in the middle of scheduling on another CPU. 7025 */ 7026 rq = task_rq_lock(p, &rf); 7027 update_rq_clock(rq); 7028 7029 /* 7030 * The RT priorities are set via sched_setscheduler(), but we still 7031 * allow the 'normal' nice value to be set - but as expected 7032 * it won't have any effect on scheduling as long as the task is 7033 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 7034 */ 7035 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 7036 p->static_prio = NICE_TO_PRIO(nice); 7037 goto out_unlock; 7038 } 7039 queued = task_on_rq_queued(p); 7040 running = task_current(rq, p); 7041 if (queued) 7042 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 7043 if (running) 7044 put_prev_task(rq, p); 7045 7046 p->static_prio = NICE_TO_PRIO(nice); 7047 set_load_weight(p, true); 7048 old_prio = p->prio; 7049 p->prio = effective_prio(p); 7050 7051 if (queued) 7052 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 7053 if (running) 7054 set_next_task(rq, p); 7055 7056 /* 7057 * If the task increased its priority or is running and 7058 * lowered its priority, then reschedule its CPU: 7059 */ 7060 p->sched_class->prio_changed(rq, p, old_prio); 7061 7062 out_unlock: 7063 task_rq_unlock(rq, p, &rf); 7064 } 7065 EXPORT_SYMBOL(set_user_nice); 7066 7067 /* 7068 * is_nice_reduction - check if nice value is an actual reduction 7069 * 7070 * Similar to can_nice() but does not perform a capability check.
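 *
 * For example, nice_to_rlimit(-10) maps to an rlimit value of 30, so a
 * task with RLIMIT_NICE of at least 30 may lower its nice value to -10
 * here; can_nice() below additionally accepts CAP_SYS_NICE.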
7071 * 7072 * @p: task 7073 * @nice: nice value 7074 */ 7075 static bool is_nice_reduction(const struct task_struct *p, const int nice) 7076 { 7077 /* Convert nice value [19,-20] to rlimit style value [1,40]: */ 7078 int nice_rlim = nice_to_rlimit(nice); 7079 7080 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE)); 7081 } 7082 7083 /* 7084 * can_nice - check if a task can reduce its nice value 7085 * @p: task 7086 * @nice: nice value 7087 */ 7088 int can_nice(const struct task_struct *p, const int nice) 7089 { 7090 return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE); 7091 } 7092 7093 #ifdef __ARCH_WANT_SYS_NICE 7094 7095 /* 7096 * sys_nice - change the priority of the current process. 7097 * @increment: priority increment 7098 * 7099 * sys_setpriority is a more generic, but much slower function that 7100 * does similar things. 7101 */ 7102 SYSCALL_DEFINE1(nice, int, increment) 7103 { 7104 long nice, retval; 7105 7106 /* 7107 * Setpriority might change our priority at the same moment. 7108 * We don't have to worry. Conceptually one call occurs first 7109 * and we have a single winner. 7110 */ 7111 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 7112 nice = task_nice(current) + increment; 7113 7114 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 7115 if (increment < 0 && !can_nice(current, nice)) 7116 return -EPERM; 7117 7118 retval = security_task_setnice(current, nice); 7119 if (retval) 7120 return retval; 7121 7122 set_user_nice(current, nice); 7123 return 0; 7124 } 7125 7126 #endif 7127 7128 /** 7129 * task_prio - return the priority value of a given task. 7130 * @p: the task in question. 7131 * 7132 * Return: The priority value as seen by users in /proc. 7133 * 7134 * sched policy return value kernel prio user prio/nice 7135 * 7136 * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19] 7137 * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99] 7138 * deadline -101 -1 0 7139 */ 7140 int task_prio(const struct task_struct *p) 7141 { 7142 return p->prio - MAX_RT_PRIO; 7143 } 7144 7145 /** 7146 * idle_cpu - is a given CPU idle currently? 7147 * @cpu: the processor in question. 7148 * 7149 * Return: 1 if the CPU is currently idle. 0 otherwise. 7150 */ 7151 int idle_cpu(int cpu) 7152 { 7153 struct rq *rq = cpu_rq(cpu); 7154 7155 if (rq->curr != rq->idle) 7156 return 0; 7157 7158 if (rq->nr_running) 7159 return 0; 7160 7161 #ifdef CONFIG_SMP 7162 if (rq->ttwu_pending) 7163 return 0; 7164 #endif 7165 7166 return 1; 7167 } 7168 7169 /** 7170 * available_idle_cpu - is a given CPU idle for enqueuing work. 7171 * @cpu: the CPU in question. 7172 * 7173 * Return: 1 if the CPU is currently idle. 0 otherwise. 7174 */ 7175 int available_idle_cpu(int cpu) 7176 { 7177 if (!idle_cpu(cpu)) 7178 return 0; 7179 7180 if (vcpu_is_preempted(cpu)) 7181 return 0; 7182 7183 return 1; 7184 } 7185 7186 /** 7187 * idle_task - return the idle task for a given CPU. 7188 * @cpu: the processor in question. 7189 * 7190 * Return: The idle task for the CPU @cpu. 7191 */ 7192 struct task_struct *idle_task(int cpu) 7193 { 7194 return cpu_rq(cpu)->idle; 7195 } 7196 7197 #ifdef CONFIG_SMP 7198 /* 7199 * This function computes an effective utilization for the given CPU, to be 7200 * used for frequency selection given the linear relation: f = u * f_max. 7201 * 7202 * The scheduler tracks the following metrics: 7203 * 7204 * cpu_util_{cfs,rt,dl,irq}() 7205 * cpu_bw_dl() 7206 * 7207 * Where the cfs,rt and dl util numbers are tracked with the same metric and 7208 * synchronized windows and are thus directly comparable. 
7209 * 7210 * The cfs,rt,dl utilization are the running times measured with rq->clock_task 7211 * which excludes things like IRQ and steal-time. These latter are then accrued 7212 * in the irq utilization. 7213 * 7214 * The DL bandwidth number otoh is not a measured metric but a value computed 7215 * based on the task model parameters and gives the minimal utilization 7216 * required to meet deadlines. 7217 */ 7218 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 7219 enum cpu_util_type type, 7220 struct task_struct *p) 7221 { 7222 unsigned long dl_util, util, irq, max; 7223 struct rq *rq = cpu_rq(cpu); 7224 7225 max = arch_scale_cpu_capacity(cpu); 7226 7227 if (!uclamp_is_used() && 7228 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) { 7229 return max; 7230 } 7231 7232 /* 7233 * Early check to see if IRQ/steal time saturates the CPU, can be 7234 * because of inaccuracies in how we track these -- see 7235 * update_irq_load_avg(). 7236 */ 7237 irq = cpu_util_irq(rq); 7238 if (unlikely(irq >= max)) 7239 return max; 7240 7241 /* 7242 * Because the time spend on RT/DL tasks is visible as 'lost' time to 7243 * CFS tasks and we use the same metric to track the effective 7244 * utilization (PELT windows are synchronized) we can directly add them 7245 * to obtain the CPU's actual utilization. 7246 * 7247 * CFS and RT utilization can be boosted or capped, depending on 7248 * utilization clamp constraints requested by currently RUNNABLE 7249 * tasks. 7250 * When there are no CFS RUNNABLE tasks, clamps are released and 7251 * frequency will be gracefully reduced with the utilization decay. 7252 */ 7253 util = util_cfs + cpu_util_rt(rq); 7254 if (type == FREQUENCY_UTIL) 7255 util = uclamp_rq_util_with(rq, util, p); 7256 7257 dl_util = cpu_util_dl(rq); 7258 7259 /* 7260 * For frequency selection we do not make cpu_util_dl() a permanent part 7261 * of this sum because we want to use cpu_bw_dl() later on, but we need 7262 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such 7263 * that we select f_max when there is no idle time. 7264 * 7265 * NOTE: numerical errors or stop class might cause us to not quite hit 7266 * saturation when we should -- something for later. 7267 */ 7268 if (util + dl_util >= max) 7269 return max; 7270 7271 /* 7272 * OTOH, for energy computation we need the estimated running time, so 7273 * include util_dl and ignore dl_bw. 7274 */ 7275 if (type == ENERGY_UTIL) 7276 util += dl_util; 7277 7278 /* 7279 * There is still idle time; further improve the number by using the 7280 * irq metric. Because IRQ/steal time is hidden from the task clock we 7281 * need to scale the task numbers: 7282 * 7283 * max - irq 7284 * U' = irq + --------- * U 7285 * max 7286 */ 7287 util = scale_irq_capacity(util, irq, max); 7288 util += irq; 7289 7290 /* 7291 * Bandwidth required by DEADLINE must always be granted while, for 7292 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism 7293 * to gracefully reduce the frequency when no tasks show up for longer 7294 * periods of time. 7295 * 7296 * Ideally we would like to set bw_dl as min/guaranteed freq and util + 7297 * bw_dl as requested freq. However, cpufreq is not yet ready for such 7298 * an interface. So, we only do the latter for now. 
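 *
 * As a rough worked example for FREQUENCY_UTIL (ignoring uclamp): with
 * max = 1024, irq = 128, util_cfs = 400 and no RT/DL contribution, the
 * IRQ scaling above gives 400 * (1024 - 128) / 1024 = 350, and the
 * function returns min(1024, 350 + 128) = 478.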
7299 */ 7300 if (type == FREQUENCY_UTIL) 7301 util += cpu_bw_dl(rq); 7302 7303 return min(max, util); 7304 } 7305 7306 unsigned long sched_cpu_util(int cpu) 7307 { 7308 return effective_cpu_util(cpu, cpu_util_cfs(cpu), ENERGY_UTIL, NULL); 7309 } 7310 #endif /* CONFIG_SMP */ 7311 7312 /** 7313 * find_process_by_pid - find a process with a matching PID value. 7314 * @pid: the pid in question. 7315 * 7316 * The task of @pid, if found. %NULL otherwise. 7317 */ 7318 static struct task_struct *find_process_by_pid(pid_t pid) 7319 { 7320 return pid ? find_task_by_vpid(pid) : current; 7321 } 7322 7323 /* 7324 * sched_setparam() passes in -1 for its policy, to let the functions 7325 * it calls know not to change it. 7326 */ 7327 #define SETPARAM_POLICY -1 7328 7329 static void __setscheduler_params(struct task_struct *p, 7330 const struct sched_attr *attr) 7331 { 7332 int policy = attr->sched_policy; 7333 7334 if (policy == SETPARAM_POLICY) 7335 policy = p->policy; 7336 7337 p->policy = policy; 7338 7339 if (dl_policy(policy)) 7340 __setparam_dl(p, attr); 7341 else if (fair_policy(policy)) 7342 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 7343 7344 /* 7345 * __sched_setscheduler() ensures attr->sched_priority == 0 when 7346 * !rt_policy. Always setting this ensures that things like 7347 * getparam()/getattr() don't report silly values for !rt tasks. 7348 */ 7349 p->rt_priority = attr->sched_priority; 7350 p->normal_prio = normal_prio(p); 7351 set_load_weight(p, true); 7352 } 7353 7354 /* 7355 * Check the target process has a UID that matches the current process's: 7356 */ 7357 static bool check_same_owner(struct task_struct *p) 7358 { 7359 const struct cred *cred = current_cred(), *pcred; 7360 bool match; 7361 7362 rcu_read_lock(); 7363 pcred = __task_cred(p); 7364 match = (uid_eq(cred->euid, pcred->euid) || 7365 uid_eq(cred->euid, pcred->uid)); 7366 rcu_read_unlock(); 7367 return match; 7368 } 7369 7370 /* 7371 * Allow unprivileged RT tasks to decrease priority. 7372 * Only issue a capable test if needed and only once to avoid an audit 7373 * event on permitted non-privileged operations: 7374 */ 7375 static int user_check_sched_setscheduler(struct task_struct *p, 7376 const struct sched_attr *attr, 7377 int policy, int reset_on_fork) 7378 { 7379 if (fair_policy(policy)) { 7380 if (attr->sched_nice < task_nice(p) && 7381 !is_nice_reduction(p, attr->sched_nice)) 7382 goto req_priv; 7383 } 7384 7385 if (rt_policy(policy)) { 7386 unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); 7387 7388 /* Can't set/change the rt policy: */ 7389 if (policy != p->policy && !rlim_rtprio) 7390 goto req_priv; 7391 7392 /* Can't increase priority: */ 7393 if (attr->sched_priority > p->rt_priority && 7394 attr->sched_priority > rlim_rtprio) 7395 goto req_priv; 7396 } 7397 7398 /* 7399 * Can't set/change SCHED_DEADLINE policy at all for now 7400 * (safest behavior); in the future we would like to allow 7401 * unprivileged DL tasks to increase their relative deadline 7402 * or reduce their runtime (both ways reducing utilization) 7403 */ 7404 if (dl_policy(policy)) 7405 goto req_priv; 7406 7407 /* 7408 * Treat SCHED_IDLE as nice 20. Only allow a switch to 7409 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 
7410 */ 7411 if (task_has_idle_policy(p) && !idle_policy(policy)) { 7412 if (!is_nice_reduction(p, task_nice(p))) 7413 goto req_priv; 7414 } 7415 7416 /* Can't change other user's priorities: */ 7417 if (!check_same_owner(p)) 7418 goto req_priv; 7419 7420 /* Normal users shall not reset the sched_reset_on_fork flag: */ 7421 if (p->sched_reset_on_fork && !reset_on_fork) 7422 goto req_priv; 7423 7424 return 0; 7425 7426 req_priv: 7427 if (!capable(CAP_SYS_NICE)) 7428 return -EPERM; 7429 7430 return 0; 7431 } 7432 7433 static int __sched_setscheduler(struct task_struct *p, 7434 const struct sched_attr *attr, 7435 bool user, bool pi) 7436 { 7437 int oldpolicy = -1, policy = attr->sched_policy; 7438 int retval, oldprio, newprio, queued, running; 7439 const struct sched_class *prev_class; 7440 struct balance_callback *head; 7441 struct rq_flags rf; 7442 int reset_on_fork; 7443 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 7444 struct rq *rq; 7445 7446 /* The pi code expects interrupts enabled */ 7447 BUG_ON(pi && in_interrupt()); 7448 recheck: 7449 /* Double check policy once rq lock held: */ 7450 if (policy < 0) { 7451 reset_on_fork = p->sched_reset_on_fork; 7452 policy = oldpolicy = p->policy; 7453 } else { 7454 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 7455 7456 if (!valid_policy(policy)) 7457 return -EINVAL; 7458 } 7459 7460 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) 7461 return -EINVAL; 7462 7463 /* 7464 * Valid priorities for SCHED_FIFO and SCHED_RR are 7465 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL, 7466 * SCHED_BATCH and SCHED_IDLE is 0. 7467 */ 7468 if (attr->sched_priority > MAX_RT_PRIO-1) 7469 return -EINVAL; 7470 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 7471 (rt_policy(policy) != (attr->sched_priority != 0))) 7472 return -EINVAL; 7473 7474 if (user) { 7475 retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork); 7476 if (retval) 7477 return retval; 7478 7479 if (attr->sched_flags & SCHED_FLAG_SUGOV) 7480 return -EINVAL; 7481 7482 retval = security_task_setscheduler(p); 7483 if (retval) 7484 return retval; 7485 } 7486 7487 /* Update task specific "requested" clamps */ 7488 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { 7489 retval = uclamp_validate(p, attr); 7490 if (retval) 7491 return retval; 7492 } 7493 7494 if (pi) 7495 cpuset_read_lock(); 7496 7497 /* 7498 * Make sure no PI-waiters arrive (or leave) while we are 7499 * changing the priority of the task: 7500 * 7501 * To be able to change p->policy safely, the appropriate 7502 * runqueue lock must be held. 7503 */ 7504 rq = task_rq_lock(p, &rf); 7505 update_rq_clock(rq); 7506 7507 /* 7508 * Changing the policy of the stop threads its a very bad idea: 7509 */ 7510 if (p == rq->stop) { 7511 retval = -EINVAL; 7512 goto unlock; 7513 } 7514 7515 /* 7516 * If not changing anything there's no need to proceed further, 7517 * but store a possible modification of reset_on_fork. 
7518 */ 7519 if (unlikely(policy == p->policy)) { 7520 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 7521 goto change; 7522 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 7523 goto change; 7524 if (dl_policy(policy) && dl_param_changed(p, attr)) 7525 goto change; 7526 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) 7527 goto change; 7528 7529 p->sched_reset_on_fork = reset_on_fork; 7530 retval = 0; 7531 goto unlock; 7532 } 7533 change: 7534 7535 if (user) { 7536 #ifdef CONFIG_RT_GROUP_SCHED 7537 /* 7538 * Do not allow realtime tasks into groups that have no runtime 7539 * assigned. 7540 */ 7541 if (rt_bandwidth_enabled() && rt_policy(policy) && 7542 task_group(p)->rt_bandwidth.rt_runtime == 0 && 7543 !task_group_is_autogroup(task_group(p))) { 7544 retval = -EPERM; 7545 goto unlock; 7546 } 7547 #endif 7548 #ifdef CONFIG_SMP 7549 if (dl_bandwidth_enabled() && dl_policy(policy) && 7550 !(attr->sched_flags & SCHED_FLAG_SUGOV)) { 7551 cpumask_t *span = rq->rd->span; 7552 7553 /* 7554 * Don't allow tasks with an affinity mask smaller than 7555 * the entire root_domain to become SCHED_DEADLINE. We 7556 * will also fail if there's no bandwidth available. 7557 */ 7558 if (!cpumask_subset(span, p->cpus_ptr) || 7559 rq->rd->dl_bw.bw == 0) { 7560 retval = -EPERM; 7561 goto unlock; 7562 } 7563 } 7564 #endif 7565 } 7566 7567 /* Re-check policy now with rq lock held: */ 7568 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 7569 policy = oldpolicy = -1; 7570 task_rq_unlock(rq, p, &rf); 7571 if (pi) 7572 cpuset_read_unlock(); 7573 goto recheck; 7574 } 7575 7576 /* 7577 * If setscheduling to SCHED_DEADLINE (or changing the parameters 7578 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 7579 * is available. 7580 */ 7581 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { 7582 retval = -EBUSY; 7583 goto unlock; 7584 } 7585 7586 p->sched_reset_on_fork = reset_on_fork; 7587 oldprio = p->prio; 7588 7589 newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice); 7590 if (pi) { 7591 /* 7592 * Take priority boosted tasks into account. If the new 7593 * effective priority is unchanged, we just store the new 7594 * normal parameters and do not touch the scheduler class and 7595 * the runqueue. This will be done when the task deboost 7596 * itself. 7597 */ 7598 newprio = rt_effective_prio(p, newprio); 7599 if (newprio == oldprio) 7600 queue_flags &= ~DEQUEUE_MOVE; 7601 } 7602 7603 queued = task_on_rq_queued(p); 7604 running = task_current(rq, p); 7605 if (queued) 7606 dequeue_task(rq, p, queue_flags); 7607 if (running) 7608 put_prev_task(rq, p); 7609 7610 prev_class = p->sched_class; 7611 7612 if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) { 7613 __setscheduler_params(p, attr); 7614 __setscheduler_prio(p, newprio); 7615 } 7616 __setscheduler_uclamp(p, attr); 7617 7618 if (queued) { 7619 /* 7620 * We enqueue to tail when the priority of a task is 7621 * increased (user space view). 
7622 */ 7623 if (oldprio < p->prio) 7624 queue_flags |= ENQUEUE_HEAD; 7625 7626 enqueue_task(rq, p, queue_flags); 7627 } 7628 if (running) 7629 set_next_task(rq, p); 7630 7631 check_class_changed(rq, p, prev_class, oldprio); 7632 7633 /* Avoid rq from going away on us: */ 7634 preempt_disable(); 7635 head = splice_balance_callbacks(rq); 7636 task_rq_unlock(rq, p, &rf); 7637 7638 if (pi) { 7639 cpuset_read_unlock(); 7640 rt_mutex_adjust_pi(p); 7641 } 7642 7643 /* Run balance callbacks after we've adjusted the PI chain: */ 7644 balance_callbacks(rq, head); 7645 preempt_enable(); 7646 7647 return 0; 7648 7649 unlock: 7650 task_rq_unlock(rq, p, &rf); 7651 if (pi) 7652 cpuset_read_unlock(); 7653 return retval; 7654 } 7655 7656 static int _sched_setscheduler(struct task_struct *p, int policy, 7657 const struct sched_param *param, bool check) 7658 { 7659 struct sched_attr attr = { 7660 .sched_policy = policy, 7661 .sched_priority = param->sched_priority, 7662 .sched_nice = PRIO_TO_NICE(p->static_prio), 7663 }; 7664 7665 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 7666 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 7667 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 7668 policy &= ~SCHED_RESET_ON_FORK; 7669 attr.sched_policy = policy; 7670 } 7671 7672 return __sched_setscheduler(p, &attr, check, true); 7673 } 7674 /** 7675 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 7676 * @p: the task in question. 7677 * @policy: new policy. 7678 * @param: structure containing the new RT priority. 7679 * 7680 * Use sched_set_fifo(), read its comment. 7681 * 7682 * Return: 0 on success. An error code otherwise. 7683 * 7684 * NOTE that the task may be already dead. 7685 */ 7686 int sched_setscheduler(struct task_struct *p, int policy, 7687 const struct sched_param *param) 7688 { 7689 return _sched_setscheduler(p, policy, param, true); 7690 } 7691 7692 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 7693 { 7694 return __sched_setscheduler(p, attr, true, true); 7695 } 7696 7697 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) 7698 { 7699 return __sched_setscheduler(p, attr, false, true); 7700 } 7701 EXPORT_SYMBOL_GPL(sched_setattr_nocheck); 7702 7703 /** 7704 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 7705 * @p: the task in question. 7706 * @policy: new policy. 7707 * @param: structure containing the new RT priority. 7708 * 7709 * Just like sched_setscheduler, only don't bother checking if the 7710 * current context has permission. For example, this is needed in 7711 * stop_machine(): we create temporary high priority worker threads, 7712 * but our caller might not have that capability. 7713 * 7714 * Return: 0 on success. An error code otherwise. 7715 */ 7716 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 7717 const struct sched_param *param) 7718 { 7719 return _sched_setscheduler(p, policy, param, false); 7720 } 7721 7722 /* 7723 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally 7724 * incapable of resource management, which is the one thing an OS really should 7725 * be doing. 7726 * 7727 * This is of course the reason it is limited to privileged users only. 7728 * 7729 * Worse still; it is fundamentally impossible to compose static priority 7730 * workloads. You cannot take two correctly working static prio workloads 7731 * and smash them together and still expect them to work. 
7732 * 7733 * For this reason 'all' FIFO tasks the kernel creates are basically at: 7734 * 7735 * MAX_RT_PRIO / 2 7736 * 7737 * The administrator _MUST_ configure the system, the kernel simply doesn't 7738 * know enough information to make a sensible choice. 7739 */ 7740 void sched_set_fifo(struct task_struct *p) 7741 { 7742 struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; 7743 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 7744 } 7745 EXPORT_SYMBOL_GPL(sched_set_fifo); 7746 7747 /* 7748 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. 7749 */ 7750 void sched_set_fifo_low(struct task_struct *p) 7751 { 7752 struct sched_param sp = { .sched_priority = 1 }; 7753 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 7754 } 7755 EXPORT_SYMBOL_GPL(sched_set_fifo_low); 7756 7757 void sched_set_normal(struct task_struct *p, int nice) 7758 { 7759 struct sched_attr attr = { 7760 .sched_policy = SCHED_NORMAL, 7761 .sched_nice = nice, 7762 }; 7763 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); 7764 } 7765 EXPORT_SYMBOL_GPL(sched_set_normal); 7766 7767 static int 7768 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 7769 { 7770 struct sched_param lparam; 7771 struct task_struct *p; 7772 int retval; 7773 7774 if (!param || pid < 0) 7775 return -EINVAL; 7776 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 7777 return -EFAULT; 7778 7779 rcu_read_lock(); 7780 retval = -ESRCH; 7781 p = find_process_by_pid(pid); 7782 if (likely(p)) 7783 get_task_struct(p); 7784 rcu_read_unlock(); 7785 7786 if (likely(p)) { 7787 retval = sched_setscheduler(p, policy, &lparam); 7788 put_task_struct(p); 7789 } 7790 7791 return retval; 7792 } 7793 7794 /* 7795 * Mimics kernel/events/core.c perf_copy_attr(). 7796 */ 7797 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) 7798 { 7799 u32 size; 7800 int ret; 7801 7802 /* Zero the full structure, so that a short copy will be nice: */ 7803 memset(attr, 0, sizeof(*attr)); 7804 7805 ret = get_user(size, &uattr->size); 7806 if (ret) 7807 return ret; 7808 7809 /* ABI compatibility quirk: */ 7810 if (!size) 7811 size = SCHED_ATTR_SIZE_VER0; 7812 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) 7813 goto err_size; 7814 7815 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); 7816 if (ret) { 7817 if (ret == -E2BIG) 7818 goto err_size; 7819 return ret; 7820 } 7821 7822 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 7823 size < SCHED_ATTR_SIZE_VER1) 7824 return -EINVAL; 7825 7826 /* 7827 * XXX: Do we want to be lenient like existing syscalls; or do we want 7828 * to be strict and return an error on out-of-bounds values? 7829 */ 7830 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 7831 7832 return 0; 7833 7834 err_size: 7835 put_user(sizeof(*attr), &uattr->size); 7836 return -E2BIG; 7837 } 7838 7839 static void get_params(struct task_struct *p, struct sched_attr *attr) 7840 { 7841 if (task_has_dl_policy(p)) 7842 __getparam_dl(p, attr); 7843 else if (task_has_rt_policy(p)) 7844 attr->sched_priority = p->rt_priority; 7845 else 7846 attr->sched_nice = task_nice(p); 7847 } 7848 7849 /** 7850 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 7851 * @pid: the pid in question. 7852 * @policy: new policy. 7853 * @param: structure containing the new RT priority. 7854 * 7855 * Return: 0 on success. An error code otherwise. 
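 *
 * Illustrative userspace sketch (not part of this file): the glibc wrapper
 * of this syscall can make the calling thread SCHED_FIFO with static
 * priority 10:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
 *		perror("sched_setscheduler");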
7856 */ 7857 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) 7858 { 7859 if (policy < 0) 7860 return -EINVAL; 7861 7862 return do_sched_setscheduler(pid, policy, param); 7863 } 7864 7865 /** 7866 * sys_sched_setparam - set/change the RT priority of a thread 7867 * @pid: the pid in question. 7868 * @param: structure containing the new RT priority. 7869 * 7870 * Return: 0 on success. An error code otherwise. 7871 */ 7872 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 7873 { 7874 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 7875 } 7876 7877 /** 7878 * sys_sched_setattr - same as above, but with extended sched_attr 7879 * @pid: the pid in question. 7880 * @uattr: structure containing the extended parameters. 7881 * @flags: for future extension. 7882 */ 7883 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 7884 unsigned int, flags) 7885 { 7886 struct sched_attr attr; 7887 struct task_struct *p; 7888 int retval; 7889 7890 if (!uattr || pid < 0 || flags) 7891 return -EINVAL; 7892 7893 retval = sched_copy_attr(uattr, &attr); 7894 if (retval) 7895 return retval; 7896 7897 if ((int)attr.sched_policy < 0) 7898 return -EINVAL; 7899 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) 7900 attr.sched_policy = SETPARAM_POLICY; 7901 7902 rcu_read_lock(); 7903 retval = -ESRCH; 7904 p = find_process_by_pid(pid); 7905 if (likely(p)) 7906 get_task_struct(p); 7907 rcu_read_unlock(); 7908 7909 if (likely(p)) { 7910 if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS) 7911 get_params(p, &attr); 7912 retval = sched_setattr(p, &attr); 7913 put_task_struct(p); 7914 } 7915 7916 return retval; 7917 } 7918 7919 /** 7920 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 7921 * @pid: the pid in question. 7922 * 7923 * Return: On success, the policy of the thread. Otherwise, a negative error 7924 * code. 7925 */ 7926 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 7927 { 7928 struct task_struct *p; 7929 int retval; 7930 7931 if (pid < 0) 7932 return -EINVAL; 7933 7934 retval = -ESRCH; 7935 rcu_read_lock(); 7936 p = find_process_by_pid(pid); 7937 if (p) { 7938 retval = security_task_getscheduler(p); 7939 if (!retval) 7940 retval = p->policy 7941 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 7942 } 7943 rcu_read_unlock(); 7944 return retval; 7945 } 7946 7947 /** 7948 * sys_sched_getparam - get the RT priority of a thread 7949 * @pid: the pid in question. 7950 * @param: structure containing the RT priority. 7951 * 7952 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 7953 * code. 7954 */ 7955 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 7956 { 7957 struct sched_param lp = { .sched_priority = 0 }; 7958 struct task_struct *p; 7959 int retval; 7960 7961 if (!param || pid < 0) 7962 return -EINVAL; 7963 7964 rcu_read_lock(); 7965 p = find_process_by_pid(pid); 7966 retval = -ESRCH; 7967 if (!p) 7968 goto out_unlock; 7969 7970 retval = security_task_getscheduler(p); 7971 if (retval) 7972 goto out_unlock; 7973 7974 if (task_has_rt_policy(p)) 7975 lp.sched_priority = p->rt_priority; 7976 rcu_read_unlock(); 7977 7978 /* 7979 * This one might sleep, we cannot do it with a spinlock held ... 7980 */ 7981 retval = copy_to_user(param, &lp, sizeof(*param)) ? 
-EFAULT : 0; 7982 7983 return retval; 7984 7985 out_unlock: 7986 rcu_read_unlock(); 7987 return retval; 7988 } 7989 7990 /* 7991 * Copy the kernel size attribute structure (which might be larger 7992 * than what user-space knows about) to user-space. 7993 * 7994 * Note that all cases are valid: user-space buffer can be larger or 7995 * smaller than the kernel-space buffer. The usual case is that both 7996 * have the same size. 7997 */ 7998 static int 7999 sched_attr_copy_to_user(struct sched_attr __user *uattr, 8000 struct sched_attr *kattr, 8001 unsigned int usize) 8002 { 8003 unsigned int ksize = sizeof(*kattr); 8004 8005 if (!access_ok(uattr, usize)) 8006 return -EFAULT; 8007 8008 /* 8009 * sched_getattr() ABI forwards and backwards compatibility: 8010 * 8011 * If usize == ksize then we just copy everything to user-space and all is good. 8012 * 8013 * If usize < ksize then we only copy as much as user-space has space for, 8014 * this keeps ABI compatibility as well. We skip the rest. 8015 * 8016 * If usize > ksize then user-space is using a newer version of the ABI, 8017 * which part the kernel doesn't know about. Just ignore it - tooling can 8018 * detect the kernel's knowledge of attributes from the attr->size value 8019 * which is set to ksize in this case. 8020 */ 8021 kattr->size = min(usize, ksize); 8022 8023 if (copy_to_user(uattr, kattr, kattr->size)) 8024 return -EFAULT; 8025 8026 return 0; 8027 } 8028 8029 /** 8030 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 8031 * @pid: the pid in question. 8032 * @uattr: structure containing the extended parameters. 8033 * @usize: sizeof(attr) for fwd/bwd comp. 8034 * @flags: for future extension. 8035 */ 8036 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 8037 unsigned int, usize, unsigned int, flags) 8038 { 8039 struct sched_attr kattr = { }; 8040 struct task_struct *p; 8041 int retval; 8042 8043 if (!uattr || pid < 0 || usize > PAGE_SIZE || 8044 usize < SCHED_ATTR_SIZE_VER0 || flags) 8045 return -EINVAL; 8046 8047 rcu_read_lock(); 8048 p = find_process_by_pid(pid); 8049 retval = -ESRCH; 8050 if (!p) 8051 goto out_unlock; 8052 8053 retval = security_task_getscheduler(p); 8054 if (retval) 8055 goto out_unlock; 8056 8057 kattr.sched_policy = p->policy; 8058 if (p->sched_reset_on_fork) 8059 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 8060 get_params(p, &kattr); 8061 kattr.sched_flags &= SCHED_FLAG_ALL; 8062 8063 #ifdef CONFIG_UCLAMP_TASK 8064 /* 8065 * This could race with another potential updater, but this is fine 8066 * because it'll correctly read the old or the new value. We don't need 8067 * to guarantee who wins the race as long as it doesn't return garbage. 8068 */ 8069 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 8070 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 8071 #endif 8072 8073 rcu_read_unlock(); 8074 8075 return sched_attr_copy_to_user(uattr, &kattr, usize); 8076 8077 out_unlock: 8078 rcu_read_unlock(); 8079 return retval; 8080 } 8081 8082 #ifdef CONFIG_SMP 8083 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) 8084 { 8085 int ret = 0; 8086 8087 /* 8088 * If the task isn't a deadline task or admission control is 8089 * disabled then we don't care about affinity changes. 
8090 */ 8091 if (!task_has_dl_policy(p) || !dl_bandwidth_enabled()) 8092 return 0; 8093 8094 /* 8095 * Since bandwidth control happens on root_domain basis, 8096 * if admission test is enabled, we only admit -deadline 8097 * tasks allowed to run on all the CPUs in the task's 8098 * root_domain. 8099 */ 8100 rcu_read_lock(); 8101 if (!cpumask_subset(task_rq(p)->rd->span, mask)) 8102 ret = -EBUSY; 8103 rcu_read_unlock(); 8104 return ret; 8105 } 8106 #endif 8107 8108 static int 8109 __sched_setaffinity(struct task_struct *p, const struct cpumask *mask) 8110 { 8111 int retval; 8112 cpumask_var_t cpus_allowed, new_mask; 8113 8114 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) 8115 return -ENOMEM; 8116 8117 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 8118 retval = -ENOMEM; 8119 goto out_free_cpus_allowed; 8120 } 8121 8122 cpuset_cpus_allowed(p, cpus_allowed); 8123 cpumask_and(new_mask, mask, cpus_allowed); 8124 8125 retval = dl_task_check_affinity(p, new_mask); 8126 if (retval) 8127 goto out_free_new_mask; 8128 again: 8129 retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER); 8130 if (retval) 8131 goto out_free_new_mask; 8132 8133 cpuset_cpus_allowed(p, cpus_allowed); 8134 if (!cpumask_subset(new_mask, cpus_allowed)) { 8135 /* 8136 * We must have raced with a concurrent cpuset update. 8137 * Just reset the cpumask to the cpuset's cpus_allowed. 8138 */ 8139 cpumask_copy(new_mask, cpus_allowed); 8140 goto again; 8141 } 8142 8143 out_free_new_mask: 8144 free_cpumask_var(new_mask); 8145 out_free_cpus_allowed: 8146 free_cpumask_var(cpus_allowed); 8147 return retval; 8148 } 8149 8150 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 8151 { 8152 struct task_struct *p; 8153 int retval; 8154 8155 rcu_read_lock(); 8156 8157 p = find_process_by_pid(pid); 8158 if (!p) { 8159 rcu_read_unlock(); 8160 return -ESRCH; 8161 } 8162 8163 /* Prevent p going away */ 8164 get_task_struct(p); 8165 rcu_read_unlock(); 8166 8167 if (p->flags & PF_NO_SETAFFINITY) { 8168 retval = -EINVAL; 8169 goto out_put_task; 8170 } 8171 8172 if (!check_same_owner(p)) { 8173 rcu_read_lock(); 8174 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 8175 rcu_read_unlock(); 8176 retval = -EPERM; 8177 goto out_put_task; 8178 } 8179 rcu_read_unlock(); 8180 } 8181 8182 retval = security_task_setscheduler(p); 8183 if (retval) 8184 goto out_put_task; 8185 8186 retval = __sched_setaffinity(p, in_mask); 8187 out_put_task: 8188 put_task_struct(p); 8189 return retval; 8190 } 8191 8192 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 8193 struct cpumask *new_mask) 8194 { 8195 if (len < cpumask_size()) 8196 cpumask_clear(new_mask); 8197 else if (len > cpumask_size()) 8198 len = cpumask_size(); 8199 8200 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 8201 } 8202 8203 /** 8204 * sys_sched_setaffinity - set the CPU affinity of a process 8205 * @pid: pid of the process 8206 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 8207 * @user_mask_ptr: user-space pointer to the new CPU mask 8208 * 8209 * Return: 0 on success. An error code otherwise. 
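 *
 * Illustrative userspace sketch (not part of this file): the glibc wrapper
 * can pin the calling thread to CPU 0:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set) == -1)
 *		perror("sched_setaffinity");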
8210 */ 8211 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 8212 unsigned long __user *, user_mask_ptr) 8213 { 8214 cpumask_var_t new_mask; 8215 int retval; 8216 8217 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 8218 return -ENOMEM; 8219 8220 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 8221 if (retval == 0) 8222 retval = sched_setaffinity(pid, new_mask); 8223 free_cpumask_var(new_mask); 8224 return retval; 8225 } 8226 8227 long sched_getaffinity(pid_t pid, struct cpumask *mask) 8228 { 8229 struct task_struct *p; 8230 unsigned long flags; 8231 int retval; 8232 8233 rcu_read_lock(); 8234 8235 retval = -ESRCH; 8236 p = find_process_by_pid(pid); 8237 if (!p) 8238 goto out_unlock; 8239 8240 retval = security_task_getscheduler(p); 8241 if (retval) 8242 goto out_unlock; 8243 8244 raw_spin_lock_irqsave(&p->pi_lock, flags); 8245 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); 8246 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 8247 8248 out_unlock: 8249 rcu_read_unlock(); 8250 8251 return retval; 8252 } 8253 8254 /** 8255 * sys_sched_getaffinity - get the CPU affinity of a process 8256 * @pid: pid of the process 8257 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 8258 * @user_mask_ptr: user-space pointer to hold the current CPU mask 8259 * 8260 * Return: size of CPU mask copied to user_mask_ptr on success. An 8261 * error code otherwise. 8262 */ 8263 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 8264 unsigned long __user *, user_mask_ptr) 8265 { 8266 int ret; 8267 cpumask_var_t mask; 8268 8269 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 8270 return -EINVAL; 8271 if (len & (sizeof(unsigned long)-1)) 8272 return -EINVAL; 8273 8274 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 8275 return -ENOMEM; 8276 8277 ret = sched_getaffinity(pid, mask); 8278 if (ret == 0) { 8279 unsigned int retlen = min(len, cpumask_size()); 8280 8281 if (copy_to_user(user_mask_ptr, mask, retlen)) 8282 ret = -EFAULT; 8283 else 8284 ret = retlen; 8285 } 8286 free_cpumask_var(mask); 8287 8288 return ret; 8289 } 8290 8291 static void do_sched_yield(void) 8292 { 8293 struct rq_flags rf; 8294 struct rq *rq; 8295 8296 rq = this_rq_lock_irq(&rf); 8297 8298 schedstat_inc(rq->yld_count); 8299 current->sched_class->yield_task(rq); 8300 8301 preempt_disable(); 8302 rq_unlock_irq(rq, &rf); 8303 sched_preempt_enable_no_resched(); 8304 8305 schedule(); 8306 } 8307 8308 /** 8309 * sys_sched_yield - yield the current processor to other threads. 8310 * 8311 * This function yields the current CPU to other tasks. If there are no 8312 * other threads running on this CPU then this function will return. 8313 * 8314 * Return: 0. 8315 */ 8316 SYSCALL_DEFINE0(sched_yield) 8317 { 8318 do_sched_yield(); 8319 return 0; 8320 } 8321 8322 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) 8323 int __sched __cond_resched(void) 8324 { 8325 if (should_resched(0)) { 8326 preempt_schedule_common(); 8327 return 1; 8328 } 8329 /* 8330 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick 8331 * whether the current CPU is in an RCU read-side critical section, 8332 * so the tick can report quiescent states even for CPUs looping 8333 * in kernel context. In contrast, in non-preemptible kernels, 8334 * RCU readers leave no in-memory hints, which means that CPU-bound 8335 * processes executing in kernel context might never report an 8336 * RCU quiescent state. 
Therefore, the following code causes 8337 * cond_resched() to report a quiescent state, but only when RCU 8338 * is in urgent need of one. 8339 */ 8340 #ifndef CONFIG_PREEMPT_RCU 8341 rcu_all_qs(); 8342 #endif 8343 return 0; 8344 } 8345 EXPORT_SYMBOL(__cond_resched); 8346 #endif 8347 8348 #ifdef CONFIG_PREEMPT_DYNAMIC 8349 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 8350 #define cond_resched_dynamic_enabled __cond_resched 8351 #define cond_resched_dynamic_disabled ((void *)&__static_call_return0) 8352 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched); 8353 EXPORT_STATIC_CALL_TRAMP(cond_resched); 8354 8355 #define might_resched_dynamic_enabled __cond_resched 8356 #define might_resched_dynamic_disabled ((void *)&__static_call_return0) 8357 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched); 8358 EXPORT_STATIC_CALL_TRAMP(might_resched); 8359 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 8360 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched); 8361 int __sched dynamic_cond_resched(void) 8362 { 8363 if (!static_branch_unlikely(&sk_dynamic_cond_resched)) 8364 return 0; 8365 return __cond_resched(); 8366 } 8367 EXPORT_SYMBOL(dynamic_cond_resched); 8368 8369 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched); 8370 int __sched dynamic_might_resched(void) 8371 { 8372 if (!static_branch_unlikely(&sk_dynamic_might_resched)) 8373 return 0; 8374 return __cond_resched(); 8375 } 8376 EXPORT_SYMBOL(dynamic_might_resched); 8377 #endif 8378 #endif 8379 8380 /* 8381 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 8382 * call schedule, and on return reacquire the lock. 8383 * 8384 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level 8385 * operations here to prevent schedule() from being called twice (once via 8386 * spin_unlock(), once by hand). 
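 *
 * Illustrative sketch (hypothetical 'lock', 'list', 'struct obj' and
 * process()): a long lock-holding loop can stay preemption-friendly by
 * draining one element per iteration and using the cond_resched_lock()
 * wrapper:
 *
 *	struct obj *obj;
 *
 *	spin_lock(&lock);
 *	while (!list_empty(&list)) {
 *		obj = list_first_entry(&list, struct obj, node);
 *		list_del(&obj->node);
 *		process(obj);
 *		cond_resched_lock(&lock);
 *	}
 *	spin_unlock(&lock);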
8387 */ 8388 int __cond_resched_lock(spinlock_t *lock) 8389 { 8390 int resched = should_resched(PREEMPT_LOCK_OFFSET); 8391 int ret = 0; 8392 8393 lockdep_assert_held(lock); 8394 8395 if (spin_needbreak(lock) || resched) { 8396 spin_unlock(lock); 8397 if (!_cond_resched()) 8398 cpu_relax(); 8399 ret = 1; 8400 spin_lock(lock); 8401 } 8402 return ret; 8403 } 8404 EXPORT_SYMBOL(__cond_resched_lock); 8405 8406 int __cond_resched_rwlock_read(rwlock_t *lock) 8407 { 8408 int resched = should_resched(PREEMPT_LOCK_OFFSET); 8409 int ret = 0; 8410 8411 lockdep_assert_held_read(lock); 8412 8413 if (rwlock_needbreak(lock) || resched) { 8414 read_unlock(lock); 8415 if (!_cond_resched()) 8416 cpu_relax(); 8417 ret = 1; 8418 read_lock(lock); 8419 } 8420 return ret; 8421 } 8422 EXPORT_SYMBOL(__cond_resched_rwlock_read); 8423 8424 int __cond_resched_rwlock_write(rwlock_t *lock) 8425 { 8426 int resched = should_resched(PREEMPT_LOCK_OFFSET); 8427 int ret = 0; 8428 8429 lockdep_assert_held_write(lock); 8430 8431 if (rwlock_needbreak(lock) || resched) { 8432 write_unlock(lock); 8433 if (!_cond_resched()) 8434 cpu_relax(); 8435 ret = 1; 8436 write_lock(lock); 8437 } 8438 return ret; 8439 } 8440 EXPORT_SYMBOL(__cond_resched_rwlock_write); 8441 8442 #ifdef CONFIG_PREEMPT_DYNAMIC 8443 8444 #ifdef CONFIG_GENERIC_ENTRY 8445 #include <linux/entry-common.h> 8446 #endif 8447 8448 /* 8449 * SC:cond_resched 8450 * SC:might_resched 8451 * SC:preempt_schedule 8452 * SC:preempt_schedule_notrace 8453 * SC:irqentry_exit_cond_resched 8454 * 8455 * 8456 * NONE: 8457 * cond_resched <- __cond_resched 8458 * might_resched <- RET0 8459 * preempt_schedule <- NOP 8460 * preempt_schedule_notrace <- NOP 8461 * irqentry_exit_cond_resched <- NOP 8462 * 8463 * VOLUNTARY: 8464 * cond_resched <- __cond_resched 8465 * might_resched <- __cond_resched 8466 * preempt_schedule <- NOP 8467 * preempt_schedule_notrace <- NOP 8468 * irqentry_exit_cond_resched <- NOP 8469 * 8470 * FULL: 8471 * cond_resched <- RET0 8472 * might_resched <- RET0 8473 * preempt_schedule <- preempt_schedule 8474 * preempt_schedule_notrace <- preempt_schedule_notrace 8475 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched 8476 */ 8477 8478 enum { 8479 preempt_dynamic_undefined = -1, 8480 preempt_dynamic_none, 8481 preempt_dynamic_voluntary, 8482 preempt_dynamic_full, 8483 }; 8484 8485 int preempt_dynamic_mode = preempt_dynamic_undefined; 8486 8487 int sched_dynamic_mode(const char *str) 8488 { 8489 if (!strcmp(str, "none")) 8490 return preempt_dynamic_none; 8491 8492 if (!strcmp(str, "voluntary")) 8493 return preempt_dynamic_voluntary; 8494 8495 if (!strcmp(str, "full")) 8496 return preempt_dynamic_full; 8497 8498 return -EINVAL; 8499 } 8500 8501 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 8502 #define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled) 8503 #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled) 8504 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 8505 #define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key) 8506 #define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key) 8507 #else 8508 #error "Unsupported PREEMPT_DYNAMIC mechanism" 8509 #endif 8510 8511 void sched_dynamic_update(int mode) 8512 { 8513 /* 8514 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in 8515 * the ZERO state, which is invalid. 
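 * To that end, first flip every call site to its enabled variant, then
 * disable the ones the requested mode does not want (see the switch below).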
8516 */ 8517 preempt_dynamic_enable(cond_resched); 8518 preempt_dynamic_enable(might_resched); 8519 preempt_dynamic_enable(preempt_schedule); 8520 preempt_dynamic_enable(preempt_schedule_notrace); 8521 preempt_dynamic_enable(irqentry_exit_cond_resched); 8522 8523 switch (mode) { 8524 case preempt_dynamic_none: 8525 preempt_dynamic_enable(cond_resched); 8526 preempt_dynamic_disable(might_resched); 8527 preempt_dynamic_disable(preempt_schedule); 8528 preempt_dynamic_disable(preempt_schedule_notrace); 8529 preempt_dynamic_disable(irqentry_exit_cond_resched); 8530 pr_info("Dynamic Preempt: none\n"); 8531 break; 8532 8533 case preempt_dynamic_voluntary: 8534 preempt_dynamic_enable(cond_resched); 8535 preempt_dynamic_enable(might_resched); 8536 preempt_dynamic_disable(preempt_schedule); 8537 preempt_dynamic_disable(preempt_schedule_notrace); 8538 preempt_dynamic_disable(irqentry_exit_cond_resched); 8539 pr_info("Dynamic Preempt: voluntary\n"); 8540 break; 8541 8542 case preempt_dynamic_full: 8543 preempt_dynamic_disable(cond_resched); 8544 preempt_dynamic_disable(might_resched); 8545 preempt_dynamic_enable(preempt_schedule); 8546 preempt_dynamic_enable(preempt_schedule_notrace); 8547 preempt_dynamic_enable(irqentry_exit_cond_resched); 8548 pr_info("Dynamic Preempt: full\n"); 8549 break; 8550 } 8551 8552 preempt_dynamic_mode = mode; 8553 } 8554 8555 static int __init setup_preempt_mode(char *str) 8556 { 8557 int mode = sched_dynamic_mode(str); 8558 if (mode < 0) { 8559 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str); 8560 return 0; 8561 } 8562 8563 sched_dynamic_update(mode); 8564 return 1; 8565 } 8566 __setup("preempt=", setup_preempt_mode); 8567 8568 static void __init preempt_dynamic_init(void) 8569 { 8570 if (preempt_dynamic_mode == preempt_dynamic_undefined) { 8571 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) { 8572 sched_dynamic_update(preempt_dynamic_none); 8573 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) { 8574 sched_dynamic_update(preempt_dynamic_voluntary); 8575 } else { 8576 /* Default static call setting, nothing to do */ 8577 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)); 8578 preempt_dynamic_mode = preempt_dynamic_full; 8579 pr_info("Dynamic Preempt: full\n"); 8580 } 8581 } 8582 } 8583 8584 #define PREEMPT_MODEL_ACCESSOR(mode) \ 8585 bool preempt_model_##mode(void) \ 8586 { \ 8587 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \ 8588 return preempt_dynamic_mode == preempt_dynamic_##mode; \ 8589 } \ 8590 EXPORT_SYMBOL_GPL(preempt_model_##mode) 8591 8592 PREEMPT_MODEL_ACCESSOR(none); 8593 PREEMPT_MODEL_ACCESSOR(voluntary); 8594 PREEMPT_MODEL_ACCESSOR(full); 8595 8596 #else /* !CONFIG_PREEMPT_DYNAMIC */ 8597 8598 static inline void preempt_dynamic_init(void) { } 8599 8600 #endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */ 8601 8602 /** 8603 * yield - yield the current processor to other threads. 8604 * 8605 * Do not ever use this function, there's a 99% chance you're doing it wrong. 8606 * 8607 * The scheduler is at all times free to pick the calling task as the most 8608 * eligible task to run, if removing the yield() call from your code breaks 8609 * it, it's already broken. 8610 * 8611 * Typical broken usage is: 8612 * 8613 * while (!event) 8614 * yield(); 8615 * 8616 * where one assumes that yield() will let 'the other' process run that will 8617 * make event true. If the current task is a SCHED_FIFO task that will never 8618 * happen. Never use yield() as a progress guarantee!! 8619 * 8620 * If you want to use yield() to wait for something, use wait_event(). 
8621 * If you want to use yield() to be 'nice' for others, use cond_resched(). 8622 * If you still want to use yield(), do not! 8623 */ 8624 void __sched yield(void) 8625 { 8626 set_current_state(TASK_RUNNING); 8627 do_sched_yield(); 8628 } 8629 EXPORT_SYMBOL(yield); 8630 8631 /** 8632 * yield_to - yield the current processor to another thread in 8633 * your thread group, or accelerate that thread toward the 8634 * processor it's on. 8635 * @p: target task 8636 * @preempt: whether task preemption is allowed or not 8637 * 8638 * It's the caller's job to ensure that the target task struct 8639 * can't go away on us before we can do any checks. 8640 * 8641 * Return: 8642 * true (>0) if we indeed boosted the target task. 8643 * false (0) if we failed to boost the target. 8644 * -ESRCH if there's no task to yield to. 8645 */ 8646 int __sched yield_to(struct task_struct *p, bool preempt) 8647 { 8648 struct task_struct *curr = current; 8649 struct rq *rq, *p_rq; 8650 unsigned long flags; 8651 int yielded = 0; 8652 8653 local_irq_save(flags); 8654 rq = this_rq(); 8655 8656 again: 8657 p_rq = task_rq(p); 8658 /* 8659 * If we're the only runnable task on the rq and target rq also 8660 * has only one task, there's absolutely no point in yielding. 8661 */ 8662 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 8663 yielded = -ESRCH; 8664 goto out_irq; 8665 } 8666 8667 double_rq_lock(rq, p_rq); 8668 if (task_rq(p) != p_rq) { 8669 double_rq_unlock(rq, p_rq); 8670 goto again; 8671 } 8672 8673 if (!curr->sched_class->yield_to_task) 8674 goto out_unlock; 8675 8676 if (curr->sched_class != p->sched_class) 8677 goto out_unlock; 8678 8679 if (task_on_cpu(p_rq, p) || !task_is_running(p)) 8680 goto out_unlock; 8681 8682 yielded = curr->sched_class->yield_to_task(rq, p); 8683 if (yielded) { 8684 schedstat_inc(rq->yld_count); 8685 /* 8686 * Make p's CPU reschedule; pick_next_entity takes care of 8687 * fairness. 8688 */ 8689 if (preempt && rq != p_rq) 8690 resched_curr(p_rq); 8691 } 8692 8693 out_unlock: 8694 double_rq_unlock(rq, p_rq); 8695 out_irq: 8696 local_irq_restore(flags); 8697 8698 if (yielded > 0) 8699 schedule(); 8700 8701 return yielded; 8702 } 8703 EXPORT_SYMBOL_GPL(yield_to); 8704 8705 int io_schedule_prepare(void) 8706 { 8707 int old_iowait = current->in_iowait; 8708 8709 current->in_iowait = 1; 8710 blk_flush_plug(current->plug, true); 8711 return old_iowait; 8712 } 8713 8714 void io_schedule_finish(int token) 8715 { 8716 current->in_iowait = token; 8717 } 8718 8719 /* 8720 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 8721 * that process accounting knows that this is a task in IO wait state. 8722 */ 8723 long __sched io_schedule_timeout(long timeout) 8724 { 8725 int token; 8726 long ret; 8727 8728 token = io_schedule_prepare(); 8729 ret = schedule_timeout(timeout); 8730 io_schedule_finish(token); 8731 8732 return ret; 8733 } 8734 EXPORT_SYMBOL(io_schedule_timeout); 8735 8736 void __sched io_schedule(void) 8737 { 8738 int token; 8739 8740 token = io_schedule_prepare(); 8741 schedule(); 8742 io_schedule_finish(token); 8743 } 8744 EXPORT_SYMBOL(io_schedule); 8745 8746 /** 8747 * sys_sched_get_priority_max - return maximum RT priority. 8748 * @policy: scheduling class. 8749 * 8750 * Return: On success, this syscall returns the maximum 8751 * rt_priority that can be used by a given scheduling class. 8752 * On failure, a negative error code is returned. 
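 *
 * Illustrative userspace sketch (not part of this file): the valid static
 * priority range for SCHED_FIFO can be queried via the glibc wrappers:
 *
 *	int lo = sched_get_priority_min(SCHED_FIFO);
 *	int hi = sched_get_priority_max(SCHED_FIFO);
 *
 * Both wrappers return -1 and set errno if the policy is not recognized.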
8753 */ 8754 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 8755 { 8756 int ret = -EINVAL; 8757 8758 switch (policy) { 8759 case SCHED_FIFO: 8760 case SCHED_RR: 8761 ret = MAX_RT_PRIO-1; 8762 break; 8763 case SCHED_DEADLINE: 8764 case SCHED_NORMAL: 8765 case SCHED_BATCH: 8766 case SCHED_IDLE: 8767 ret = 0; 8768 break; 8769 } 8770 return ret; 8771 } 8772 8773 /** 8774 * sys_sched_get_priority_min - return minimum RT priority. 8775 * @policy: scheduling class. 8776 * 8777 * Return: On success, this syscall returns the minimum 8778 * rt_priority that can be used by a given scheduling class. 8779 * On failure, a negative error code is returned. 8780 */ 8781 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 8782 { 8783 int ret = -EINVAL; 8784 8785 switch (policy) { 8786 case SCHED_FIFO: 8787 case SCHED_RR: 8788 ret = 1; 8789 break; 8790 case SCHED_DEADLINE: 8791 case SCHED_NORMAL: 8792 case SCHED_BATCH: 8793 case SCHED_IDLE: 8794 ret = 0; 8795 } 8796 return ret; 8797 } 8798 8799 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 8800 { 8801 struct task_struct *p; 8802 unsigned int time_slice; 8803 struct rq_flags rf; 8804 struct rq *rq; 8805 int retval; 8806 8807 if (pid < 0) 8808 return -EINVAL; 8809 8810 retval = -ESRCH; 8811 rcu_read_lock(); 8812 p = find_process_by_pid(pid); 8813 if (!p) 8814 goto out_unlock; 8815 8816 retval = security_task_getscheduler(p); 8817 if (retval) 8818 goto out_unlock; 8819 8820 rq = task_rq_lock(p, &rf); 8821 time_slice = 0; 8822 if (p->sched_class->get_rr_interval) 8823 time_slice = p->sched_class->get_rr_interval(rq, p); 8824 task_rq_unlock(rq, p, &rf); 8825 8826 rcu_read_unlock(); 8827 jiffies_to_timespec64(time_slice, t); 8828 return 0; 8829 8830 out_unlock: 8831 rcu_read_unlock(); 8832 return retval; 8833 } 8834 8835 /** 8836 * sys_sched_rr_get_interval - return the default timeslice of a process. 8837 * @pid: pid of the process. 8838 * @interval: userspace pointer to the timeslice value. 8839 * 8840 * this syscall writes the default timeslice value of a given process 8841 * into the user-space timespec buffer. A value of '0' means infinity. 8842 * 8843 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 8844 * an error code. 
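 *
 * Illustrative userspace sketch (not part of this file), via the glibc
 * wrapper declared in <sched.h>:
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("RR timeslice: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, (long)ts.tv_nsec);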
8845 */ 8846 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 8847 struct __kernel_timespec __user *, interval) 8848 { 8849 struct timespec64 t; 8850 int retval = sched_rr_get_interval(pid, &t); 8851 8852 if (retval == 0) 8853 retval = put_timespec64(&t, interval); 8854 8855 return retval; 8856 } 8857 8858 #ifdef CONFIG_COMPAT_32BIT_TIME 8859 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, 8860 struct old_timespec32 __user *, interval) 8861 { 8862 struct timespec64 t; 8863 int retval = sched_rr_get_interval(pid, &t); 8864 8865 if (retval == 0) 8866 retval = put_old_timespec32(&t, interval); 8867 return retval; 8868 } 8869 #endif 8870 8871 void sched_show_task(struct task_struct *p) 8872 { 8873 unsigned long free = 0; 8874 int ppid; 8875 8876 if (!try_get_task_stack(p)) 8877 return; 8878 8879 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); 8880 8881 if (task_is_running(p)) 8882 pr_cont(" running task "); 8883 #ifdef CONFIG_DEBUG_STACK_USAGE 8884 free = stack_not_used(p); 8885 #endif 8886 ppid = 0; 8887 rcu_read_lock(); 8888 if (pid_alive(p)) 8889 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 8890 rcu_read_unlock(); 8891 pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n", 8892 free, task_pid_nr(p), ppid, 8893 read_task_thread_flags(p)); 8894 8895 print_worker_info(KERN_INFO, p); 8896 print_stop_info(KERN_INFO, p); 8897 show_stack(p, NULL, KERN_INFO); 8898 put_task_stack(p); 8899 } 8900 EXPORT_SYMBOL_GPL(sched_show_task); 8901 8902 static inline bool 8903 state_filter_match(unsigned long state_filter, struct task_struct *p) 8904 { 8905 unsigned int state = READ_ONCE(p->__state); 8906 8907 /* no filter, everything matches */ 8908 if (!state_filter) 8909 return true; 8910 8911 /* filter, but doesn't match */ 8912 if (!(state & state_filter)) 8913 return false; 8914 8915 /* 8916 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows 8917 * TASK_KILLABLE). 8918 */ 8919 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD)) 8920 return false; 8921 8922 return true; 8923 } 8924 8925 8926 void show_state_filter(unsigned int state_filter) 8927 { 8928 struct task_struct *g, *p; 8929 8930 rcu_read_lock(); 8931 for_each_process_thread(g, p) { 8932 /* 8933 * reset the NMI-timeout, listing all files on a slow 8934 * console might take a lot of time: 8935 * Also, reset softlockup watchdogs on all CPUs, because 8936 * another CPU might be blocked waiting for us to process 8937 * an IPI. 8938 */ 8939 touch_nmi_watchdog(); 8940 touch_all_softlockup_watchdogs(); 8941 if (state_filter_match(state_filter, p)) 8942 sched_show_task(p); 8943 } 8944 8945 #ifdef CONFIG_SCHED_DEBUG 8946 if (!state_filter) 8947 sysrq_sched_debug_show(); 8948 #endif 8949 rcu_read_unlock(); 8950 /* 8951 * Only show locks if all tasks are dumped: 8952 */ 8953 if (!state_filter) 8954 debug_show_all_locks(); 8955 } 8956 8957 /** 8958 * init_idle - set up an idle thread for a given CPU 8959 * @idle: task in question 8960 * @cpu: CPU the idle task belongs to 8961 * 8962 * NOTE: this function does not set the idle thread's NEED_RESCHED 8963 * flag, to make booting more robust. 
8964 */ 8965 void __init init_idle(struct task_struct *idle, int cpu) 8966 { 8967 struct rq *rq = cpu_rq(cpu); 8968 unsigned long flags; 8969 8970 __sched_fork(0, idle); 8971 8972 raw_spin_lock_irqsave(&idle->pi_lock, flags); 8973 raw_spin_rq_lock(rq); 8974 8975 idle->__state = TASK_RUNNING; 8976 idle->se.exec_start = sched_clock(); 8977 /* 8978 * PF_KTHREAD should already be set at this point; regardless, make it 8979 * look like a proper per-CPU kthread. 8980 */ 8981 idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY; 8982 kthread_set_per_cpu(idle, cpu); 8983 8984 #ifdef CONFIG_SMP 8985 /* 8986 * It's possible that init_idle() gets called multiple times on a task, 8987 * in that case do_set_cpus_allowed() will not do the right thing. 8988 * 8989 * And since this is boot we can forgo the serialization. 8990 */ 8991 set_cpus_allowed_common(idle, cpumask_of(cpu), 0); 8992 #endif 8993 /* 8994 * We're having a chicken and egg problem, even though we are 8995 * holding rq->lock, the CPU isn't yet set to this CPU so the 8996 * lockdep check in task_group() will fail. 8997 * 8998 * Similar case to sched_fork(). / Alternatively we could 8999 * use task_rq_lock() here and obtain the other rq->lock. 9000 * 9001 * Silence PROVE_RCU 9002 */ 9003 rcu_read_lock(); 9004 __set_task_cpu(idle, cpu); 9005 rcu_read_unlock(); 9006 9007 rq->idle = idle; 9008 rcu_assign_pointer(rq->curr, idle); 9009 idle->on_rq = TASK_ON_RQ_QUEUED; 9010 #ifdef CONFIG_SMP 9011 idle->on_cpu = 1; 9012 #endif 9013 raw_spin_rq_unlock(rq); 9014 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 9015 9016 /* Set the preempt count _outside_ the spinlocks! */ 9017 init_idle_preempt_count(idle, cpu); 9018 9019 /* 9020 * The idle tasks have their own, simple scheduling class: 9021 */ 9022 idle->sched_class = &idle_sched_class; 9023 ftrace_graph_init_idle_task(idle, cpu); 9024 vtime_init_idle(idle, cpu); 9025 #ifdef CONFIG_SMP 9026 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 9027 #endif 9028 } 9029 9030 #ifdef CONFIG_SMP 9031 9032 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 9033 const struct cpumask *trial) 9034 { 9035 int ret = 1; 9036 9037 if (cpumask_empty(cur)) 9038 return ret; 9039 9040 ret = dl_cpuset_cpumask_can_shrink(cur, trial); 9041 9042 return ret; 9043 } 9044 9045 int task_can_attach(struct task_struct *p, 9046 const struct cpumask *cs_effective_cpus) 9047 { 9048 int ret = 0; 9049 9050 /* 9051 * Kthreads which disallow setaffinity shouldn't be moved 9052 * to a new cpuset; we don't want to change their CPU 9053 * affinity and isolating such threads by their set of 9054 * allowed nodes is unnecessary. Thus, cpusets are not 9055 * applicable for such threads. This prevents checking for 9056 * success of set_cpus_allowed_ptr() on all attached tasks 9057 * before cpus_mask may be changed. 
9058 */ 9059 if (p->flags & PF_NO_SETAFFINITY) { 9060 ret = -EINVAL; 9061 goto out; 9062 } 9063 9064 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 9065 cs_effective_cpus)) { 9066 int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus); 9067 9068 if (unlikely(cpu >= nr_cpu_ids)) 9069 return -EINVAL; 9070 ret = dl_cpu_busy(cpu, p); 9071 } 9072 9073 out: 9074 return ret; 9075 } 9076 9077 bool sched_smp_initialized __read_mostly; 9078 9079 #ifdef CONFIG_NUMA_BALANCING 9080 /* Migrate current task p to target_cpu */ 9081 int migrate_task_to(struct task_struct *p, int target_cpu) 9082 { 9083 struct migration_arg arg = { p, target_cpu }; 9084 int curr_cpu = task_cpu(p); 9085 9086 if (curr_cpu == target_cpu) 9087 return 0; 9088 9089 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 9090 return -EINVAL; 9091 9092 /* TODO: This is not properly updating schedstats */ 9093 9094 trace_sched_move_numa(p, curr_cpu, target_cpu); 9095 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 9096 } 9097 9098 /* 9099 * Requeue a task on a given node and accurately track the number of NUMA 9100 * tasks on the runqueues 9101 */ 9102 void sched_setnuma(struct task_struct *p, int nid) 9103 { 9104 bool queued, running; 9105 struct rq_flags rf; 9106 struct rq *rq; 9107 9108 rq = task_rq_lock(p, &rf); 9109 queued = task_on_rq_queued(p); 9110 running = task_current(rq, p); 9111 9112 if (queued) 9113 dequeue_task(rq, p, DEQUEUE_SAVE); 9114 if (running) 9115 put_prev_task(rq, p); 9116 9117 p->numa_preferred_nid = nid; 9118 9119 if (queued) 9120 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 9121 if (running) 9122 set_next_task(rq, p); 9123 task_rq_unlock(rq, p, &rf); 9124 } 9125 #endif /* CONFIG_NUMA_BALANCING */ 9126 9127 #ifdef CONFIG_HOTPLUG_CPU 9128 /* 9129 * Ensure that the idle task is using init_mm right before its CPU goes 9130 * offline. 9131 */ 9132 void idle_task_exit(void) 9133 { 9134 struct mm_struct *mm = current->active_mm; 9135 9136 BUG_ON(cpu_online(smp_processor_id())); 9137 BUG_ON(current != this_rq()->idle); 9138 9139 if (mm != &init_mm) { 9140 switch_mm(mm, &init_mm, current); 9141 finish_arch_post_lock_switch(); 9142 } 9143 9144 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ 9145 } 9146 9147 static int __balance_push_cpu_stop(void *arg) 9148 { 9149 struct task_struct *p = arg; 9150 struct rq *rq = this_rq(); 9151 struct rq_flags rf; 9152 int cpu; 9153 9154 raw_spin_lock_irq(&p->pi_lock); 9155 rq_lock(rq, &rf); 9156 9157 update_rq_clock(rq); 9158 9159 if (task_rq(p) == rq && task_on_rq_queued(p)) { 9160 cpu = select_fallback_rq(rq->cpu, p); 9161 rq = __migrate_task(rq, &rf, p, cpu); 9162 } 9163 9164 rq_unlock(rq, &rf); 9165 raw_spin_unlock_irq(&p->pi_lock); 9166 9167 put_task_struct(p); 9168 9169 return 0; 9170 } 9171 9172 static DEFINE_PER_CPU(struct cpu_stop_work, push_work); 9173 9174 /* 9175 * Ensure we only run per-cpu kthreads once the CPU goes !active. 9176 * 9177 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only 9178 * effective when the hotplug motion is down. 9179 */ 9180 static void balance_push(struct rq *rq) 9181 { 9182 struct task_struct *push_task = rq->curr; 9183 9184 lockdep_assert_rq_held(rq); 9185 9186 /* 9187 * Ensure the thing is persistent until balance_push_set(.on = false); 9188 */ 9189 rq->balance_callback = &balance_push_callback; 9190 9191 /* 9192 * Only active while going offline and when invoked on the outgoing 9193 * CPU. 
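 * (That is, rq->cpu must be marked dying and we must currently be running
 * on that CPU, which is exactly what the check below verifies.)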
9194 */ 9195 if (!cpu_dying(rq->cpu) || rq != this_rq()) 9196 return; 9197 9198 /* 9199 * Both the cpu-hotplug and stop task are in this case and are 9200 * required to complete the hotplug process. 9201 */ 9202 if (kthread_is_per_cpu(push_task) || 9203 is_migration_disabled(push_task)) { 9204 9205 /* 9206 * If this is the idle task on the outgoing CPU try to wake 9207 * up the hotplug control thread which might wait for the 9208 * last task to vanish. The rcuwait_active() check is 9209 * accurate here because the waiter is pinned on this CPU 9210 * and can't obviously be running in parallel. 9211 * 9212 * On RT kernels this also has to check whether there are 9213 * pinned and scheduled out tasks on the runqueue. They 9214 * need to leave the migrate disabled section first. 9215 */ 9216 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && 9217 rcuwait_active(&rq->hotplug_wait)) { 9218 raw_spin_rq_unlock(rq); 9219 rcuwait_wake_up(&rq->hotplug_wait); 9220 raw_spin_rq_lock(rq); 9221 } 9222 return; 9223 } 9224 9225 get_task_struct(push_task); 9226 /* 9227 * Temporarily drop rq->lock such that we can wake-up the stop task. 9228 * Both preemption and IRQs are still disabled. 9229 */ 9230 raw_spin_rq_unlock(rq); 9231 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, 9232 this_cpu_ptr(&push_work)); 9233 /* 9234 * At this point need_resched() is true and we'll take the loop in 9235 * schedule(). The next pick is obviously going to be the stop task 9236 * which kthread_is_per_cpu() and will push this task away. 9237 */ 9238 raw_spin_rq_lock(rq); 9239 } 9240 9241 static void balance_push_set(int cpu, bool on) 9242 { 9243 struct rq *rq = cpu_rq(cpu); 9244 struct rq_flags rf; 9245 9246 rq_lock_irqsave(rq, &rf); 9247 if (on) { 9248 WARN_ON_ONCE(rq->balance_callback); 9249 rq->balance_callback = &balance_push_callback; 9250 } else if (rq->balance_callback == &balance_push_callback) { 9251 rq->balance_callback = NULL; 9252 } 9253 rq_unlock_irqrestore(rq, &rf); 9254 } 9255 9256 /* 9257 * Invoked from a CPUs hotplug control thread after the CPU has been marked 9258 * inactive. All tasks which are not per CPU kernel threads are either 9259 * pushed off this CPU now via balance_push() or placed on a different CPU 9260 * during wakeup. Wait until the CPU is quiescent. 
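 *
 * "Quiescent" here means the hotplug control thread itself is the only
 * runnable task left (rq->nr_running == 1) and no migration-disabled tasks
 * remain pinned to this runqueue.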
9261 */ 9262 static void balance_hotplug_wait(void) 9263 { 9264 struct rq *rq = this_rq(); 9265 9266 rcuwait_wait_event(&rq->hotplug_wait, 9267 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), 9268 TASK_UNINTERRUPTIBLE); 9269 } 9270 9271 #else 9272 9273 static inline void balance_push(struct rq *rq) 9274 { 9275 } 9276 9277 static inline void balance_push_set(int cpu, bool on) 9278 { 9279 } 9280 9281 static inline void balance_hotplug_wait(void) 9282 { 9283 } 9284 9285 #endif /* CONFIG_HOTPLUG_CPU */ 9286 9287 void set_rq_online(struct rq *rq) 9288 { 9289 if (!rq->online) { 9290 const struct sched_class *class; 9291 9292 cpumask_set_cpu(rq->cpu, rq->rd->online); 9293 rq->online = 1; 9294 9295 for_each_class(class) { 9296 if (class->rq_online) 9297 class->rq_online(rq); 9298 } 9299 } 9300 } 9301 9302 void set_rq_offline(struct rq *rq) 9303 { 9304 if (rq->online) { 9305 const struct sched_class *class; 9306 9307 for_each_class(class) { 9308 if (class->rq_offline) 9309 class->rq_offline(rq); 9310 } 9311 9312 cpumask_clear_cpu(rq->cpu, rq->rd->online); 9313 rq->online = 0; 9314 } 9315 } 9316 9317 /* 9318 * used to mark begin/end of suspend/resume: 9319 */ 9320 static int num_cpus_frozen; 9321 9322 /* 9323 * Update cpusets according to cpu_active mask. If cpusets are 9324 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 9325 * around partition_sched_domains(). 9326 * 9327 * If we come here as part of a suspend/resume, don't touch cpusets because we 9328 * want to restore it back to its original state upon resume anyway. 9329 */ 9330 static void cpuset_cpu_active(void) 9331 { 9332 if (cpuhp_tasks_frozen) { 9333 /* 9334 * num_cpus_frozen tracks how many CPUs are involved in suspend 9335 * resume sequence. As long as this is not the last online 9336 * operation in the resume sequence, just build a single sched 9337 * domain, ignoring cpusets. 9338 */ 9339 partition_sched_domains(1, NULL, NULL); 9340 if (--num_cpus_frozen) 9341 return; 9342 /* 9343 * This is the last CPU online operation. So fall through and 9344 * restore the original sched domains by considering the 9345 * cpuset configurations. 9346 */ 9347 cpuset_force_rebuild(); 9348 } 9349 cpuset_update_active_cpus(); 9350 } 9351 9352 static int cpuset_cpu_inactive(unsigned int cpu) 9353 { 9354 if (!cpuhp_tasks_frozen) { 9355 int ret = dl_cpu_busy(cpu, NULL); 9356 9357 if (ret) 9358 return ret; 9359 cpuset_update_active_cpus(); 9360 } else { 9361 num_cpus_frozen++; 9362 partition_sched_domains(1, NULL, NULL); 9363 } 9364 return 0; 9365 } 9366 9367 int sched_cpu_activate(unsigned int cpu) 9368 { 9369 struct rq *rq = cpu_rq(cpu); 9370 struct rq_flags rf; 9371 9372 /* 9373 * Clear the balance_push callback and prepare to schedule 9374 * regular tasks. 9375 */ 9376 balance_push_set(cpu, false); 9377 9378 #ifdef CONFIG_SCHED_SMT 9379 /* 9380 * When going up, increment the number of cores with SMT present. 9381 */ 9382 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 9383 static_branch_inc_cpuslocked(&sched_smt_present); 9384 #endif 9385 set_cpu_active(cpu, true); 9386 9387 if (sched_smp_initialized) { 9388 sched_update_numa(cpu, true); 9389 sched_domains_numa_masks_set(cpu); 9390 cpuset_cpu_active(); 9391 } 9392 9393 /* 9394 * Put the rq online, if not already. This happens: 9395 * 9396 * 1) In the early boot process, because we build the real domains 9397 * after all CPUs have been brought up. 9398 * 9399 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the 9400 * domains. 
9401 */ 9402 rq_lock_irqsave(rq, &rf); 9403 if (rq->rd) { 9404 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 9405 set_rq_online(rq); 9406 } 9407 rq_unlock_irqrestore(rq, &rf); 9408 9409 return 0; 9410 } 9411 9412 int sched_cpu_deactivate(unsigned int cpu) 9413 { 9414 struct rq *rq = cpu_rq(cpu); 9415 struct rq_flags rf; 9416 int ret; 9417 9418 /* 9419 * Remove CPU from nohz.idle_cpus_mask to prevent participating in 9420 * load balancing when not active 9421 */ 9422 nohz_balance_exit_idle(rq); 9423 9424 set_cpu_active(cpu, false); 9425 9426 /* 9427 * From this point forward, this CPU will refuse to run any task that 9428 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively 9429 * push those tasks away until this gets cleared, see 9430 * sched_cpu_dying(). 9431 */ 9432 balance_push_set(cpu, true); 9433 9434 /* 9435 * We've cleared cpu_active_mask / set balance_push, wait for all 9436 * preempt-disabled and RCU users of this state to go away such that 9437 * all new such users will observe it. 9438 * 9439 * Specifically, we rely on ttwu to no longer target this CPU, see 9440 * ttwu_queue_cond() and is_cpu_allowed(). 9441 * 9442 * Do sync before park smpboot threads to take care the rcu boost case. 9443 */ 9444 synchronize_rcu(); 9445 9446 rq_lock_irqsave(rq, &rf); 9447 if (rq->rd) { 9448 update_rq_clock(rq); 9449 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 9450 set_rq_offline(rq); 9451 } 9452 rq_unlock_irqrestore(rq, &rf); 9453 9454 #ifdef CONFIG_SCHED_SMT 9455 /* 9456 * When going down, decrement the number of cores with SMT present. 9457 */ 9458 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 9459 static_branch_dec_cpuslocked(&sched_smt_present); 9460 9461 sched_core_cpu_deactivate(cpu); 9462 #endif 9463 9464 if (!sched_smp_initialized) 9465 return 0; 9466 9467 sched_update_numa(cpu, false); 9468 ret = cpuset_cpu_inactive(cpu); 9469 if (ret) { 9470 balance_push_set(cpu, false); 9471 set_cpu_active(cpu, true); 9472 sched_update_numa(cpu, true); 9473 return ret; 9474 } 9475 sched_domains_numa_masks_clear(cpu); 9476 return 0; 9477 } 9478 9479 static void sched_rq_cpu_starting(unsigned int cpu) 9480 { 9481 struct rq *rq = cpu_rq(cpu); 9482 9483 rq->calc_load_update = calc_load_update; 9484 update_max_interval(); 9485 } 9486 9487 int sched_cpu_starting(unsigned int cpu) 9488 { 9489 sched_core_cpu_starting(cpu); 9490 sched_rq_cpu_starting(cpu); 9491 sched_tick_start(cpu); 9492 return 0; 9493 } 9494 9495 #ifdef CONFIG_HOTPLUG_CPU 9496 9497 /* 9498 * Invoked immediately before the stopper thread is invoked to bring the 9499 * CPU down completely. At this point all per CPU kthreads except the 9500 * hotplug thread (current) and the stopper thread (inactive) have been 9501 * either parked or have been unbound from the outgoing CPU. Ensure that 9502 * any of those which might be on the way out are gone. 9503 * 9504 * If after this point a bound task is being woken on this CPU then the 9505 * responsible hotplug callback has failed to do it's job. 9506 * sched_cpu_dying() will catch it with the appropriate fireworks. 9507 */ 9508 int sched_cpu_wait_empty(unsigned int cpu) 9509 { 9510 balance_hotplug_wait(); 9511 return 0; 9512 } 9513 9514 /* 9515 * Since this CPU is going 'away' for a while, fold any nr_active delta we 9516 * might have. Called from the CPU stopper task after ensuring that the 9517 * stopper is the last running task on the CPU, so nr_active count is 9518 * stable. 
We need to take the teardown thread which is calling this into 9519 * account, so we hand in adjust = 1 to the load calculation. 9520 * 9521 * Also see the comment "Global load-average calculations". 9522 */ 9523 static void calc_load_migrate(struct rq *rq) 9524 { 9525 long delta = calc_load_fold_active(rq, 1); 9526 9527 if (delta) 9528 atomic_long_add(delta, &calc_load_tasks); 9529 } 9530 9531 static void dump_rq_tasks(struct rq *rq, const char *loglvl) 9532 { 9533 struct task_struct *g, *p; 9534 int cpu = cpu_of(rq); 9535 9536 lockdep_assert_rq_held(rq); 9537 9538 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); 9539 for_each_process_thread(g, p) { 9540 if (task_cpu(p) != cpu) 9541 continue; 9542 9543 if (!task_on_rq_queued(p)) 9544 continue; 9545 9546 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); 9547 } 9548 } 9549 9550 int sched_cpu_dying(unsigned int cpu) 9551 { 9552 struct rq *rq = cpu_rq(cpu); 9553 struct rq_flags rf; 9554 9555 /* Handle pending wakeups and then migrate everything off */ 9556 sched_tick_stop(cpu); 9557 9558 rq_lock_irqsave(rq, &rf); 9559 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { 9560 WARN(true, "Dying CPU not properly vacated!"); 9561 dump_rq_tasks(rq, KERN_WARNING); 9562 } 9563 rq_unlock_irqrestore(rq, &rf); 9564 9565 calc_load_migrate(rq); 9566 update_max_interval(); 9567 hrtick_clear(rq); 9568 sched_core_cpu_dying(cpu); 9569 return 0; 9570 } 9571 #endif 9572 9573 void __init sched_init_smp(void) 9574 { 9575 sched_init_numa(NUMA_NO_NODE); 9576 9577 /* 9578 * There's no userspace yet to cause hotplug operations; hence all the 9579 * CPU masks are stable and all blatant races in the below code cannot 9580 * happen. 9581 */ 9582 mutex_lock(&sched_domains_mutex); 9583 sched_init_domains(cpu_active_mask); 9584 mutex_unlock(&sched_domains_mutex); 9585 9586 /* Move init over to a non-isolated CPU */ 9587 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0) 9588 BUG(); 9589 current->flags &= ~PF_NO_SETAFFINITY; 9590 sched_init_granularity(); 9591 9592 init_sched_rt_class(); 9593 init_sched_dl_class(); 9594 9595 sched_smp_initialized = true; 9596 } 9597 9598 static int __init migration_init(void) 9599 { 9600 sched_cpu_starting(smp_processor_id()); 9601 return 0; 9602 } 9603 early_initcall(migration_init); 9604 9605 #else 9606 void __init sched_init_smp(void) 9607 { 9608 sched_init_granularity(); 9609 } 9610 #endif /* CONFIG_SMP */ 9611 9612 int in_sched_functions(unsigned long addr) 9613 { 9614 return in_lock_functions(addr) || 9615 (addr >= (unsigned long)__sched_text_start 9616 && addr < (unsigned long)__sched_text_end); 9617 } 9618 9619 #ifdef CONFIG_CGROUP_SCHED 9620 /* 9621 * Default task group. 9622 * Every task in system belongs to this group at bootup. 
9623 */ 9624 struct task_group root_task_group; 9625 LIST_HEAD(task_groups); 9626 9627 /* Cacheline aligned slab cache for task_group */ 9628 static struct kmem_cache *task_group_cache __read_mostly; 9629 #endif 9630 9631 void __init sched_init(void) 9632 { 9633 unsigned long ptr = 0; 9634 int i; 9635 9636 /* Make sure the linker didn't screw up */ 9637 BUG_ON(&idle_sched_class != &fair_sched_class + 1 || 9638 &fair_sched_class != &rt_sched_class + 1 || 9639 &rt_sched_class != &dl_sched_class + 1); 9640 #ifdef CONFIG_SMP 9641 BUG_ON(&dl_sched_class != &stop_sched_class + 1); 9642 #endif 9643 9644 wait_bit_init(); 9645 9646 #ifdef CONFIG_FAIR_GROUP_SCHED 9647 ptr += 2 * nr_cpu_ids * sizeof(void **); 9648 #endif 9649 #ifdef CONFIG_RT_GROUP_SCHED 9650 ptr += 2 * nr_cpu_ids * sizeof(void **); 9651 #endif 9652 if (ptr) { 9653 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); 9654 9655 #ifdef CONFIG_FAIR_GROUP_SCHED 9656 root_task_group.se = (struct sched_entity **)ptr; 9657 ptr += nr_cpu_ids * sizeof(void **); 9658 9659 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 9660 ptr += nr_cpu_ids * sizeof(void **); 9661 9662 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 9663 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 9664 #endif /* CONFIG_FAIR_GROUP_SCHED */ 9665 #ifdef CONFIG_RT_GROUP_SCHED 9666 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 9667 ptr += nr_cpu_ids * sizeof(void **); 9668 9669 root_task_group.rt_rq = (struct rt_rq **)ptr; 9670 ptr += nr_cpu_ids * sizeof(void **); 9671 9672 #endif /* CONFIG_RT_GROUP_SCHED */ 9673 } 9674 9675 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); 9676 9677 #ifdef CONFIG_SMP 9678 init_defrootdomain(); 9679 #endif 9680 9681 #ifdef CONFIG_RT_GROUP_SCHED 9682 init_rt_bandwidth(&root_task_group.rt_bandwidth, 9683 global_rt_period(), global_rt_runtime()); 9684 #endif /* CONFIG_RT_GROUP_SCHED */ 9685 9686 #ifdef CONFIG_CGROUP_SCHED 9687 task_group_cache = KMEM_CACHE(task_group, 0); 9688 9689 list_add(&root_task_group.list, &task_groups); 9690 INIT_LIST_HEAD(&root_task_group.children); 9691 INIT_LIST_HEAD(&root_task_group.siblings); 9692 autogroup_init(&init_task); 9693 #endif /* CONFIG_CGROUP_SCHED */ 9694 9695 for_each_possible_cpu(i) { 9696 struct rq *rq; 9697 9698 rq = cpu_rq(i); 9699 raw_spin_lock_init(&rq->__lock); 9700 rq->nr_running = 0; 9701 rq->calc_load_active = 0; 9702 rq->calc_load_update = jiffies + LOAD_FREQ; 9703 init_cfs_rq(&rq->cfs); 9704 init_rt_rq(&rq->rt); 9705 init_dl_rq(&rq->dl); 9706 #ifdef CONFIG_FAIR_GROUP_SCHED 9707 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 9708 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 9709 /* 9710 * How much CPU bandwidth does root_task_group get? 9711 * 9712 * In case of task-groups formed thr' the cgroup filesystem, it 9713 * gets 100% of the CPU resources in the system. This overall 9714 * system CPU resource is divided among the tasks of 9715 * root_task_group and its child task-groups in a fair manner, 9716 * based on each entity's (task or task-group's) weight 9717 * (se->load.weight). 9718 * 9719 * In other words, if root_task_group has 10 tasks of weight 9720 * 1024) and two child groups A0 and A1 (of weight 1024 each), 9721 * then A0's share of the CPU resource is: 9722 * 9723 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 9724 * 9725 * We achieve this by letting root_task_group's tasks sit 9726 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 
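 *
 * (In the example above, the ten root tasks together receive the
 * remaining 10*1024 / 12288 ~= 83.3% of the CPU.)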
9727 */ 9728 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 9729 #endif /* CONFIG_FAIR_GROUP_SCHED */ 9730 9731 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 9732 #ifdef CONFIG_RT_GROUP_SCHED 9733 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 9734 #endif 9735 #ifdef CONFIG_SMP 9736 rq->sd = NULL; 9737 rq->rd = NULL; 9738 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 9739 rq->balance_callback = &balance_push_callback; 9740 rq->active_balance = 0; 9741 rq->next_balance = jiffies; 9742 rq->push_cpu = 0; 9743 rq->cpu = i; 9744 rq->online = 0; 9745 rq->idle_stamp = 0; 9746 rq->avg_idle = 2*sysctl_sched_migration_cost; 9747 rq->wake_stamp = jiffies; 9748 rq->wake_avg_idle = rq->avg_idle; 9749 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 9750 9751 INIT_LIST_HEAD(&rq->cfs_tasks); 9752 9753 rq_attach_root(rq, &def_root_domain); 9754 #ifdef CONFIG_NO_HZ_COMMON 9755 rq->last_blocked_load_update_tick = jiffies; 9756 atomic_set(&rq->nohz_flags, 0); 9757 9758 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); 9759 #endif 9760 #ifdef CONFIG_HOTPLUG_CPU 9761 rcuwait_init(&rq->hotplug_wait); 9762 #endif 9763 #endif /* CONFIG_SMP */ 9764 hrtick_rq_init(rq); 9765 atomic_set(&rq->nr_iowait, 0); 9766 9767 #ifdef CONFIG_SCHED_CORE 9768 rq->core = rq; 9769 rq->core_pick = NULL; 9770 rq->core_enabled = 0; 9771 rq->core_tree = RB_ROOT; 9772 rq->core_forceidle_count = 0; 9773 rq->core_forceidle_occupation = 0; 9774 rq->core_forceidle_start = 0; 9775 9776 rq->core_cookie = 0UL; 9777 #endif 9778 } 9779 9780 set_load_weight(&init_task, false); 9781 9782 /* 9783 * The boot idle thread does lazy MMU switching as well: 9784 */ 9785 mmgrab(&init_mm); 9786 enter_lazy_tlb(&init_mm, current); 9787 9788 /* 9789 * The idle task doesn't need the kthread struct to function, but it 9790 * is dressed up as a per-CPU kthread and thus needs to play the part 9791 * if we want to avoid special-casing it in code that deals with per-CPU 9792 * kthreads. 9793 */ 9794 WARN_ON(!set_kthread_struct(current)); 9795 9796 /* 9797 * Make us the idle thread. Technically, schedule() should not be 9798 * called from this thread, however somewhere below it might be, 9799 * but because we are the idle thread, we just pick up running again 9800 * when this runqueue becomes "idle". 9801 */ 9802 init_idle(current, smp_processor_id()); 9803 9804 calc_load_update = jiffies + LOAD_FREQ; 9805 9806 #ifdef CONFIG_SMP 9807 idle_thread_set_boot_cpu(); 9808 balance_push_set(smp_processor_id(), false); 9809 #endif 9810 init_sched_fair_class(); 9811 9812 psi_init(); 9813 9814 init_uclamp(); 9815 9816 preempt_dynamic_init(); 9817 9818 scheduler_running = 1; 9819 } 9820 9821 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 9822 9823 void __might_sleep(const char *file, int line) 9824 { 9825 unsigned int state = get_current_state(); 9826 /* 9827 * Blocking primitives will set (and therefore destroy) current->state, 9828 * since we will exit with TASK_RUNNING make sure we enter with it, 9829 * otherwise we will destroy state. 
9830 */ 9831 WARN_ONCE(state != TASK_RUNNING && current->task_state_change, 9832 "do not call blocking ops when !TASK_RUNNING; " 9833 "state=%x set at [<%p>] %pS\n", state, 9834 (void *)current->task_state_change, 9835 (void *)current->task_state_change); 9836 9837 __might_resched(file, line, 0); 9838 } 9839 EXPORT_SYMBOL(__might_sleep); 9840 9841 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip) 9842 { 9843 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT)) 9844 return; 9845 9846 if (preempt_count() == preempt_offset) 9847 return; 9848 9849 pr_err("Preemption disabled at:"); 9850 print_ip_sym(KERN_ERR, ip); 9851 } 9852 9853 static inline bool resched_offsets_ok(unsigned int offsets) 9854 { 9855 unsigned int nested = preempt_count(); 9856 9857 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT; 9858 9859 return nested == offsets; 9860 } 9861 9862 void __might_resched(const char *file, int line, unsigned int offsets) 9863 { 9864 /* Ratelimiting timestamp: */ 9865 static unsigned long prev_jiffy; 9866 9867 unsigned long preempt_disable_ip; 9868 9869 /* WARN_ON_ONCE() by default, no rate limit required: */ 9870 rcu_sleep_check(); 9871 9872 if ((resched_offsets_ok(offsets) && !irqs_disabled() && 9873 !is_idle_task(current) && !current->non_block_count) || 9874 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 9875 oops_in_progress) 9876 return; 9877 9878 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 9879 return; 9880 prev_jiffy = jiffies; 9881 9882 /* Save this before calling printk(), since that will clobber it: */ 9883 preempt_disable_ip = get_preempt_disable_ip(current); 9884 9885 pr_err("BUG: sleeping function called from invalid context at %s:%d\n", 9886 file, line); 9887 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", 9888 in_atomic(), irqs_disabled(), current->non_block_count, 9889 current->pid, current->comm); 9890 pr_err("preempt_count: %x, expected: %x\n", preempt_count(), 9891 offsets & MIGHT_RESCHED_PREEMPT_MASK); 9892 9893 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) { 9894 pr_err("RCU nest depth: %d, expected: %u\n", 9895 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT); 9896 } 9897 9898 if (task_stack_end_corrupted(current)) 9899 pr_emerg("Thread overran stack, or stack corrupted\n"); 9900 9901 debug_show_held_locks(current); 9902 if (irqs_disabled()) 9903 print_irqtrace_events(current); 9904 9905 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK, 9906 preempt_disable_ip); 9907 9908 dump_stack(); 9909 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 9910 } 9911 EXPORT_SYMBOL(__might_resched); 9912 9913 void __cant_sleep(const char *file, int line, int preempt_offset) 9914 { 9915 static unsigned long prev_jiffy; 9916 9917 if (irqs_disabled()) 9918 return; 9919 9920 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 9921 return; 9922 9923 if (preempt_count() > preempt_offset) 9924 return; 9925 9926 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 9927 return; 9928 prev_jiffy = jiffies; 9929 9930 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 9931 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 9932 in_atomic(), irqs_disabled(), 9933 current->pid, current->comm); 9934 9935 debug_show_held_locks(current); 9936 dump_stack(); 9937 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 9938 } 9939 EXPORT_SYMBOL_GPL(__cant_sleep); 9940 9941 #ifdef CONFIG_SMP 9942 void __cant_migrate(const char *file, int line) 9943 { 9944 static unsigned long prev_jiffy; 9945 9946 if 
(irqs_disabled()) 9947 return; 9948 9949 if (is_migration_disabled(current)) 9950 return; 9951 9952 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 9953 return; 9954 9955 if (preempt_count() > 0) 9956 return; 9957 9958 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 9959 return; 9960 prev_jiffy = jiffies; 9961 9962 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line); 9963 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n", 9964 in_atomic(), irqs_disabled(), is_migration_disabled(current), 9965 current->pid, current->comm); 9966 9967 debug_show_held_locks(current); 9968 dump_stack(); 9969 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 9970 } 9971 EXPORT_SYMBOL_GPL(__cant_migrate); 9972 #endif 9973 #endif 9974 9975 #ifdef CONFIG_MAGIC_SYSRQ 9976 void normalize_rt_tasks(void) 9977 { 9978 struct task_struct *g, *p; 9979 struct sched_attr attr = { 9980 .sched_policy = SCHED_NORMAL, 9981 }; 9982 9983 read_lock(&tasklist_lock); 9984 for_each_process_thread(g, p) { 9985 /* 9986 * Only normalize user tasks: 9987 */ 9988 if (p->flags & PF_KTHREAD) 9989 continue; 9990 9991 p->se.exec_start = 0; 9992 schedstat_set(p->stats.wait_start, 0); 9993 schedstat_set(p->stats.sleep_start, 0); 9994 schedstat_set(p->stats.block_start, 0); 9995 9996 if (!dl_task(p) && !rt_task(p)) { 9997 /* 9998 * Renice negative nice level userspace 9999 * tasks back to 0: 10000 */ 10001 if (task_nice(p) < 0) 10002 set_user_nice(p, 0); 10003 continue; 10004 } 10005 10006 __sched_setscheduler(p, &attr, false, false); 10007 } 10008 read_unlock(&tasklist_lock); 10009 } 10010 10011 #endif /* CONFIG_MAGIC_SYSRQ */ 10012 10013 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 10014 /* 10015 * These functions are only useful for the IA64 MCA handling, or kdb. 10016 * 10017 * They can only be called when the whole system has been 10018 * stopped - every CPU needs to be quiescent, and no scheduling 10019 * activity can take place. Using them for anything else would 10020 * be a serious bug, and as a result, they aren't even visible 10021 * under any other configuration. 10022 */ 10023 10024 /** 10025 * curr_task - return the current task for a given CPU. 10026 * @cpu: the processor in question. 10027 * 10028 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 10029 * 10030 * Return: The current task for @cpu. 10031 */ 10032 struct task_struct *curr_task(int cpu) 10033 { 10034 return cpu_curr(cpu); 10035 } 10036 10037 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 10038 10039 #ifdef CONFIG_IA64 10040 /** 10041 * ia64_set_curr_task - set the current task for a given CPU. 10042 * @cpu: the processor in question. 10043 * @p: the task pointer to set. 10044 * 10045 * Description: This function must only be used when non-maskable interrupts 10046 * are serviced on a separate stack. It allows the architecture to switch the 10047 * notion of the current task on a CPU in a non-blocking manner. This function 10048 * must be called with all CPUs synchronized and interrupts disabled, and the 10049 * caller must save the original value of the current task (see 10050 * curr_task() above) and restore that value before reenabling interrupts and 10051 * re-starting the system. 10052 * 10053 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
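 *
 * A sketch of the expected save/set/restore sequence (illustrative only;
 * 'new_task' is a placeholder):
 *
 *	struct task_struct *orig = curr_task(cpu);
 *
 *	ia64_set_curr_task(cpu, new_task);
 *	// ... handle the MCA / kdb event ...
 *	ia64_set_curr_task(cpu, orig);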
10054 */ 10055 void ia64_set_curr_task(int cpu, struct task_struct *p) 10056 { 10057 cpu_curr(cpu) = p; 10058 } 10059 10060 #endif 10061 10062 #ifdef CONFIG_CGROUP_SCHED 10063 /* task_group_lock serializes the addition/removal of task groups */ 10064 static DEFINE_SPINLOCK(task_group_lock); 10065 10066 static inline void alloc_uclamp_sched_group(struct task_group *tg, 10067 struct task_group *parent) 10068 { 10069 #ifdef CONFIG_UCLAMP_TASK_GROUP 10070 enum uclamp_id clamp_id; 10071 10072 for_each_clamp_id(clamp_id) { 10073 uclamp_se_set(&tg->uclamp_req[clamp_id], 10074 uclamp_none(clamp_id), false); 10075 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; 10076 } 10077 #endif 10078 } 10079 10080 static void sched_free_group(struct task_group *tg) 10081 { 10082 free_fair_sched_group(tg); 10083 free_rt_sched_group(tg); 10084 autogroup_free(tg); 10085 kmem_cache_free(task_group_cache, tg); 10086 } 10087 10088 static void sched_free_group_rcu(struct rcu_head *rcu) 10089 { 10090 sched_free_group(container_of(rcu, struct task_group, rcu)); 10091 } 10092 10093 static void sched_unregister_group(struct task_group *tg) 10094 { 10095 unregister_fair_sched_group(tg); 10096 unregister_rt_sched_group(tg); 10097 /* 10098 * We have to wait for yet another RCU grace period to expire, as 10099 * print_cfs_stats() might run concurrently. 10100 */ 10101 call_rcu(&tg->rcu, sched_free_group_rcu); 10102 } 10103 10104 /* allocate runqueue etc for a new task group */ 10105 struct task_group *sched_create_group(struct task_group *parent) 10106 { 10107 struct task_group *tg; 10108 10109 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 10110 if (!tg) 10111 return ERR_PTR(-ENOMEM); 10112 10113 if (!alloc_fair_sched_group(tg, parent)) 10114 goto err; 10115 10116 if (!alloc_rt_sched_group(tg, parent)) 10117 goto err; 10118 10119 alloc_uclamp_sched_group(tg, parent); 10120 10121 return tg; 10122 10123 err: 10124 sched_free_group(tg); 10125 return ERR_PTR(-ENOMEM); 10126 } 10127 10128 void sched_online_group(struct task_group *tg, struct task_group *parent) 10129 { 10130 unsigned long flags; 10131 10132 spin_lock_irqsave(&task_group_lock, flags); 10133 list_add_rcu(&tg->list, &task_groups); 10134 10135 /* Root should already exist: */ 10136 WARN_ON(!parent); 10137 10138 tg->parent = parent; 10139 INIT_LIST_HEAD(&tg->children); 10140 list_add_rcu(&tg->siblings, &parent->children); 10141 spin_unlock_irqrestore(&task_group_lock, flags); 10142 10143 online_fair_sched_group(tg); 10144 } 10145 10146 /* rcu callback to free various structures associated with a task group */ 10147 static void sched_unregister_group_rcu(struct rcu_head *rhp) 10148 { 10149 /* Now it should be safe to free those cfs_rqs: */ 10150 sched_unregister_group(container_of(rhp, struct task_group, rcu)); 10151 } 10152 10153 void sched_destroy_group(struct task_group *tg) 10154 { 10155 /* Wait for possible concurrent references to cfs_rqs complete: */ 10156 call_rcu(&tg->rcu, sched_unregister_group_rcu); 10157 } 10158 10159 void sched_release_group(struct task_group *tg) 10160 { 10161 unsigned long flags; 10162 10163 /* 10164 * Unlink first, to avoid walk_tg_tree_from() from finding us (via 10165 * sched_cfs_period_timer()). 10166 * 10167 * For this to be effective, we have to wait for all pending users of 10168 * this task group to leave their RCU critical section to ensure no new 10169 * user will see our dying task group any more. Specifically ensure 10170 * that tg_unthrottle_up() won't add decayed cfs_rq's to it. 
10171 * 10172 * We therefore defer calling unregister_fair_sched_group() to 10173 * sched_unregister_group() which is guaranteed to get called only after the 10174 * current RCU grace period has expired. 10175 */ 10176 spin_lock_irqsave(&task_group_lock, flags); 10177 list_del_rcu(&tg->list); 10178 list_del_rcu(&tg->siblings); 10179 spin_unlock_irqrestore(&task_group_lock, flags); 10180 } 10181 10182 static void sched_change_group(struct task_struct *tsk) 10183 { 10184 struct task_group *tg; 10185 10186 /* 10187 * All callers are synchronized by task_rq_lock(); we do not use RCU 10188 * which would be pointless here. Thus, we pass "true" to task_css_check() 10189 * to prevent lockdep warnings. 10190 */ 10191 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 10192 struct task_group, css); 10193 tg = autogroup_task_group(tsk, tg); 10194 tsk->sched_task_group = tg; 10195 10196 #ifdef CONFIG_FAIR_GROUP_SCHED 10197 if (tsk->sched_class->task_change_group) 10198 tsk->sched_class->task_change_group(tsk); 10199 else 10200 #endif 10201 set_task_rq(tsk, task_cpu(tsk)); 10202 } 10203 10204 /* 10205 * Change task's runqueue when it moves between groups. 10206 * 10207 * The caller of this function should have put the task in its new group by 10208 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect 10209 * its new group. 10210 */ 10211 void sched_move_task(struct task_struct *tsk) 10212 { 10213 int queued, running, queue_flags = 10214 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 10215 struct rq_flags rf; 10216 struct rq *rq; 10217 10218 rq = task_rq_lock(tsk, &rf); 10219 update_rq_clock(rq); 10220 10221 running = task_current(rq, tsk); 10222 queued = task_on_rq_queued(tsk); 10223 10224 if (queued) 10225 dequeue_task(rq, tsk, queue_flags); 10226 if (running) 10227 put_prev_task(rq, tsk); 10228 10229 sched_change_group(tsk); 10230 10231 if (queued) 10232 enqueue_task(rq, tsk, queue_flags); 10233 if (running) { 10234 set_next_task(rq, tsk); 10235 /* 10236 * After changing group, the running task may have joined a 10237 * throttled one but it's still the running task. Trigger a 10238 * resched to make sure that task can still run. 10239 */ 10240 resched_curr(rq); 10241 } 10242 10243 task_rq_unlock(rq, tsk, &rf); 10244 } 10245 10246 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 10247 { 10248 return css ?
container_of(css, struct task_group, css) : NULL; 10249 } 10250 10251 static struct cgroup_subsys_state * 10252 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 10253 { 10254 struct task_group *parent = css_tg(parent_css); 10255 struct task_group *tg; 10256 10257 if (!parent) { 10258 /* This is early initialization for the top cgroup */ 10259 return &root_task_group.css; 10260 } 10261 10262 tg = sched_create_group(parent); 10263 if (IS_ERR(tg)) 10264 return ERR_PTR(-ENOMEM); 10265 10266 return &tg->css; 10267 } 10268 10269 /* Expose task group only after completing cgroup initialization */ 10270 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 10271 { 10272 struct task_group *tg = css_tg(css); 10273 struct task_group *parent = css_tg(css->parent); 10274 10275 if (parent) 10276 sched_online_group(tg, parent); 10277 10278 #ifdef CONFIG_UCLAMP_TASK_GROUP 10279 /* Propagate the effective uclamp value for the new group */ 10280 mutex_lock(&uclamp_mutex); 10281 rcu_read_lock(); 10282 cpu_util_update_eff(css); 10283 rcu_read_unlock(); 10284 mutex_unlock(&uclamp_mutex); 10285 #endif 10286 10287 return 0; 10288 } 10289 10290 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 10291 { 10292 struct task_group *tg = css_tg(css); 10293 10294 sched_release_group(tg); 10295 } 10296 10297 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 10298 { 10299 struct task_group *tg = css_tg(css); 10300 10301 /* 10302 * Relies on the RCU grace period between css_released() and this. 10303 */ 10304 sched_unregister_group(tg); 10305 } 10306 10307 #ifdef CONFIG_RT_GROUP_SCHED 10308 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 10309 { 10310 struct task_struct *task; 10311 struct cgroup_subsys_state *css; 10312 10313 cgroup_taskset_for_each(task, css, tset) { 10314 if (!sched_rt_can_attach(css_tg(css), task)) 10315 return -EINVAL; 10316 } 10317 return 0; 10318 } 10319 #endif 10320 10321 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 10322 { 10323 struct task_struct *task; 10324 struct cgroup_subsys_state *css; 10325 10326 cgroup_taskset_for_each(task, css, tset) 10327 sched_move_task(task); 10328 } 10329 10330 #ifdef CONFIG_UCLAMP_TASK_GROUP 10331 static void cpu_util_update_eff(struct cgroup_subsys_state *css) 10332 { 10333 struct cgroup_subsys_state *top_css = css; 10334 struct uclamp_se *uc_parent = NULL; 10335 struct uclamp_se *uc_se = NULL; 10336 unsigned int eff[UCLAMP_CNT]; 10337 enum uclamp_id clamp_id; 10338 unsigned int clamps; 10339 10340 lockdep_assert_held(&uclamp_mutex); 10341 SCHED_WARN_ON(!rcu_read_lock_held()); 10342 10343 css_for_each_descendant_pre(css, top_css) { 10344 uc_parent = css_tg(css)->parent 10345 ? 
css_tg(css)->parent->uclamp : NULL; 10346 10347 for_each_clamp_id(clamp_id) { 10348 /* Assume effective clamps matches requested clamps */ 10349 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; 10350 /* Cap effective clamps with parent's effective clamps */ 10351 if (uc_parent && 10352 eff[clamp_id] > uc_parent[clamp_id].value) { 10353 eff[clamp_id] = uc_parent[clamp_id].value; 10354 } 10355 } 10356 /* Ensure protection is always capped by limit */ 10357 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); 10358 10359 /* Propagate most restrictive effective clamps */ 10360 clamps = 0x0; 10361 uc_se = css_tg(css)->uclamp; 10362 for_each_clamp_id(clamp_id) { 10363 if (eff[clamp_id] == uc_se[clamp_id].value) 10364 continue; 10365 uc_se[clamp_id].value = eff[clamp_id]; 10366 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); 10367 clamps |= (0x1 << clamp_id); 10368 } 10369 if (!clamps) { 10370 css = css_rightmost_descendant(css); 10371 continue; 10372 } 10373 10374 /* Immediately update descendants RUNNABLE tasks */ 10375 uclamp_update_active_tasks(css); 10376 } 10377 } 10378 10379 /* 10380 * Integer 10^N with a given N exponent by casting to integer the literal "1eN" 10381 * C expression. Since there is no way to convert a macro argument (N) into a 10382 * character constant, use two levels of macros. 10383 */ 10384 #define _POW10(exp) ((unsigned int)1e##exp) 10385 #define POW10(exp) _POW10(exp) 10386 10387 struct uclamp_request { 10388 #define UCLAMP_PERCENT_SHIFT 2 10389 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) 10390 s64 percent; 10391 u64 util; 10392 int ret; 10393 }; 10394 10395 static inline struct uclamp_request 10396 capacity_from_percent(char *buf) 10397 { 10398 struct uclamp_request req = { 10399 .percent = UCLAMP_PERCENT_SCALE, 10400 .util = SCHED_CAPACITY_SCALE, 10401 .ret = 0, 10402 }; 10403 10404 buf = strim(buf); 10405 if (strcmp(buf, "max")) { 10406 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, 10407 &req.percent); 10408 if (req.ret) 10409 return req; 10410 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { 10411 req.ret = -ERANGE; 10412 return req; 10413 } 10414 10415 req.util = req.percent << SCHED_CAPACITY_SHIFT; 10416 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); 10417 } 10418 10419 return req; 10420 } 10421 10422 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, 10423 size_t nbytes, loff_t off, 10424 enum uclamp_id clamp_id) 10425 { 10426 struct uclamp_request req; 10427 struct task_group *tg; 10428 10429 req = capacity_from_percent(buf); 10430 if (req.ret) 10431 return req.ret; 10432 10433 static_branch_enable(&sched_uclamp_used); 10434 10435 mutex_lock(&uclamp_mutex); 10436 rcu_read_lock(); 10437 10438 tg = css_tg(of_css(of)); 10439 if (tg->uclamp_req[clamp_id].value != req.util) 10440 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); 10441 10442 /* 10443 * Because of not recoverable conversion rounding we keep track of the 10444 * exact requested value 10445 */ 10446 tg->uclamp_pct[clamp_id] = req.percent; 10447 10448 /* Update effective clamps to track the most restrictive value */ 10449 cpu_util_update_eff(of_css(of)); 10450 10451 rcu_read_unlock(); 10452 mutex_unlock(&uclamp_mutex); 10453 10454 return nbytes; 10455 } 10456 10457 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, 10458 char *buf, size_t nbytes, 10459 loff_t off) 10460 { 10461 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); 10462 } 10463 10464 static ssize_t 
cpu_uclamp_max_write(struct kernfs_open_file *of, 10465 char *buf, size_t nbytes, 10466 loff_t off) 10467 { 10468 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); 10469 } 10470 10471 static inline void cpu_uclamp_print(struct seq_file *sf, 10472 enum uclamp_id clamp_id) 10473 { 10474 struct task_group *tg; 10475 u64 util_clamp; 10476 u64 percent; 10477 u32 rem; 10478 10479 rcu_read_lock(); 10480 tg = css_tg(seq_css(sf)); 10481 util_clamp = tg->uclamp_req[clamp_id].value; 10482 rcu_read_unlock(); 10483 10484 if (util_clamp == SCHED_CAPACITY_SCALE) { 10485 seq_puts(sf, "max\n"); 10486 return; 10487 } 10488 10489 percent = tg->uclamp_pct[clamp_id]; 10490 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); 10491 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); 10492 } 10493 10494 static int cpu_uclamp_min_show(struct seq_file *sf, void *v) 10495 { 10496 cpu_uclamp_print(sf, UCLAMP_MIN); 10497 return 0; 10498 } 10499 10500 static int cpu_uclamp_max_show(struct seq_file *sf, void *v) 10501 { 10502 cpu_uclamp_print(sf, UCLAMP_MAX); 10503 return 0; 10504 } 10505 #endif /* CONFIG_UCLAMP_TASK_GROUP */ 10506 10507 #ifdef CONFIG_FAIR_GROUP_SCHED 10508 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 10509 struct cftype *cftype, u64 shareval) 10510 { 10511 if (shareval > scale_load_down(ULONG_MAX)) 10512 shareval = MAX_SHARES; 10513 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 10514 } 10515 10516 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 10517 struct cftype *cft) 10518 { 10519 struct task_group *tg = css_tg(css); 10520 10521 return (u64) scale_load_down(tg->shares); 10522 } 10523 10524 #ifdef CONFIG_CFS_BANDWIDTH 10525 static DEFINE_MUTEX(cfs_constraints_mutex); 10526 10527 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 10528 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 10529 /* More than 203 days if BW_SHIFT equals 20. */ 10530 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; 10531 10532 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 10533 10534 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, 10535 u64 burst) 10536 { 10537 int i, ret = 0, runtime_enabled, runtime_was_enabled; 10538 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 10539 10540 if (tg == &root_task_group) 10541 return -EINVAL; 10542 10543 /* 10544 * Ensure we have at some amount of bandwidth every period. This is 10545 * to prevent reaching a state of large arrears when throttled via 10546 * entity_tick() resulting in prolonged exit starvation. 10547 */ 10548 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 10549 return -EINVAL; 10550 10551 /* 10552 * Likewise, bound things on the other side by preventing insane quota 10553 * periods. This also allows us to normalize in computing quota 10554 * feasibility. 10555 */ 10556 if (period > max_cfs_quota_period) 10557 return -EINVAL; 10558 10559 /* 10560 * Bound quota to defend quota against overflow during bandwidth shift. 10561 */ 10562 if (quota != RUNTIME_INF && quota > max_cfs_runtime) 10563 return -EINVAL; 10564 10565 if (quota != RUNTIME_INF && (burst > quota || 10566 burst + quota > max_cfs_runtime)) 10567 return -EINVAL; 10568 10569 /* 10570 * Prevent race between setting of cfs_rq->runtime_enabled and 10571 * unthrottle_offline_cfs_rqs(). 
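 * Holding cpus_read_lock() across the for_each_online_cpu() walk below
 * keeps CPU hotplug (and with it unthrottle_offline_cfs_rqs()) from
 * running while cfs_rq->runtime_enabled is being updated.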
10572 */ 10573 cpus_read_lock(); 10574 mutex_lock(&cfs_constraints_mutex); 10575 ret = __cfs_schedulable(tg, period, quota); 10576 if (ret) 10577 goto out_unlock; 10578 10579 runtime_enabled = quota != RUNTIME_INF; 10580 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 10581 /* 10582 * If we need to toggle cfs_bandwidth_used, off->on must occur 10583 * before making related changes, and on->off must occur afterwards 10584 */ 10585 if (runtime_enabled && !runtime_was_enabled) 10586 cfs_bandwidth_usage_inc(); 10587 raw_spin_lock_irq(&cfs_b->lock); 10588 cfs_b->period = ns_to_ktime(period); 10589 cfs_b->quota = quota; 10590 cfs_b->burst = burst; 10591 10592 __refill_cfs_bandwidth_runtime(cfs_b); 10593 10594 /* Restart the period timer (if active) to handle new period expiry: */ 10595 if (runtime_enabled) 10596 start_cfs_bandwidth(cfs_b); 10597 10598 raw_spin_unlock_irq(&cfs_b->lock); 10599 10600 for_each_online_cpu(i) { 10601 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 10602 struct rq *rq = cfs_rq->rq; 10603 struct rq_flags rf; 10604 10605 rq_lock_irq(rq, &rf); 10606 cfs_rq->runtime_enabled = runtime_enabled; 10607 cfs_rq->runtime_remaining = 0; 10608 10609 if (cfs_rq->throttled) 10610 unthrottle_cfs_rq(cfs_rq); 10611 rq_unlock_irq(rq, &rf); 10612 } 10613 if (runtime_was_enabled && !runtime_enabled) 10614 cfs_bandwidth_usage_dec(); 10615 out_unlock: 10616 mutex_unlock(&cfs_constraints_mutex); 10617 cpus_read_unlock(); 10618 10619 return ret; 10620 } 10621 10622 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 10623 { 10624 u64 quota, period, burst; 10625 10626 period = ktime_to_ns(tg->cfs_bandwidth.period); 10627 burst = tg->cfs_bandwidth.burst; 10628 if (cfs_quota_us < 0) 10629 quota = RUNTIME_INF; 10630 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) 10631 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 10632 else 10633 return -EINVAL; 10634 10635 return tg_set_cfs_bandwidth(tg, period, quota, burst); 10636 } 10637 10638 static long tg_get_cfs_quota(struct task_group *tg) 10639 { 10640 u64 quota_us; 10641 10642 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 10643 return -1; 10644 10645 quota_us = tg->cfs_bandwidth.quota; 10646 do_div(quota_us, NSEC_PER_USEC); 10647 10648 return quota_us; 10649 } 10650 10651 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 10652 { 10653 u64 quota, period, burst; 10654 10655 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 10656 return -EINVAL; 10657 10658 period = (u64)cfs_period_us * NSEC_PER_USEC; 10659 quota = tg->cfs_bandwidth.quota; 10660 burst = tg->cfs_bandwidth.burst; 10661 10662 return tg_set_cfs_bandwidth(tg, period, quota, burst); 10663 } 10664 10665 static long tg_get_cfs_period(struct task_group *tg) 10666 { 10667 u64 cfs_period_us; 10668 10669 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 10670 do_div(cfs_period_us, NSEC_PER_USEC); 10671 10672 return cfs_period_us; 10673 } 10674 10675 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) 10676 { 10677 u64 quota, period, burst; 10678 10679 if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC) 10680 return -EINVAL; 10681 10682 burst = (u64)cfs_burst_us * NSEC_PER_USEC; 10683 period = ktime_to_ns(tg->cfs_bandwidth.period); 10684 quota = tg->cfs_bandwidth.quota; 10685 10686 return tg_set_cfs_bandwidth(tg, period, quota, burst); 10687 } 10688 10689 static long tg_get_cfs_burst(struct task_group *tg) 10690 { 10691 u64 burst_us; 10692 10693 burst_us = tg->cfs_bandwidth.burst; 10694 do_div(burst_us, NSEC_PER_USEC); 10695 10696 return 
burst_us; 10697 } 10698 10699 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 10700 struct cftype *cft) 10701 { 10702 return tg_get_cfs_quota(css_tg(css)); 10703 } 10704 10705 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 10706 struct cftype *cftype, s64 cfs_quota_us) 10707 { 10708 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 10709 } 10710 10711 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 10712 struct cftype *cft) 10713 { 10714 return tg_get_cfs_period(css_tg(css)); 10715 } 10716 10717 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 10718 struct cftype *cftype, u64 cfs_period_us) 10719 { 10720 return tg_set_cfs_period(css_tg(css), cfs_period_us); 10721 } 10722 10723 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css, 10724 struct cftype *cft) 10725 { 10726 return tg_get_cfs_burst(css_tg(css)); 10727 } 10728 10729 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css, 10730 struct cftype *cftype, u64 cfs_burst_us) 10731 { 10732 return tg_set_cfs_burst(css_tg(css), cfs_burst_us); 10733 } 10734 10735 struct cfs_schedulable_data { 10736 struct task_group *tg; 10737 u64 period, quota; 10738 }; 10739 10740 /* 10741 * normalize group quota/period to be quota/max_period 10742 * note: units are usecs 10743 */ 10744 static u64 normalize_cfs_quota(struct task_group *tg, 10745 struct cfs_schedulable_data *d) 10746 { 10747 u64 quota, period; 10748 10749 if (tg == d->tg) { 10750 period = d->period; 10751 quota = d->quota; 10752 } else { 10753 period = tg_get_cfs_period(tg); 10754 quota = tg_get_cfs_quota(tg); 10755 } 10756 10757 /* note: these should typically be equivalent */ 10758 if (quota == RUNTIME_INF || quota == -1) 10759 return RUNTIME_INF; 10760 10761 return to_ratio(period, quota); 10762 } 10763 10764 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 10765 { 10766 struct cfs_schedulable_data *d = data; 10767 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 10768 s64 quota = 0, parent_quota = -1; 10769 10770 if (!tg->parent) { 10771 quota = RUNTIME_INF; 10772 } else { 10773 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 10774 10775 quota = normalize_cfs_quota(tg, d); 10776 parent_quota = parent_b->hierarchical_quota; 10777 10778 /* 10779 * Ensure max(child_quota) <= parent_quota. On cgroup2, 10780 * always take the min. 
On cgroup1, only inherit when no 10781 * limit is set: 10782 */ 10783 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 10784 quota = min(quota, parent_quota); 10785 } else { 10786 if (quota == RUNTIME_INF) 10787 quota = parent_quota; 10788 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 10789 return -EINVAL; 10790 } 10791 } 10792 cfs_b->hierarchical_quota = quota; 10793 10794 return 0; 10795 } 10796 10797 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 10798 { 10799 int ret; 10800 struct cfs_schedulable_data data = { 10801 .tg = tg, 10802 .period = period, 10803 .quota = quota, 10804 }; 10805 10806 if (quota != RUNTIME_INF) { 10807 do_div(data.period, NSEC_PER_USEC); 10808 do_div(data.quota, NSEC_PER_USEC); 10809 } 10810 10811 rcu_read_lock(); 10812 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 10813 rcu_read_unlock(); 10814 10815 return ret; 10816 } 10817 10818 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 10819 { 10820 struct task_group *tg = css_tg(seq_css(sf)); 10821 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 10822 10823 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 10824 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 10825 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 10826 10827 if (schedstat_enabled() && tg != &root_task_group) { 10828 struct sched_statistics *stats; 10829 u64 ws = 0; 10830 int i; 10831 10832 for_each_possible_cpu(i) { 10833 stats = __schedstats_from_se(tg->se[i]); 10834 ws += schedstat_val(stats->wait_sum); 10835 } 10836 10837 seq_printf(sf, "wait_sum %llu\n", ws); 10838 } 10839 10840 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); 10841 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); 10842 10843 return 0; 10844 } 10845 #endif /* CONFIG_CFS_BANDWIDTH */ 10846 #endif /* CONFIG_FAIR_GROUP_SCHED */ 10847 10848 #ifdef CONFIG_RT_GROUP_SCHED 10849 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 10850 struct cftype *cft, s64 val) 10851 { 10852 return sched_group_set_rt_runtime(css_tg(css), val); 10853 } 10854 10855 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 10856 struct cftype *cft) 10857 { 10858 return sched_group_rt_runtime(css_tg(css)); 10859 } 10860 10861 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 10862 struct cftype *cftype, u64 rt_period_us) 10863 { 10864 return sched_group_set_rt_period(css_tg(css), rt_period_us); 10865 } 10866 10867 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 10868 struct cftype *cft) 10869 { 10870 return sched_group_rt_period(css_tg(css)); 10871 } 10872 #endif /* CONFIG_RT_GROUP_SCHED */ 10873 10874 #ifdef CONFIG_FAIR_GROUP_SCHED 10875 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css, 10876 struct cftype *cft) 10877 { 10878 return css_tg(css)->idle; 10879 } 10880 10881 static int cpu_idle_write_s64(struct cgroup_subsys_state *css, 10882 struct cftype *cft, s64 idle) 10883 { 10884 return sched_group_set_idle(css_tg(css), idle); 10885 } 10886 #endif 10887 10888 static struct cftype cpu_legacy_files[] = { 10889 #ifdef CONFIG_FAIR_GROUP_SCHED 10890 { 10891 .name = "shares", 10892 .read_u64 = cpu_shares_read_u64, 10893 .write_u64 = cpu_shares_write_u64, 10894 }, 10895 { 10896 .name = "idle", 10897 .read_s64 = cpu_idle_read_s64, 10898 .write_s64 = cpu_idle_write_s64, 10899 }, 10900 #endif 10901 #ifdef CONFIG_CFS_BANDWIDTH 10902 { 10903 .name = "cfs_quota_us", 10904 .read_s64 = cpu_cfs_quota_read_s64, 10905 .write_s64 = 
cpu_cfs_quota_write_s64, 10906 }, 10907 { 10908 .name = "cfs_period_us", 10909 .read_u64 = cpu_cfs_period_read_u64, 10910 .write_u64 = cpu_cfs_period_write_u64, 10911 }, 10912 { 10913 .name = "cfs_burst_us", 10914 .read_u64 = cpu_cfs_burst_read_u64, 10915 .write_u64 = cpu_cfs_burst_write_u64, 10916 }, 10917 { 10918 .name = "stat", 10919 .seq_show = cpu_cfs_stat_show, 10920 }, 10921 #endif 10922 #ifdef CONFIG_RT_GROUP_SCHED 10923 { 10924 .name = "rt_runtime_us", 10925 .read_s64 = cpu_rt_runtime_read, 10926 .write_s64 = cpu_rt_runtime_write, 10927 }, 10928 { 10929 .name = "rt_period_us", 10930 .read_u64 = cpu_rt_period_read_uint, 10931 .write_u64 = cpu_rt_period_write_uint, 10932 }, 10933 #endif 10934 #ifdef CONFIG_UCLAMP_TASK_GROUP 10935 { 10936 .name = "uclamp.min", 10937 .flags = CFTYPE_NOT_ON_ROOT, 10938 .seq_show = cpu_uclamp_min_show, 10939 .write = cpu_uclamp_min_write, 10940 }, 10941 { 10942 .name = "uclamp.max", 10943 .flags = CFTYPE_NOT_ON_ROOT, 10944 .seq_show = cpu_uclamp_max_show, 10945 .write = cpu_uclamp_max_write, 10946 }, 10947 #endif 10948 { } /* Terminate */ 10949 }; 10950 10951 static int cpu_extra_stat_show(struct seq_file *sf, 10952 struct cgroup_subsys_state *css) 10953 { 10954 #ifdef CONFIG_CFS_BANDWIDTH 10955 { 10956 struct task_group *tg = css_tg(css); 10957 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 10958 u64 throttled_usec, burst_usec; 10959 10960 throttled_usec = cfs_b->throttled_time; 10961 do_div(throttled_usec, NSEC_PER_USEC); 10962 burst_usec = cfs_b->burst_time; 10963 do_div(burst_usec, NSEC_PER_USEC); 10964 10965 seq_printf(sf, "nr_periods %d\n" 10966 "nr_throttled %d\n" 10967 "throttled_usec %llu\n" 10968 "nr_bursts %d\n" 10969 "burst_usec %llu\n", 10970 cfs_b->nr_periods, cfs_b->nr_throttled, 10971 throttled_usec, cfs_b->nr_burst, burst_usec); 10972 } 10973 #endif 10974 return 0; 10975 } 10976 10977 #ifdef CONFIG_FAIR_GROUP_SCHED 10978 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 10979 struct cftype *cft) 10980 { 10981 struct task_group *tg = css_tg(css); 10982 u64 weight = scale_load_down(tg->shares); 10983 10984 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); 10985 } 10986 10987 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 10988 struct cftype *cft, u64 weight) 10989 { 10990 /* 10991 * cgroup weight knobs should use the common MIN, DFL and MAX 10992 * values which are 1, 100 and 10000 respectively. While it loses 10993 * a bit of range on both ends, it maps pretty well onto the shares 10994 * value used by scheduler and the round-trip conversions preserve 10995 * the original value over the entire range. 
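 *
 * For example, the default cgroup weight of 100 maps to
 * 100 * 1024 / 100 = 1024 shares, and cpu_weight_read_u64() converts
 * that back to 1024 * 100 / 1024 = 100.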
10996 */ 10997 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) 10998 return -ERANGE; 10999 11000 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); 11001 11002 return sched_group_set_shares(css_tg(css), scale_load(weight)); 11003 } 11004 11005 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 11006 struct cftype *cft) 11007 { 11008 unsigned long weight = scale_load_down(css_tg(css)->shares); 11009 int last_delta = INT_MAX; 11010 int prio, delta; 11011 11012 /* find the closest nice value to the current weight */ 11013 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 11014 delta = abs(sched_prio_to_weight[prio] - weight); 11015 if (delta >= last_delta) 11016 break; 11017 last_delta = delta; 11018 } 11019 11020 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 11021 } 11022 11023 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 11024 struct cftype *cft, s64 nice) 11025 { 11026 unsigned long weight; 11027 int idx; 11028 11029 if (nice < MIN_NICE || nice > MAX_NICE) 11030 return -ERANGE; 11031 11032 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 11033 idx = array_index_nospec(idx, 40); 11034 weight = sched_prio_to_weight[idx]; 11035 11036 return sched_group_set_shares(css_tg(css), scale_load(weight)); 11037 } 11038 #endif 11039 11040 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 11041 long period, long quota) 11042 { 11043 if (quota < 0) 11044 seq_puts(sf, "max"); 11045 else 11046 seq_printf(sf, "%ld", quota); 11047 11048 seq_printf(sf, " %ld\n", period); 11049 } 11050 11051 /* caller should put the current value in *@periodp before calling */ 11052 static int __maybe_unused cpu_period_quota_parse(char *buf, 11053 u64 *periodp, u64 *quotap) 11054 { 11055 char tok[21]; /* U64_MAX */ 11056 11057 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 11058 return -EINVAL; 11059 11060 *periodp *= NSEC_PER_USEC; 11061 11062 if (sscanf(tok, "%llu", quotap)) 11063 *quotap *= NSEC_PER_USEC; 11064 else if (!strcmp(tok, "max")) 11065 *quotap = RUNTIME_INF; 11066 else 11067 return -EINVAL; 11068 11069 return 0; 11070 } 11071 11072 #ifdef CONFIG_CFS_BANDWIDTH 11073 static int cpu_max_show(struct seq_file *sf, void *v) 11074 { 11075 struct task_group *tg = css_tg(seq_css(sf)); 11076 11077 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); 11078 return 0; 11079 } 11080 11081 static ssize_t cpu_max_write(struct kernfs_open_file *of, 11082 char *buf, size_t nbytes, loff_t off) 11083 { 11084 struct task_group *tg = css_tg(of_css(of)); 11085 u64 period = tg_get_cfs_period(tg); 11086 u64 burst = tg_get_cfs_burst(tg); 11087 u64 quota; 11088 int ret; 11089 11090 ret = cpu_period_quota_parse(buf, &period, "a); 11091 if (!ret) 11092 ret = tg_set_cfs_bandwidth(tg, period, quota, burst); 11093 return ret ?: nbytes; 11094 } 11095 #endif 11096 11097 static struct cftype cpu_files[] = { 11098 #ifdef CONFIG_FAIR_GROUP_SCHED 11099 { 11100 .name = "weight", 11101 .flags = CFTYPE_NOT_ON_ROOT, 11102 .read_u64 = cpu_weight_read_u64, 11103 .write_u64 = cpu_weight_write_u64, 11104 }, 11105 { 11106 .name = "weight.nice", 11107 .flags = CFTYPE_NOT_ON_ROOT, 11108 .read_s64 = cpu_weight_nice_read_s64, 11109 .write_s64 = cpu_weight_nice_write_s64, 11110 }, 11111 { 11112 .name = "idle", 11113 .flags = CFTYPE_NOT_ON_ROOT, 11114 .read_s64 = cpu_idle_read_s64, 11115 .write_s64 = cpu_idle_write_s64, 11116 }, 11117 #endif 11118 #ifdef CONFIG_CFS_BANDWIDTH 11119 { 11120 .name = "max", 11121 .flags = CFTYPE_NOT_ON_ROOT, 11122 
.seq_show = cpu_max_show, 11123 .write = cpu_max_write, 11124 }, 11125 { 11126 .name = "max.burst", 11127 .flags = CFTYPE_NOT_ON_ROOT, 11128 .read_u64 = cpu_cfs_burst_read_u64, 11129 .write_u64 = cpu_cfs_burst_write_u64, 11130 }, 11131 #endif 11132 #ifdef CONFIG_UCLAMP_TASK_GROUP 11133 { 11134 .name = "uclamp.min", 11135 .flags = CFTYPE_NOT_ON_ROOT, 11136 .seq_show = cpu_uclamp_min_show, 11137 .write = cpu_uclamp_min_write, 11138 }, 11139 { 11140 .name = "uclamp.max", 11141 .flags = CFTYPE_NOT_ON_ROOT, 11142 .seq_show = cpu_uclamp_max_show, 11143 .write = cpu_uclamp_max_write, 11144 }, 11145 #endif 11146 { } /* terminate */ 11147 }; 11148 11149 struct cgroup_subsys cpu_cgrp_subsys = { 11150 .css_alloc = cpu_cgroup_css_alloc, 11151 .css_online = cpu_cgroup_css_online, 11152 .css_released = cpu_cgroup_css_released, 11153 .css_free = cpu_cgroup_css_free, 11154 .css_extra_stat_show = cpu_extra_stat_show, 11155 #ifdef CONFIG_RT_GROUP_SCHED 11156 .can_attach = cpu_cgroup_can_attach, 11157 #endif 11158 .attach = cpu_cgroup_attach, 11159 .legacy_cftypes = cpu_legacy_files, 11160 .dfl_cftypes = cpu_files, 11161 .early_init = true, 11162 .threaded = true, 11163 }; 11164 11165 #endif /* CONFIG_CGROUP_SCHED */ 11166 11167 void dump_cpu_task(int cpu) 11168 { 11169 if (cpu == smp_processor_id() && in_hardirq()) { 11170 struct pt_regs *regs; 11171 11172 regs = get_irq_regs(); 11173 if (regs) { 11174 show_regs(regs); 11175 return; 11176 } 11177 } 11178 11179 if (trigger_single_cpu_backtrace(cpu)) 11180 return; 11181 11182 pr_info("Task dump for CPU %d:\n", cpu); 11183 sched_show_task(cpu_curr(cpu)); 11184 } 11185 11186 /* 11187 * Nice levels are multiplicative, with a gentle 10% change for every 11188 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 11189 * nice 1, it will get ~10% less CPU time than another CPU-bound task 11190 * that remained on nice 0. 11191 * 11192 * The "10% effect" is relative and cumulative: from _any_ nice level, 11193 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 11194 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 11195 * If a task goes up by ~10% and another task goes down by ~10% then 11196 * the relative distance between them is ~25%.) 11197 */ 11198 const int sched_prio_to_weight[40] = { 11199 /* -20 */ 88761, 71755, 56483, 46273, 36291, 11200 /* -15 */ 29154, 23254, 18705, 14949, 11916, 11201 /* -10 */ 9548, 7620, 6100, 4904, 3906, 11202 /* -5 */ 3121, 2501, 1991, 1586, 1277, 11203 /* 0 */ 1024, 820, 655, 526, 423, 11204 /* 5 */ 335, 272, 215, 172, 137, 11205 /* 10 */ 110, 87, 70, 56, 45, 11206 /* 15 */ 36, 29, 23, 18, 15, 11207 }; 11208 11209 /* 11210 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. 
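 *
 * For example, the nice-0 weight of 1024 has the precalculated inverse
 * 2^32 / 1024 = 4194304, the value at index 20 (nice 0) below.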
11211 * 11212 * In cases where the weight does not change often, we can use the 11213 * precalculated inverse to speed up arithmetic by turning divisions 11214 * into multiplications: 11215 */ 11216 const u32 sched_prio_to_wmult[40] = { 11217 /* -20 */ 48388, 59856, 76040, 92818, 118348, 11218 /* -15 */ 147320, 184698, 229616, 287308, 360437, 11219 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 11220 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 11221 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 11222 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 11223 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 11224 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 11225 }; 11226 11227 void call_trace_sched_update_nr_running(struct rq *rq, int count) 11228 { 11229 trace_sched_update_nr_running_tp(rq, count); 11230 } 11231