// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/core.c
 *
 * Core kernel scheduler code and related syscalls
 *
 * Copyright (C) 1991-2002  Linus Torvalds
 */
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>

#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>

#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_ENTRY
#  include <linux/entry-common.h>
# endif
#endif

#include <uapi/linux/sched/types.h>

#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#include <trace/events/ipi.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"
#include "stats.h"
#include "autogroup.h"

#include "autogroup.h"
#include "pelt.h"
#include "smp.h"
#include "stats.h"

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
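
/*
 * Illustrative sketch (not compiled here, probe name made up for the
 * example): an external module can attach a probe to one of the bare
 * tracepoints exported above, e.g.:
 *
 *	static void my_pelt_cfs_probe(void *data, struct cfs_rq *cfs_rq)
 *	{
 *		// inspect PELT state of @cfs_rq
 *	}
 *
 *	register_trace_pelt_cfs_tp(my_pelt_cfs_probe, NULL);
 *	...
 *	unregister_trace_pelt_cfs_tp(my_pelt_cfs_probe, NULL);
 */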

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(const struct task_struct *p)
{
	if (p->sched_class == &stop_sched_class) /* trumps deadline */
		return -2;

	if (rt_prio(p->prio)) /* includes deadline */
		return p->prio; /* [-1, 99] */

	if (p->sched_class == &idle_sched_class)
		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

	return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
}

/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  := l(b,a)
 * ge(a,b) := !l(a,b)
 */

/* real prio, less is less */
static inline bool prio_less(const struct task_struct *a,
			     const struct task_struct *b, bool in_fi)
{

	int pa = __task_prio(a), pb = __task_prio(b);

	if (-pa < -pb)
		return true;

	if (-pb < -pa)
		return false;

	if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
		return !dl_time_before(a->dl.deadline, b->dl.deadline);

	if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
		return cfs_prio_less(a, b, in_fi);

	return false;
}

static inline bool __sched_core_less(const struct task_struct *a,
				     const struct task_struct *b)
{
	if (a->core_cookie < b->core_cookie)
		return true;

	if (a->core_cookie > b->core_cookie)
		return false;

	/* flip prio, so high prio is leftmost */
	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
		return true;

	return false;
}
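
/*
 * Worked example (sketch): with the mapping in __task_prio() and the
 * "less is more" convention above, tasks order roughly as
 *
 *	stop (-2)  <  deadline (-1)  <  RT [0..99]  <  fair (120)  <  idle (140)
 *
 * i.e. a stop-class task beats deadline, deadline beats RT, and so on;
 * within fair, cfs_prio_less() breaks the tie.
 */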

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
	const struct task_struct *p = __node_2_sc(node);
	unsigned long cookie = (unsigned long)key;

	if (cookie < p->core_cookie)
		return -1;

	if (cookie > p->core_cookie)
		return 1;

	return 0;
}

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->core->core_task_seq++;

	if (!p->core_cookie)
		return;

	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
	rq->core->core_task_seq++;

	if (sched_core_enqueued(p)) {
		rb_erase(&p->core_node, &rq->core_tree);
		RB_CLEAR_NODE(&p->core_node);
	}

	/*
	 * Migrating the last task off the cpu, with the cpu in forced idle
	 * state. Reschedule to create an accounting edge for forced idle,
	 * and re-examine whether the core is still in forced idle state.
	 */
	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
	    rq->core->core_forceidle_count && rq->curr == rq->idle)
		resched_curr(rq);
}

static int sched_task_is_throttled(struct task_struct *p, int cpu)
{
	if (p->sched_class->task_is_throttled)
		return p->sched_class->task_is_throttled(p, cpu);

	return 0;
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
	struct rb_node *node = &p->core_node;
	int cpu = task_cpu(p);

	do {
		node = rb_next(node);
		if (!node)
			return NULL;

		p = __node_2_sc(node);
		if (p->core_cookie != cookie)
			return NULL;

	} while (sched_task_is_throttled(p, cpu));

	return p;
}

/*
 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
 * If no suitable task is found, NULL will be returned.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
	struct task_struct *p;
	struct rb_node *node;

	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
	if (!node)
		return NULL;

	p = __node_2_sc(node);
	if (!sched_task_is_throttled(p, rq->cpu))
		return p;

	return sched_core_next(p, cookie);
}

/*
 * Magic required such that:
 *
 *	raw_spin_rq_lock(rq);
 *	...
 *	raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t;

	for_each_cpu(t, smt_mask)
		raw_spin_unlock(&cpu_rq(t)->__lock);
	local_irq_restore(*flags);
}
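
/*
 * Usage sketch (illustrative): update core-wide state with every SMT
 * sibling's rq lock held, as __sched_core_flip() below does:
 *
 *	unsigned long flags;
 *	int t;
 *
 *	sched_core_lock(cpu, &flags);
 *	for_each_cpu(t, cpu_smt_mask(cpu))
 *		cpu_rq(t)->core_enabled = enabled;
 *	sched_core_unlock(cpu, &flags);
 */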

static void __sched_core_flip(bool enabled)
{
	unsigned long flags;
	int cpu, t;

	cpus_read_lock();

	/*
	 * Toggle the online cores, one by one.
	 */
	cpumask_copy(&sched_core_mask, cpu_online_mask);
	for_each_cpu(cpu, &sched_core_mask) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu);

		sched_core_lock(cpu, &flags);

		for_each_cpu(t, smt_mask)
			cpu_rq(t)->core_enabled = enabled;

		cpu_rq(cpu)->core->core_forceidle_start = 0;

		sched_core_unlock(cpu, &flags);

		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
	}

	/*
	 * Toggle the offline CPUs.
	 */
	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
		cpu_rq(cpu)->core_enabled = enabled;

	cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
	static_branch_enable(&__sched_core_enabled);
	/*
	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
	 * and future ones will observe !sched_core_disabled().
	 */
	synchronize_rcu();
	__sched_core_flip(true);
	sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
	sched_core_assert_empty();
	__sched_core_flip(false);
	static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
	if (atomic_inc_not_zero(&sched_core_count))
		return;

	mutex_lock(&sched_core_mutex);
	if (!atomic_read(&sched_core_count))
		__sched_core_enable();

	smp_mb__before_atomic();
	atomic_inc(&sched_core_count);
	mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
		__sched_core_disable();
		mutex_unlock(&sched_core_mutex);
	}
}

void sched_core_put(void)
{
	static DECLARE_WORK(_work, __sched_core_put);

	/*
	 * "There can be only one"
	 *
	 * Either this is the last one, or we don't actually need to do any
	 * 'work'. If it is the last *again*, we rely on
	 * WORK_STRUCT_PENDING_BIT.
	 */
	if (!atomic_add_unless(&sched_core_count, -1, 1))
		schedule_work(&_work);
}

#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }

#endif /* CONFIG_SCHED_CORE */
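
/*
 * Usage sketch (illustrative): users of core scheduling (for example the
 * core-cookie management code) bracket their use of the feature with a
 * get/put pair:
 *
 *	sched_core_get();	// first user flips core scheduling on
 *	...			// install and use core cookies
 *	sched_core_put();	// last user schedules the disable
 */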

/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task():	p->sched_task_group
 *  - uclamp_update_active()	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */
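
/*
 * Example (sketch): the canonical nesting implied by the lock order above,
 * which task_rq_lock()/task_rq_unlock() below implement for callers:
 *
 *	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 *	rq = __task_rq_lock(p, &rf);		// acquires rq->lock
 *	...					// p's scheduling state is stable
 *	__task_rq_unlock(rq, &rf);
 *	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
 */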

void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		/* preempt_count *MUST* be > 1 */
		preempt_enable_no_resched();
		return;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		if (likely(lock == __rq_lockp(rq))) {
			/* preempt_count *MUST* be > 1 */
			preempt_enable_no_resched();
			return;
		}
		raw_spin_unlock(lock);
	}
}

bool raw_spin_rq_trylock(struct rq *rq)
{
	raw_spinlock_t *lock;
	bool ret;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		ret = raw_spin_trylock(&rq->__lock);
		preempt_enable();
		return ret;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		ret = raw_spin_trylock(lock);
		if (!ret || (likely(lock == __rq_lockp(rq)))) {
			preempt_enable();
			return ret;
		}
		raw_spin_unlock(lock);
	}
}

void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}

#ifdef CONFIG_SMP
/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);
}
#endif
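
/*
 * Usage sketch (illustrative): callers that need both runqueues locked,
 * e.g. when swapping or pulling tasks between two CPUs, do (with IRQs
 * already disabled):
 *
 *	double_rq_lock(src_rq, dst_rq);
 *	...			// both rq->locks held, in rq_order_less() order
 *	double_rq_unlock(src_rq, dst_rq);
 */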

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
	/*
	 * In theory, the compiler should just see 0 here, and optimize out the
	 * call to sched_rt_avg_update. But I don't trust it...
	 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight misattribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
	delayacct_irq(rq->curr, irq_delta);
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_rq_held(rq);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}
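
/*
 * Usage sketch (illustrative): rq->clock consumers follow the pattern used
 * throughout this file; the clock is only advanced under the rq lock:
 *
 *	rq_lock(rq, &rf);
 *	update_rq_clock(rq);
 *	...			// read rq_clock(rq) / rq_clock_task(rq)
 *	rq_unlock(rq, &rf);
 */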

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _val = *_ptr;				\
									\
		do {							\
		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
	_val;								\
})
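
/*
 * Illustrative expansion (sketch): fetch_or(&ti->flags, _TIF_NEED_RESCHED)
 * behaves like the following sequence, returning the value the flags word
 * had before the OR:
 *
 *	old = ti->flags;
 *	while (!try_cmpxchg(&ti->flags, &old, old | _TIF_NEED_RESCHED))
 *		;	// on failure, 'old' is reloaded with the current value
 *	return old;
 */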

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
			break;
	}
	return true;
}

#else
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static inline bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold a reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending on whether or not the @task is already
 * queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}
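
/*
 * Usage sketch (illustrative): the typical wake_q pattern (as used by e.g.
 * the futex and rtmutex code) queues wakeups while holding locks and issues
 * them after the locks are dropped; 'some_lock' is a stand-in:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);	// takes a task reference
 *	raw_spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);		// wakes and drops the references
 */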

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_rq_held(rq);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;
	const struct cpumask *hk_mask;

	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);

	guard(rcu)();

	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i))
				return i;
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);

	return default_cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
{
	if (rq->nr_running != 1)
		return false;

	if (p->sched_class != &fair_sched_class)
		return false;

	if (!task_on_rq_queued(p))
		return false;

	return true;
}

bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks
	 * left; if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	/*
	 * If there is one task and it has CFS runtime bandwidth constraints
	 * and it's on the CPU now we don't want to stop the tick.
	 * This check prevents clearing the bit if a newly enqueued task here is
	 * dequeued by migrating while the constrained task continues to run.
	 * E.g. going from 2->1 without going through pick_next_task().
	 */
	if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) {
		if (cfs_task_bw_constrained(rq->curr))
			return false;
	}

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */
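
/*
 * Worked example (sketch): under the rules above, a CPU running a single
 * SCHED_FIFO task may stop its tick, while a CPU with two SCHED_RR tasks,
 * two CFS tasks, or any SCHED_DEADLINE task must keep it ticking.
 */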

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
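
/*
 * Usage sketch (illustrative): callers pass tg_nop() for the direction they
 * do not care about; my_visitor() below is a made-up tg_visitor that returns
 * non-zero to abort the walk:
 *
 *	rcu_read_lock();
 *	ret = walk_tg_tree_from(&root_task_group, my_visitor, tg_nop, &data);
 *	rcu_read_unlock();
 */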

static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight lw;

	if (task_has_idle_policy(p)) {
		lw.weight = scale_load(WEIGHT_IDLEPRIO);
		lw.inv_weight = WMULT_IDLEPRIO;
	} else {
		lw.weight = scale_load(sched_prio_to_weight[prio]);
		lw.inv_weight = sched_prio_to_wmult[prio];
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight.
	 */
	if (update_load && p->sched_class == &fair_sched_class)
		reweight_task(p, &lw);
	else
		p->se.load = lw;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
static unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less than or equal to these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since there are actual users that make use of uclamp
 * functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
}
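
/*
 * Worked example (sketch, assuming SCHED_CAPACITY_SCALE == 1024 and the
 * default UCLAMP_BUCKETS == 5): UCLAMP_BUCKET_DELTA is
 * DIV_ROUND_CLOSEST(1024, 5) == 205, so:
 *
 *	uclamp_bucket_id(0)    == 0
 *	uclamp_bucket_id(512)  == 512 / 205  == 2
 *	uclamp_bucket_id(1024) == min(1024 / 205, UCLAMP_BUCKETS - 1) == 4
 */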

static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;
	return SCHED_CAPACITY_SCALE;
}

static inline void uclamp_se_set(struct uclamp_se *uc_se,
				 unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->bucket_id = uclamp_bucket_id(value);
	uc_se->user_defined = user_defined;
}

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	uclamp_rq_set(rq, clamp_id, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * topmost bucket with tasks in.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
	unsigned int default_util_min;
	struct uclamp_se *uc_se;

	lockdep_assert_held(&p->pi_lock);

	uc_se = &p->uclamp_req[UCLAMP_MIN];

	/* Only sync if user didn't override the default */
	if (uc_se->user_defined)
		return;

	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
	uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	if (!rt_task(p))
		return;

	/* Protect updates to p->uclamp_* */
	rq = task_rq_lock(p, &rf);
	__uclamp_update_util_min_rt_default(p);
	task_rq_unlock(rq, p, &rf);
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	/* Copy by value as we could modify it */
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	unsigned int tg_min, tg_max, value;

	/*
	 * Tasks in autogroups or root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
	value = uc_req.value;
	value = clamp(value, tg_min, tg_max);
	uclamp_se_set(&uc_req, value, false);
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}
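
/*
 * Worked example (sketch, assuming the default system clamps): a task
 * requests UCLAMP_MIN == 800 but lives in a task group whose effective
 * UCLAMP_MAX is 600. uclamp_tg_restrict() clamps the request to 600, and
 * since 600 <= uclamp_default[UCLAMP_MIN].value (1024 by default), the
 * effective value returned by uclamp_eff_get() is 600.
 */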

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space, so track
 * within each bucket the maximum value for tasks refcounted in it.
 * This "local max aggregation" allows tracking the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_rq_held(rq);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
		uclamp_rq_set(rq, clamp_id, uc_se->value);
}

/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_rq_held(rq);

	/*
	 * If sched_uclamp_used was enabled after task @p was enqueued,
	 * we could end up with an unbalanced call to uclamp_rq_dec_id().
	 *
	 * In this case the uc_se->active flag should be false since no uclamp
	 * accounting was performed at enqueue time and we can just return
	 * here.
	 *
	 * Need to be careful of the following enqueue/dequeue ordering
	 * problem too
	 *
	 *	enqueue(taskA)
	 *	// sched_uclamp_used gets enabled
	 *	enqueue(taskB)
	 *	dequeue(taskA)
	 *	// Must not decrement bucket->tasks here
	 *	dequeue(taskB)
	 *
	 * where we could end up with stale data in uc_se and
	 * bucket[uc_se->bucket_id].
	 *
	 * The following check here eliminates the possibility of such race.
	 */
	if (unlikely(!uc_se->active))
		return;

	bucket = &uc_rq->bucket[uc_se->bucket_id];

	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;

	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = uclamp_rq_get(rq, clamp_id);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fixup the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		uclamp_rq_set(rq, clamp_id, bkt_clamp);
	}
}

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
				      enum uclamp_id clamp_id)
{
	if (!p->uclamp[clamp_id].active)
		return;

	uclamp_rq_dec_id(rq, p, clamp_id);
	uclamp_rq_inc_id(rq, p, clamp_id);

	/*
	 * Make sure to clear the idle flag if we've transiently reached 0
	 * active tasks on rq.
	 */
	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void
uclamp_update_active(struct task_struct *p)
{
	enum uclamp_id clamp_id;
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	for_each_clamp_id(clamp_id)
		uclamp_rq_reinc_id(rq, p, clamp_id);

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it)))
		uclamp_update_active(p);
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
#endif

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_UCLAMP_TASK
#ifdef CONFIG_UCLAMP_TASK_GROUP
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	rcu_read_lock();
	cpu_util_update_eff(&root_task_group.css);
	rcu_read_unlock();
}
#else
static void uclamp_update_root_tg(void) { }
#endif

static void uclamp_sync_util_min_rt_default(void)
{
	struct task_struct *g, *p;

	/*
	 * copy_process()			sysctl_uclamp
	 *					  uclamp_min_rt = X;
	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
	 *   // link thread			  smp_mb__after_spinlock()
	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
	 *   sched_post_fork()			  for_each_process_thread()
	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
	 *
	 * Ensures that either sched_post_fork() will observe the new
	 * uclamp_min_rt or for_each_process_thread() will observe the new
	 * task.
	 */
	read_lock(&tasklist_lock);
	smp_mb__after_spinlock();
	read_unlock(&tasklist_lock);

	rcu_read_lock();
	for_each_process_thread(g, p)
		uclamp_update_util_min_rt_default(p);
	rcu_read_unlock();
}

static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max, old_min_rt;
	int result;

	guard(mutex)(&uclamp_mutex);

	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;
	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		return 0;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {

		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_update_root_tg();
	}

	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_sync_util_min_rt_default();
	}

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */
	return 0;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
	return result;
}
#endif
#endif

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT by default have a 100% boost value that could be modified
		 * at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);

	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}
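
/*
 * Usage sketch (illustrative): the per-task requests validated and applied
 * above originate from user-space via sched_setattr(2), e.g.:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_flags	= SCHED_FLAG_UTIL_CLAMP_MIN,
 *		.sched_util_min	= 512,
 *	};
 *
 *	sched_setattr(0, &attr, 0);	// pid 0 == the calling thread
 */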
1991 */ 1992 for_each_clamp_id(clamp_id) 1993 p->uclamp[clamp_id].active = false; 1994 1995 if (likely(!p->sched_reset_on_fork)) 1996 return; 1997 1998 for_each_clamp_id(clamp_id) { 1999 uclamp_se_set(&p->uclamp_req[clamp_id], 2000 uclamp_none(clamp_id), false); 2001 } 2002 } 2003 2004 static void uclamp_post_fork(struct task_struct *p) 2005 { 2006 uclamp_update_util_min_rt_default(p); 2007 } 2008 2009 static void __init init_uclamp_rq(struct rq *rq) 2010 { 2011 enum uclamp_id clamp_id; 2012 struct uclamp_rq *uc_rq = rq->uclamp; 2013 2014 for_each_clamp_id(clamp_id) { 2015 uc_rq[clamp_id] = (struct uclamp_rq) { 2016 .value = uclamp_none(clamp_id) 2017 }; 2018 } 2019 2020 rq->uclamp_flags = UCLAMP_FLAG_IDLE; 2021 } 2022 2023 static void __init init_uclamp(void) 2024 { 2025 struct uclamp_se uc_max = {}; 2026 enum uclamp_id clamp_id; 2027 int cpu; 2028 2029 for_each_possible_cpu(cpu) 2030 init_uclamp_rq(cpu_rq(cpu)); 2031 2032 for_each_clamp_id(clamp_id) { 2033 uclamp_se_set(&init_task.uclamp_req[clamp_id], 2034 uclamp_none(clamp_id), false); 2035 } 2036 2037 /* System defaults allow max clamp values for both indexes */ 2038 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false); 2039 for_each_clamp_id(clamp_id) { 2040 uclamp_default[clamp_id] = uc_max; 2041 #ifdef CONFIG_UCLAMP_TASK_GROUP 2042 root_task_group.uclamp_req[clamp_id] = uc_max; 2043 root_task_group.uclamp[clamp_id] = uc_max; 2044 #endif 2045 } 2046 } 2047 2048 #else /* CONFIG_UCLAMP_TASK */ 2049 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } 2050 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } 2051 static inline int uclamp_validate(struct task_struct *p, 2052 const struct sched_attr *attr) 2053 { 2054 return -EOPNOTSUPP; 2055 } 2056 static void __setscheduler_uclamp(struct task_struct *p, 2057 const struct sched_attr *attr) { } 2058 static inline void uclamp_fork(struct task_struct *p) { } 2059 static inline void uclamp_post_fork(struct task_struct *p) { } 2060 static inline void init_uclamp(void) { } 2061 #endif /* CONFIG_UCLAMP_TASK */ 2062 2063 bool sched_task_on_rq(struct task_struct *p) 2064 { 2065 return task_on_rq_queued(p); 2066 } 2067 2068 unsigned long get_wchan(struct task_struct *p) 2069 { 2070 unsigned long ip = 0; 2071 unsigned int state; 2072 2073 if (!p || p == current) 2074 return 0; 2075 2076 /* Only get wchan if task is blocked and we can keep it that way. 
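 *
 * A sketch of a typical consumer (hypothetical caller; the names below are
 * illustrative and not part of this file) that resolves the returned
 * address to a symbol for reporting:
 *
 *	unsigned long wchan = get_wchan(p);
 *
 *	if (wchan)
 *		pr_info("%d is blocked in %ps\n", task_pid_nr(p), (void *)wchan);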
*/ 2077 raw_spin_lock_irq(&p->pi_lock); 2078 state = READ_ONCE(p->__state); 2079 smp_rmb(); /* see try_to_wake_up() */ 2080 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) 2081 ip = __get_wchan(p); 2082 raw_spin_unlock_irq(&p->pi_lock); 2083 2084 return ip; 2085 } 2086 2087 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 2088 { 2089 if (!(flags & ENQUEUE_NOCLOCK)) 2090 update_rq_clock(rq); 2091 2092 if (!(flags & ENQUEUE_RESTORE)) { 2093 sched_info_enqueue(rq, p); 2094 psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED)); 2095 } 2096 2097 uclamp_rq_inc(rq, p); 2098 p->sched_class->enqueue_task(rq, p, flags); 2099 2100 if (sched_core_enabled(rq)) 2101 sched_core_enqueue(rq, p); 2102 } 2103 2104 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 2105 { 2106 if (sched_core_enabled(rq)) 2107 sched_core_dequeue(rq, p, flags); 2108 2109 if (!(flags & DEQUEUE_NOCLOCK)) 2110 update_rq_clock(rq); 2111 2112 if (!(flags & DEQUEUE_SAVE)) { 2113 sched_info_dequeue(rq, p); 2114 psi_dequeue(p, flags & DEQUEUE_SLEEP); 2115 } 2116 2117 uclamp_rq_dec(rq, p); 2118 p->sched_class->dequeue_task(rq, p, flags); 2119 } 2120 2121 void activate_task(struct rq *rq, struct task_struct *p, int flags) 2122 { 2123 if (task_on_rq_migrating(p)) 2124 flags |= ENQUEUE_MIGRATED; 2125 if (flags & ENQUEUE_MIGRATED) 2126 sched_mm_cid_migrate_to(rq, p); 2127 2128 enqueue_task(rq, p, flags); 2129 2130 p->on_rq = TASK_ON_RQ_QUEUED; 2131 } 2132 2133 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 2134 { 2135 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING; 2136 2137 dequeue_task(rq, p, flags); 2138 } 2139 2140 static inline int __normal_prio(int policy, int rt_prio, int nice) 2141 { 2142 int prio; 2143 2144 if (dl_policy(policy)) 2145 prio = MAX_DL_PRIO - 1; 2146 else if (rt_policy(policy)) 2147 prio = MAX_RT_PRIO - 1 - rt_prio; 2148 else 2149 prio = NICE_TO_PRIO(nice); 2150 2151 return prio; 2152 } 2153 2154 /* 2155 * Calculate the expected normal priority: i.e. priority 2156 * without taking RT-inheritance into account. Might be 2157 * boosted by interactivity modifiers. Changes upon fork, 2158 * setprio syscalls, and whenever the interactivity 2159 * estimator recalculates. 2160 */ 2161 static inline int normal_prio(struct task_struct *p) 2162 { 2163 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio)); 2164 } 2165 2166 /* 2167 * Calculate the current priority, i.e. the priority 2168 * taken into account by the scheduler. This value might 2169 * be boosted by RT tasks, or might be boosted by 2170 * interactivity modifiers. Will be RT if the task got 2171 * RT-boosted. If not then it returns p->normal_prio. 2172 */ 2173 static int effective_prio(struct task_struct *p) 2174 { 2175 p->normal_prio = normal_prio(p); 2176 /* 2177 * If we are RT tasks or we were boosted to RT priority, 2178 * keep the priority unchanged. Otherwise, update priority 2179 * to the normal priority: 2180 */ 2181 if (!rt_prio(p->prio)) 2182 return p->normal_prio; 2183 return p->prio; 2184 } 2185 2186 /** 2187 * task_curr - is this task currently executing on a CPU? 2188 * @p: the task in question. 2189 * 2190 * Return: 1 if the task is currently executing. 0 otherwise. 
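 *
 * A minimal usage sketch (hypothetical caller; it mirrors what
 * kick_process() further below does):
 *
 *	int cpu = task_cpu(p);
 *
 *	if (cpu != smp_processor_id() && task_curr(p))
 *		smp_send_reschedule(cpu);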
2191 */ 2192 inline int task_curr(const struct task_struct *p) 2193 { 2194 return cpu_curr(task_cpu(p)) == p; 2195 } 2196 2197 /* 2198 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, 2199 * use the balance_callback list if you want balancing. 2200 * 2201 * this means any call to check_class_changed() must be followed by a call to 2202 * balance_callback(). 2203 */ 2204 static inline void check_class_changed(struct rq *rq, struct task_struct *p, 2205 const struct sched_class *prev_class, 2206 int oldprio) 2207 { 2208 if (prev_class != p->sched_class) { 2209 if (prev_class->switched_from) 2210 prev_class->switched_from(rq, p); 2211 2212 p->sched_class->switched_to(rq, p); 2213 } else if (oldprio != p->prio || dl_task(p)) 2214 p->sched_class->prio_changed(rq, p, oldprio); 2215 } 2216 2217 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) 2218 { 2219 if (p->sched_class == rq->curr->sched_class) 2220 rq->curr->sched_class->check_preempt_curr(rq, p, flags); 2221 else if (sched_class_above(p->sched_class, rq->curr->sched_class)) 2222 resched_curr(rq); 2223 2224 /* 2225 * A queue event has occurred, and we're going to schedule. In 2226 * this case, we can save a useless back to back clock update. 2227 */ 2228 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) 2229 rq_clock_skip_update(rq); 2230 } 2231 2232 static __always_inline 2233 int __task_state_match(struct task_struct *p, unsigned int state) 2234 { 2235 if (READ_ONCE(p->__state) & state) 2236 return 1; 2237 2238 #ifdef CONFIG_PREEMPT_RT 2239 if (READ_ONCE(p->saved_state) & state) 2240 return -1; 2241 #endif 2242 return 0; 2243 } 2244 2245 static __always_inline 2246 int task_state_match(struct task_struct *p, unsigned int state) 2247 { 2248 #ifdef CONFIG_PREEMPT_RT 2249 int match; 2250 2251 /* 2252 * Serialize against current_save_and_set_rtlock_wait_state() and 2253 * current_restore_rtlock_saved_state(). 2254 */ 2255 raw_spin_lock_irq(&p->pi_lock); 2256 match = __task_state_match(p, state); 2257 raw_spin_unlock_irq(&p->pi_lock); 2258 2259 return match; 2260 #else 2261 return __task_state_match(p, state); 2262 #endif 2263 } 2264 2265 /* 2266 * wait_task_inactive - wait for a thread to unschedule. 2267 * 2268 * Wait for the thread to block in any of the states set in @match_state. 2269 * If it changes, i.e. @p might have woken up, then return zero. When we 2270 * succeed in waiting for @p to be off its CPU, we return a positive number 2271 * (its total switch count). If a second call a short while later returns the 2272 * same number, the caller can be sure that @p has remained unscheduled the 2273 * whole time. 2274 * 2275 * The caller must ensure that the task *will* unschedule sometime soon, 2276 * else this function might spin for a *long* time. This function can't 2277 * be called with interrupts off, or it may introduce deadlock with 2278 * smp_call_function() if an IPI is sent by the same process we are 2279 * waiting to become inactive. 2280 */ 2281 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) 2282 { 2283 int running, queued, match; 2284 struct rq_flags rf; 2285 unsigned long ncsw; 2286 struct rq *rq; 2287 2288 for (;;) { 2289 /* 2290 * We do the initial early heuristics without holding 2291 * any task-queue locks at all. We'll only try to get 2292 * the runqueue lock when things look like they will 2293 * work out! 
2294 */ 2295 rq = task_rq(p); 2296 2297 /* 2298 * If the task is actively running on another CPU 2299 * still, just relax and busy-wait without holding 2300 * any locks. 2301 * 2302 * NOTE! Since we don't hold any locks, it's not 2303 * even sure that "rq" stays as the right runqueue! 2304 * But we don't care, since "task_on_cpu()" will 2305 * return false if the runqueue has changed and p 2306 * is actually now running somewhere else! 2307 */ 2308 while (task_on_cpu(rq, p)) { 2309 if (!task_state_match(p, match_state)) 2310 return 0; 2311 cpu_relax(); 2312 } 2313 2314 /* 2315 * Ok, time to look more closely! We need the rq 2316 * lock now, to be *sure*. If we're wrong, we'll 2317 * just go back and repeat. 2318 */ 2319 rq = task_rq_lock(p, &rf); 2320 trace_sched_wait_task(p); 2321 running = task_on_cpu(rq, p); 2322 queued = task_on_rq_queued(p); 2323 ncsw = 0; 2324 if ((match = __task_state_match(p, match_state))) { 2325 /* 2326 * When matching on p->saved_state, consider this task 2327 * still queued so it will wait. 2328 */ 2329 if (match < 0) 2330 queued = 1; 2331 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 2332 } 2333 task_rq_unlock(rq, p, &rf); 2334 2335 /* 2336 * If it changed from the expected state, bail out now. 2337 */ 2338 if (unlikely(!ncsw)) 2339 break; 2340 2341 /* 2342 * Was it really running after all now that we 2343 * checked with the proper locks actually held? 2344 * 2345 * Oops. Go back and try again.. 2346 */ 2347 if (unlikely(running)) { 2348 cpu_relax(); 2349 continue; 2350 } 2351 2352 /* 2353 * It's not enough that it's not actively running, 2354 * it must be off the runqueue _entirely_, and not 2355 * preempted! 2356 * 2357 * So if it was still runnable (but just not actively 2358 * running right now), it's preempted, and we should 2359 * yield - it could be a while. 2360 */ 2361 if (unlikely(queued)) { 2362 ktime_t to = NSEC_PER_SEC / HZ; 2363 2364 set_current_state(TASK_UNINTERRUPTIBLE); 2365 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD); 2366 continue; 2367 } 2368 2369 /* 2370 * Ahh, all good. It wasn't running, and it wasn't 2371 * runnable, which means that it will never become 2372 * running in the future either. We're all done! 2373 */ 2374 break; 2375 } 2376 2377 return ncsw; 2378 } 2379 2380 #ifdef CONFIG_SMP 2381 2382 static void 2383 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx); 2384 2385 static int __set_cpus_allowed_ptr(struct task_struct *p, 2386 struct affinity_context *ctx); 2387 2388 static void migrate_disable_switch(struct rq *rq, struct task_struct *p) 2389 { 2390 struct affinity_context ac = { 2391 .new_mask = cpumask_of(rq->cpu), 2392 .flags = SCA_MIGRATE_DISABLE, 2393 }; 2394 2395 if (likely(!p->migration_disabled)) 2396 return; 2397 2398 if (p->cpus_ptr != &p->cpus_mask) 2399 return; 2400 2401 /* 2402 * Violates locking rules! see comment in __do_set_cpus_allowed(). 
2403 */ 2404 __do_set_cpus_allowed(p, &ac); 2405 } 2406 2407 void migrate_disable(void) 2408 { 2409 struct task_struct *p = current; 2410 2411 if (p->migration_disabled) { 2412 p->migration_disabled++; 2413 return; 2414 } 2415 2416 preempt_disable(); 2417 this_rq()->nr_pinned++; 2418 p->migration_disabled = 1; 2419 preempt_enable(); 2420 } 2421 EXPORT_SYMBOL_GPL(migrate_disable); 2422 2423 void migrate_enable(void) 2424 { 2425 struct task_struct *p = current; 2426 struct affinity_context ac = { 2427 .new_mask = &p->cpus_mask, 2428 .flags = SCA_MIGRATE_ENABLE, 2429 }; 2430 2431 if (p->migration_disabled > 1) { 2432 p->migration_disabled--; 2433 return; 2434 } 2435 2436 if (WARN_ON_ONCE(!p->migration_disabled)) 2437 return; 2438 2439 /* 2440 * Ensure stop_task runs either before or after this, and that 2441 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule(). 2442 */ 2443 preempt_disable(); 2444 if (p->cpus_ptr != &p->cpus_mask) 2445 __set_cpus_allowed_ptr(p, &ac); 2446 /* 2447 * Mustn't clear migration_disabled() until cpus_ptr points back at the 2448 * regular cpus_mask, otherwise things that race (eg. 2449 * select_fallback_rq) get confused. 2450 */ 2451 barrier(); 2452 p->migration_disabled = 0; 2453 this_rq()->nr_pinned--; 2454 preempt_enable(); 2455 } 2456 EXPORT_SYMBOL_GPL(migrate_enable); 2457 2458 static inline bool rq_has_pinned_tasks(struct rq *rq) 2459 { 2460 return rq->nr_pinned; 2461 } 2462 2463 /* 2464 * Per-CPU kthreads are allowed to run on !active && online CPUs, see 2465 * __set_cpus_allowed_ptr() and select_fallback_rq(). 2466 */ 2467 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) 2468 { 2469 /* When not in the task's cpumask, no point in looking further. */ 2470 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 2471 return false; 2472 2473 /* migrate_disabled() must be allowed to finish. */ 2474 if (is_migration_disabled(p)) 2475 return cpu_online(cpu); 2476 2477 /* Non kernel threads are not allowed during either online or offline. */ 2478 if (!(p->flags & PF_KTHREAD)) 2479 return cpu_active(cpu) && task_cpu_possible(cpu, p); 2480 2481 /* KTHREAD_IS_PER_CPU is always allowed. */ 2482 if (kthread_is_per_cpu(p)) 2483 return cpu_online(cpu); 2484 2485 /* Regular kernel threads don't get to stay during offline. */ 2486 if (cpu_dying(cpu)) 2487 return false; 2488 2489 /* But are allowed during online. */ 2490 return cpu_online(cpu); 2491 } 2492 2493 /* 2494 * This is how migration works: 2495 * 2496 * 1) we invoke migration_cpu_stop() on the target CPU using 2497 * stop_one_cpu(). 2498 * 2) stopper starts to run (implicitly forcing the migrated thread 2499 * off the CPU) 2500 * 3) it checks whether the migrated task is still in the wrong runqueue. 2501 * 4) if it's in the wrong runqueue then the migration thread removes 2502 * it and puts it into the right queue. 2503 * 5) stopper completes and stop_one_cpu() returns and the migration 2504 * is done. 2505 */ 2506 2507 /* 2508 * move_queued_task - move a queued task to new rq. 2509 * 2510 * Returns (locked) new rq. Old rq's lock is released. 
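 *
 * Callers therefore continue with (and eventually unlock) the returned rq;
 * a sketch of the pattern used by __migrate_task()/affine_move_task() below:
 *
 *	rq = move_queued_task(rq, rf, p, new_cpu);
 *	...
 *	task_rq_unlock(rq, p, rf);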
2511 */ 2512 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, 2513 struct task_struct *p, int new_cpu) 2514 { 2515 lockdep_assert_rq_held(rq); 2516 2517 deactivate_task(rq, p, DEQUEUE_NOCLOCK); 2518 set_task_cpu(p, new_cpu); 2519 rq_unlock(rq, rf); 2520 2521 rq = cpu_rq(new_cpu); 2522 2523 rq_lock(rq, rf); 2524 WARN_ON_ONCE(task_cpu(p) != new_cpu); 2525 activate_task(rq, p, 0); 2526 check_preempt_curr(rq, p, 0); 2527 2528 return rq; 2529 } 2530 2531 struct migration_arg { 2532 struct task_struct *task; 2533 int dest_cpu; 2534 struct set_affinity_pending *pending; 2535 }; 2536 2537 /* 2538 * @refs: number of wait_for_completion() 2539 * @stop_pending: is @stop_work in use 2540 */ 2541 struct set_affinity_pending { 2542 refcount_t refs; 2543 unsigned int stop_pending; 2544 struct completion done; 2545 struct cpu_stop_work stop_work; 2546 struct migration_arg arg; 2547 }; 2548 2549 /* 2550 * Move (not current) task off this CPU, onto the destination CPU. We're doing 2551 * this because either it can't run here any more (set_cpus_allowed() 2552 * away from this CPU, or CPU going down), or because we're 2553 * attempting to rebalance this task on exec (sched_exec). 2554 * 2555 * So we race with normal scheduler movements, but that's OK, as long 2556 * as the task is no longer on this CPU. 2557 */ 2558 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, 2559 struct task_struct *p, int dest_cpu) 2560 { 2561 /* Affinity changed (again). */ 2562 if (!is_cpu_allowed(p, dest_cpu)) 2563 return rq; 2564 2565 rq = move_queued_task(rq, rf, p, dest_cpu); 2566 2567 return rq; 2568 } 2569 2570 /* 2571 * migration_cpu_stop - this will be executed by a highprio stopper thread 2572 * and performs thread migration by bumping thread off CPU then 2573 * 'pushing' onto another runqueue. 2574 */ 2575 static int migration_cpu_stop(void *data) 2576 { 2577 struct migration_arg *arg = data; 2578 struct set_affinity_pending *pending = arg->pending; 2579 struct task_struct *p = arg->task; 2580 struct rq *rq = this_rq(); 2581 bool complete = false; 2582 struct rq_flags rf; 2583 2584 /* 2585 * The original target CPU might have gone down and we might 2586 * be on another CPU but it doesn't matter. 2587 */ 2588 local_irq_save(rf.flags); 2589 /* 2590 * We need to explicitly wake pending tasks before running 2591 * __migrate_task() such that we will not miss enforcing cpus_ptr 2592 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 2593 */ 2594 flush_smp_call_function_queue(); 2595 2596 raw_spin_lock(&p->pi_lock); 2597 rq_lock(rq, &rf); 2598 2599 /* 2600 * If we were passed a pending, then ->stop_pending was set, thus 2601 * p->migration_pending must have remained stable. 2602 */ 2603 WARN_ON_ONCE(pending && pending != p->migration_pending); 2604 2605 /* 2606 * If task_rq(p) != rq, it cannot be migrated here, because we're 2607 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because 2608 * we're holding p->pi_lock. 
2609 */ 2610 if (task_rq(p) == rq) { 2611 if (is_migration_disabled(p)) 2612 goto out; 2613 2614 if (pending) { 2615 p->migration_pending = NULL; 2616 complete = true; 2617 2618 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) 2619 goto out; 2620 } 2621 2622 if (task_on_rq_queued(p)) { 2623 update_rq_clock(rq); 2624 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); 2625 } else { 2626 p->wake_cpu = arg->dest_cpu; 2627 } 2628 2629 /* 2630 * XXX __migrate_task() can fail, at which point we might end 2631 * up running on a dodgy CPU, AFAICT this can only happen 2632 * during CPU hotplug, at which point we'll get pushed out 2633 * anyway, so it's probably not a big deal. 2634 */ 2635 2636 } else if (pending) { 2637 /* 2638 * This happens when we get migrated between migrate_enable()'s 2639 * preempt_enable() and scheduling the stopper task. At that 2640 * point we're a regular task again and not current anymore. 2641 * 2642 * A !PREEMPT kernel has a giant hole here, which makes it far 2643 * more likely. 2644 */ 2645 2646 /* 2647 * The task moved before the stopper got to run. We're holding 2648 * ->pi_lock, so the allowed mask is stable - if it got 2649 * somewhere allowed, we're done. 2650 */ 2651 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { 2652 p->migration_pending = NULL; 2653 complete = true; 2654 goto out; 2655 } 2656 2657 /* 2658 * When migrate_enable() hits a rq mis-match we can't reliably 2659 * determine is_migration_disabled() and so have to chase after 2660 * it. 2661 */ 2662 WARN_ON_ONCE(!pending->stop_pending); 2663 preempt_disable(); 2664 task_rq_unlock(rq, p, &rf); 2665 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, 2666 &pending->arg, &pending->stop_work); 2667 preempt_enable(); 2668 return 0; 2669 } 2670 out: 2671 if (pending) 2672 pending->stop_pending = false; 2673 task_rq_unlock(rq, p, &rf); 2674 2675 if (complete) 2676 complete_all(&pending->done); 2677 2678 return 0; 2679 } 2680 2681 int push_cpu_stop(void *arg) 2682 { 2683 struct rq *lowest_rq = NULL, *rq = this_rq(); 2684 struct task_struct *p = arg; 2685 2686 raw_spin_lock_irq(&p->pi_lock); 2687 raw_spin_rq_lock(rq); 2688 2689 if (task_rq(p) != rq) 2690 goto out_unlock; 2691 2692 if (is_migration_disabled(p)) { 2693 p->migration_flags |= MDF_PUSH; 2694 goto out_unlock; 2695 } 2696 2697 p->migration_flags &= ~MDF_PUSH; 2698 2699 if (p->sched_class->find_lock_rq) 2700 lowest_rq = p->sched_class->find_lock_rq(p, rq); 2701 2702 if (!lowest_rq) 2703 goto out_unlock; 2704 2705 // XXX validate p is still the highest prio task 2706 if (task_rq(p) == rq) { 2707 deactivate_task(rq, p, 0); 2708 set_task_cpu(p, lowest_rq->cpu); 2709 activate_task(lowest_rq, p, 0); 2710 resched_curr(lowest_rq); 2711 } 2712 2713 double_unlock_balance(rq, lowest_rq); 2714 2715 out_unlock: 2716 rq->push_busy = false; 2717 raw_spin_rq_unlock(rq); 2718 raw_spin_unlock_irq(&p->pi_lock); 2719 2720 put_task_struct(p); 2721 return 0; 2722 } 2723 2724 /* 2725 * sched_class::set_cpus_allowed must do the below, but is not required to 2726 * actually call this function. 
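 *
 * A class with no affinity bookkeeping of its own can simply forward to it;
 * sketch only, 'foo' is not an actual class in this file:
 *
 *	static void foo_set_cpus_allowed(struct task_struct *p,
 *					 struct affinity_context *ctx)
 *	{
 *		set_cpus_allowed_common(p, ctx);
 *	}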
2727 */ 2728 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx) 2729 { 2730 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { 2731 p->cpus_ptr = ctx->new_mask; 2732 return; 2733 } 2734 2735 cpumask_copy(&p->cpus_mask, ctx->new_mask); 2736 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); 2737 2738 /* 2739 * Swap in a new user_cpus_ptr if SCA_USER flag set 2740 */ 2741 if (ctx->flags & SCA_USER) 2742 swap(p->user_cpus_ptr, ctx->user_mask); 2743 } 2744 2745 static void 2746 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx) 2747 { 2748 struct rq *rq = task_rq(p); 2749 bool queued, running; 2750 2751 /* 2752 * This here violates the locking rules for affinity, since we're only 2753 * supposed to change these variables while holding both rq->lock and 2754 * p->pi_lock. 2755 * 2756 * HOWEVER, it magically works, because ttwu() is the only code that 2757 * accesses these variables under p->pi_lock and only does so after 2758 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() 2759 * before finish_task(). 2760 * 2761 * XXX do further audits, this smells like something putrid. 2762 */ 2763 if (ctx->flags & SCA_MIGRATE_DISABLE) 2764 SCHED_WARN_ON(!p->on_cpu); 2765 else 2766 lockdep_assert_held(&p->pi_lock); 2767 2768 queued = task_on_rq_queued(p); 2769 running = task_current(rq, p); 2770 2771 if (queued) { 2772 /* 2773 * Because __kthread_bind() calls this on blocked tasks without 2774 * holding rq->lock. 2775 */ 2776 lockdep_assert_rq_held(rq); 2777 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 2778 } 2779 if (running) 2780 put_prev_task(rq, p); 2781 2782 p->sched_class->set_cpus_allowed(p, ctx); 2783 2784 if (queued) 2785 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 2786 if (running) 2787 set_next_task(rq, p); 2788 } 2789 2790 /* 2791 * Used for kthread_bind() and select_fallback_rq(), in both cases the user 2792 * affinity (if any) should be destroyed too. 2793 */ 2794 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 2795 { 2796 struct affinity_context ac = { 2797 .new_mask = new_mask, 2798 .user_mask = NULL, 2799 .flags = SCA_USER, /* clear the user requested mask */ 2800 }; 2801 union cpumask_rcuhead { 2802 cpumask_t cpumask; 2803 struct rcu_head rcu; 2804 }; 2805 2806 __do_set_cpus_allowed(p, &ac); 2807 2808 /* 2809 * Because this is called with p->pi_lock held, it is not possible 2810 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using 2811 * kfree_rcu(). 2812 */ 2813 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu); 2814 } 2815 2816 static cpumask_t *alloc_user_cpus_ptr(int node) 2817 { 2818 /* 2819 * See do_set_cpus_allowed() above for the rcu_head usage. 2820 */ 2821 int size = max_t(int, cpumask_size(), sizeof(struct rcu_head)); 2822 2823 return kmalloc_node(size, GFP_KERNEL, node); 2824 } 2825 2826 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, 2827 int node) 2828 { 2829 cpumask_t *user_mask; 2830 unsigned long flags; 2831 2832 /* 2833 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's 2834 * may differ by now due to racing. 2835 */ 2836 dst->user_cpus_ptr = NULL; 2837 2838 /* 2839 * This check is racy and losing the race is a valid situation. 2840 * It is not worth the extra overhead of taking the pi_lock on 2841 * every fork/clone. 
2842 */ 2843 if (data_race(!src->user_cpus_ptr)) 2844 return 0; 2845 2846 user_mask = alloc_user_cpus_ptr(node); 2847 if (!user_mask) 2848 return -ENOMEM; 2849 2850 /* 2851 * Use pi_lock to protect content of user_cpus_ptr 2852 * 2853 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent 2854 * do_set_cpus_allowed(). 2855 */ 2856 raw_spin_lock_irqsave(&src->pi_lock, flags); 2857 if (src->user_cpus_ptr) { 2858 swap(dst->user_cpus_ptr, user_mask); 2859 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); 2860 } 2861 raw_spin_unlock_irqrestore(&src->pi_lock, flags); 2862 2863 if (unlikely(user_mask)) 2864 kfree(user_mask); 2865 2866 return 0; 2867 } 2868 2869 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p) 2870 { 2871 struct cpumask *user_mask = NULL; 2872 2873 swap(p->user_cpus_ptr, user_mask); 2874 2875 return user_mask; 2876 } 2877 2878 void release_user_cpus_ptr(struct task_struct *p) 2879 { 2880 kfree(clear_user_cpus_ptr(p)); 2881 } 2882 2883 /* 2884 * This function is wildly self concurrent; here be dragons. 2885 * 2886 * 2887 * When given a valid mask, __set_cpus_allowed_ptr() must block until the 2888 * designated task is enqueued on an allowed CPU. If that task is currently 2889 * running, we have to kick it out using the CPU stopper. 2890 * 2891 * Migrate-Disable comes along and tramples all over our nice sandcastle. 2892 * Consider: 2893 * 2894 * Initial conditions: P0->cpus_mask = [0, 1] 2895 * 2896 * P0@CPU0 P1 2897 * 2898 * migrate_disable(); 2899 * <preempted> 2900 * set_cpus_allowed_ptr(P0, [1]); 2901 * 2902 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes 2903 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region). 2904 * This means we need the following scheme: 2905 * 2906 * P0@CPU0 P1 2907 * 2908 * migrate_disable(); 2909 * <preempted> 2910 * set_cpus_allowed_ptr(P0, [1]); 2911 * <blocks> 2912 * <resumes> 2913 * migrate_enable(); 2914 * __set_cpus_allowed_ptr(); 2915 * <wakes local stopper> 2916 * `--> <woken on migration completion> 2917 * 2918 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple 2919 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any 2920 * task p are serialized by p->pi_lock, which we can leverage: the one that 2921 * should come into effect at the end of the Migrate-Disable region is the last 2922 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask), 2923 * but we still need to properly signal those waiting tasks at the appropriate 2924 * moment. 2925 * 2926 * This is implemented using struct set_affinity_pending. The first 2927 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will 2928 * setup an instance of that struct and install it on the targeted task_struct. 2929 * Any and all further callers will reuse that instance. Those then wait for 2930 * a completion signaled at the tail of the CPU stopper callback (1), triggered 2931 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()). 2932 * 2933 * 2934 * (1) In the cases covered above. There is one more where the completion is 2935 * signaled within affine_move_task() itself: when a subsequent affinity request 2936 * occurs after the stopper bailed out due to the targeted task still being 2937 * Migrate-Disable. 
Consider: 2938 * 2939 * Initial conditions: P0->cpus_mask = [0, 1] 2940 * 2941 * CPU0 P1 P2 2942 * <P0> 2943 * migrate_disable(); 2944 * <preempted> 2945 * set_cpus_allowed_ptr(P0, [1]); 2946 * <blocks> 2947 * <migration/0> 2948 * migration_cpu_stop() 2949 * is_migration_disabled() 2950 * <bails> 2951 * set_cpus_allowed_ptr(P0, [0, 1]); 2952 * <signal completion> 2953 * <awakes> 2954 * 2955 * Note that the above is safe vs a concurrent migrate_enable(), as any 2956 * pending affinity completion is preceded by an uninstallation of 2957 * p->migration_pending done with p->pi_lock held. 2958 */ 2959 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, 2960 int dest_cpu, unsigned int flags) 2961 __releases(rq->lock) 2962 __releases(p->pi_lock) 2963 { 2964 struct set_affinity_pending my_pending = { }, *pending = NULL; 2965 bool stop_pending, complete = false; 2966 2967 /* Can the task run on the task's current CPU? If so, we're done */ 2968 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { 2969 struct task_struct *push_task = NULL; 2970 2971 if ((flags & SCA_MIGRATE_ENABLE) && 2972 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { 2973 rq->push_busy = true; 2974 push_task = get_task_struct(p); 2975 } 2976 2977 /* 2978 * If there are pending waiters, but no pending stop_work, 2979 * then complete now. 2980 */ 2981 pending = p->migration_pending; 2982 if (pending && !pending->stop_pending) { 2983 p->migration_pending = NULL; 2984 complete = true; 2985 } 2986 2987 preempt_disable(); 2988 task_rq_unlock(rq, p, rf); 2989 if (push_task) { 2990 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, 2991 p, &rq->push_work); 2992 } 2993 preempt_enable(); 2994 2995 if (complete) 2996 complete_all(&pending->done); 2997 2998 return 0; 2999 } 3000 3001 if (!(flags & SCA_MIGRATE_ENABLE)) { 3002 /* serialized by p->pi_lock */ 3003 if (!p->migration_pending) { 3004 /* Install the request */ 3005 refcount_set(&my_pending.refs, 1); 3006 init_completion(&my_pending.done); 3007 my_pending.arg = (struct migration_arg) { 3008 .task = p, 3009 .dest_cpu = dest_cpu, 3010 .pending = &my_pending, 3011 }; 3012 3013 p->migration_pending = &my_pending; 3014 } else { 3015 pending = p->migration_pending; 3016 refcount_inc(&pending->refs); 3017 /* 3018 * Affinity has changed, but we've already installed a 3019 * pending. migration_cpu_stop() *must* see this, else 3020 * we risk a completion of the pending despite having a 3021 * task on a disallowed CPU. 3022 * 3023 * Serialized by p->pi_lock, so this is safe. 3024 */ 3025 pending->arg.dest_cpu = dest_cpu; 3026 } 3027 } 3028 pending = p->migration_pending; 3029 /* 3030 * - !MIGRATE_ENABLE: 3031 * we'll have installed a pending if there wasn't one already. 3032 * 3033 * - MIGRATE_ENABLE: 3034 * we're here because the current CPU isn't matching anymore, 3035 * the only way that can happen is because of a concurrent 3036 * set_cpus_allowed_ptr() call, which should then still be 3037 * pending completion. 3038 * 3039 * Either way, we really should have a @pending here. 3040 */ 3041 if (WARN_ON_ONCE(!pending)) { 3042 task_rq_unlock(rq, p, rf); 3043 return -EINVAL; 3044 } 3045 3046 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { 3047 /* 3048 * MIGRATE_ENABLE gets here because 'p == current', but for 3049 * anything else we cannot do is_migration_disabled(), punt 3050 * and have the stopper function handle it all race-free. 
3051 */ 3052 stop_pending = pending->stop_pending; 3053 if (!stop_pending) 3054 pending->stop_pending = true; 3055 3056 if (flags & SCA_MIGRATE_ENABLE) 3057 p->migration_flags &= ~MDF_PUSH; 3058 3059 preempt_disable(); 3060 task_rq_unlock(rq, p, rf); 3061 if (!stop_pending) { 3062 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, 3063 &pending->arg, &pending->stop_work); 3064 } 3065 preempt_enable(); 3066 3067 if (flags & SCA_MIGRATE_ENABLE) 3068 return 0; 3069 } else { 3070 3071 if (!is_migration_disabled(p)) { 3072 if (task_on_rq_queued(p)) 3073 rq = move_queued_task(rq, rf, p, dest_cpu); 3074 3075 if (!pending->stop_pending) { 3076 p->migration_pending = NULL; 3077 complete = true; 3078 } 3079 } 3080 task_rq_unlock(rq, p, rf); 3081 3082 if (complete) 3083 complete_all(&pending->done); 3084 } 3085 3086 wait_for_completion(&pending->done); 3087 3088 if (refcount_dec_and_test(&pending->refs)) 3089 wake_up_var(&pending->refs); /* No UaF, just an address */ 3090 3091 /* 3092 * Block the original owner of &pending until all subsequent callers 3093 * have seen the completion and decremented the refcount 3094 */ 3095 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs)); 3096 3097 /* ARGH */ 3098 WARN_ON_ONCE(my_pending.stop_pending); 3099 3100 return 0; 3101 } 3102 3103 /* 3104 * Called with both p->pi_lock and rq->lock held; drops both before returning. 3105 */ 3106 static int __set_cpus_allowed_ptr_locked(struct task_struct *p, 3107 struct affinity_context *ctx, 3108 struct rq *rq, 3109 struct rq_flags *rf) 3110 __releases(rq->lock) 3111 __releases(p->pi_lock) 3112 { 3113 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); 3114 const struct cpumask *cpu_valid_mask = cpu_active_mask; 3115 bool kthread = p->flags & PF_KTHREAD; 3116 unsigned int dest_cpu; 3117 int ret = 0; 3118 3119 update_rq_clock(rq); 3120 3121 if (kthread || is_migration_disabled(p)) { 3122 /* 3123 * Kernel threads are allowed on online && !active CPUs, 3124 * however, during cpu-hot-unplug, even these might get pushed 3125 * away if not KTHREAD_IS_PER_CPU. 3126 * 3127 * Specifically, migration_disabled() tasks must not fail the 3128 * cpumask_any_and_distribute() pick below, esp. so on 3129 * SCA_MIGRATE_ENABLE, otherwise we'll not call 3130 * set_cpus_allowed_common() and actually reset p->cpus_ptr. 3131 */ 3132 cpu_valid_mask = cpu_online_mask; 3133 } 3134 3135 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) { 3136 ret = -EINVAL; 3137 goto out; 3138 } 3139 3140 /* 3141 * Must re-check here, to close a race against __kthread_bind(), 3142 * sched_setaffinity() is not guaranteed to observe the flag. 3143 */ 3144 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { 3145 ret = -EINVAL; 3146 goto out; 3147 } 3148 3149 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) { 3150 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) { 3151 if (ctx->flags & SCA_USER) 3152 swap(p->user_cpus_ptr, ctx->user_mask); 3153 goto out; 3154 } 3155 3156 if (WARN_ON_ONCE(p == current && 3157 is_migration_disabled(p) && 3158 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { 3159 ret = -EBUSY; 3160 goto out; 3161 } 3162 } 3163 3164 /* 3165 * Picking a ~random cpu helps in cases where we are changing affinity 3166 * for groups of tasks (ie. cpuset), so that load balancing is not 3167 * immediately required to distribute the tasks within their new mask. 
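 *
 * cpumask_any_and_distribute() keeps a rotating starting point, so repeated
 * calls tend to spread the picked CPUs over the intersection. Illustration
 * only, the exact sequence is not guaranteed:
 *
 *	cpu_valid_mask = 0-7, new_mask = 0-3  ->  picks 2, then 3, then 0, ...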
3168 */ 3169 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask); 3170 if (dest_cpu >= nr_cpu_ids) { 3171 ret = -EINVAL; 3172 goto out; 3173 } 3174 3175 __do_set_cpus_allowed(p, ctx); 3176 3177 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); 3178 3179 out: 3180 task_rq_unlock(rq, p, rf); 3181 3182 return ret; 3183 } 3184 3185 /* 3186 * Change a given task's CPU affinity. Migrate the thread to a 3187 * proper CPU and schedule it away if the CPU it's executing on 3188 * is removed from the allowed bitmask. 3189 * 3190 * NOTE: the caller must have a valid reference to the task, the 3191 * task must not exit() & deallocate itself prematurely. The 3192 * call is not atomic; no spinlocks may be held. 3193 */ 3194 static int __set_cpus_allowed_ptr(struct task_struct *p, 3195 struct affinity_context *ctx) 3196 { 3197 struct rq_flags rf; 3198 struct rq *rq; 3199 3200 rq = task_rq_lock(p, &rf); 3201 /* 3202 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_* 3203 * flags are set. 3204 */ 3205 if (p->user_cpus_ptr && 3206 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) && 3207 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr)) 3208 ctx->new_mask = rq->scratch_mask; 3209 3210 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf); 3211 } 3212 3213 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 3214 { 3215 struct affinity_context ac = { 3216 .new_mask = new_mask, 3217 .flags = 0, 3218 }; 3219 3220 return __set_cpus_allowed_ptr(p, &ac); 3221 } 3222 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 3223 3224 /* 3225 * Change a given task's CPU affinity to the intersection of its current 3226 * affinity mask and @subset_mask, writing the resulting mask to @new_mask. 3227 * If user_cpus_ptr is defined, use it as the basis for restricting CPU 3228 * affinity or use cpu_online_mask instead. 3229 * 3230 * If the resulting mask is empty, leave the affinity unchanged and return 3231 * -EINVAL. 3232 */ 3233 static int restrict_cpus_allowed_ptr(struct task_struct *p, 3234 struct cpumask *new_mask, 3235 const struct cpumask *subset_mask) 3236 { 3237 struct affinity_context ac = { 3238 .new_mask = new_mask, 3239 .flags = 0, 3240 }; 3241 struct rq_flags rf; 3242 struct rq *rq; 3243 int err; 3244 3245 rq = task_rq_lock(p, &rf); 3246 3247 /* 3248 * Forcefully restricting the affinity of a deadline task is 3249 * likely to cause problems, so fail and noisily override the 3250 * mask entirely. 3251 */ 3252 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 3253 err = -EPERM; 3254 goto err_unlock; 3255 } 3256 3257 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) { 3258 err = -EINVAL; 3259 goto err_unlock; 3260 } 3261 3262 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf); 3263 3264 err_unlock: 3265 task_rq_unlock(rq, p, &rf); 3266 return err; 3267 } 3268 3269 /* 3270 * Restrict the CPU affinity of task @p so that it is a subset of 3271 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the 3272 * old affinity mask. If the resulting mask is empty, we warn and walk 3273 * up the cpuset hierarchy until we find a suitable mask. 
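 *
 * Typical usage (sketch; e.g. exec'ing a 32-bit task on an asymmetric
 * arm64 system) pairs this with relax_compatible_cpus_allowed_ptr():
 *
 *	force_compatible_cpus_allowed_ptr(current);
 *	... run on the compatible subset of CPUs ...
 *	relax_compatible_cpus_allowed_ptr(current);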
3274 */ 3275 void force_compatible_cpus_allowed_ptr(struct task_struct *p) 3276 { 3277 cpumask_var_t new_mask; 3278 const struct cpumask *override_mask = task_cpu_possible_mask(p); 3279 3280 alloc_cpumask_var(&new_mask, GFP_KERNEL); 3281 3282 /* 3283 * __migrate_task() can fail silently in the face of concurrent 3284 * offlining of the chosen destination CPU, so take the hotplug 3285 * lock to ensure that the migration succeeds. 3286 */ 3287 cpus_read_lock(); 3288 if (!cpumask_available(new_mask)) 3289 goto out_set_mask; 3290 3291 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask)) 3292 goto out_free_mask; 3293 3294 /* 3295 * We failed to find a valid subset of the affinity mask for the 3296 * task, so override it based on its cpuset hierarchy. 3297 */ 3298 cpuset_cpus_allowed(p, new_mask); 3299 override_mask = new_mask; 3300 3301 out_set_mask: 3302 if (printk_ratelimit()) { 3303 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n", 3304 task_pid_nr(p), p->comm, 3305 cpumask_pr_args(override_mask)); 3306 } 3307 3308 WARN_ON(set_cpus_allowed_ptr(p, override_mask)); 3309 out_free_mask: 3310 cpus_read_unlock(); 3311 free_cpumask_var(new_mask); 3312 } 3313 3314 static int 3315 __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx); 3316 3317 /* 3318 * Restore the affinity of a task @p which was previously restricted by a 3319 * call to force_compatible_cpus_allowed_ptr(). 3320 * 3321 * It is the caller's responsibility to serialise this with any calls to 3322 * force_compatible_cpus_allowed_ptr(@p). 3323 */ 3324 void relax_compatible_cpus_allowed_ptr(struct task_struct *p) 3325 { 3326 struct affinity_context ac = { 3327 .new_mask = task_user_cpus(p), 3328 .flags = 0, 3329 }; 3330 int ret; 3331 3332 /* 3333 * Try to restore the old affinity mask with __sched_setaffinity(). 3334 * Cpuset masking will be done there too. 3335 */ 3336 ret = __sched_setaffinity(p, &ac); 3337 WARN_ON_ONCE(ret); 3338 } 3339 3340 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 3341 { 3342 #ifdef CONFIG_SCHED_DEBUG 3343 unsigned int state = READ_ONCE(p->__state); 3344 3345 /* 3346 * We should never call set_task_cpu() on a blocked task, 3347 * ttwu() will sort out the placement. 3348 */ 3349 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); 3350 3351 /* 3352 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, 3353 * because schedstat_wait_{start,end} rebase migrating task's wait_start 3354 * time relying on p->on_rq. 3355 */ 3356 WARN_ON_ONCE(state == TASK_RUNNING && 3357 p->sched_class == &fair_sched_class && 3358 (p->on_rq && !task_on_rq_migrating(p))); 3359 3360 #ifdef CONFIG_LOCKDEP 3361 /* 3362 * The caller should hold either p->pi_lock or rq->lock, when changing 3363 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 3364 * 3365 * sched_move_task() holds both and thus holding either pins the cgroup, 3366 * see task_group(). 3367 * 3368 * Furthermore, all task_rq users should acquire both locks, see 3369 * task_rq_lock(). 3370 */ 3371 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 3372 lockdep_is_held(__rq_lockp(task_rq(p))))); 3373 #endif 3374 /* 3375 * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 
3376 */ 3377 WARN_ON_ONCE(!cpu_online(new_cpu)); 3378 3379 WARN_ON_ONCE(is_migration_disabled(p)); 3380 #endif 3381 3382 trace_sched_migrate_task(p, new_cpu); 3383 3384 if (task_cpu(p) != new_cpu) { 3385 if (p->sched_class->migrate_task_rq) 3386 p->sched_class->migrate_task_rq(p, new_cpu); 3387 p->se.nr_migrations++; 3388 rseq_migrate(p); 3389 sched_mm_cid_migrate_from(p); 3390 perf_event_task_migrate(p); 3391 } 3392 3393 __set_task_cpu(p, new_cpu); 3394 } 3395 3396 #ifdef CONFIG_NUMA_BALANCING 3397 static void __migrate_swap_task(struct task_struct *p, int cpu) 3398 { 3399 if (task_on_rq_queued(p)) { 3400 struct rq *src_rq, *dst_rq; 3401 struct rq_flags srf, drf; 3402 3403 src_rq = task_rq(p); 3404 dst_rq = cpu_rq(cpu); 3405 3406 rq_pin_lock(src_rq, &srf); 3407 rq_pin_lock(dst_rq, &drf); 3408 3409 deactivate_task(src_rq, p, 0); 3410 set_task_cpu(p, cpu); 3411 activate_task(dst_rq, p, 0); 3412 check_preempt_curr(dst_rq, p, 0); 3413 3414 rq_unpin_lock(dst_rq, &drf); 3415 rq_unpin_lock(src_rq, &srf); 3416 3417 } else { 3418 /* 3419 * Task isn't running anymore; make it appear like we migrated 3420 * it before it went to sleep. This means on wakeup we make the 3421 * previous CPU our target instead of where it really is. 3422 */ 3423 p->wake_cpu = cpu; 3424 } 3425 } 3426 3427 struct migration_swap_arg { 3428 struct task_struct *src_task, *dst_task; 3429 int src_cpu, dst_cpu; 3430 }; 3431 3432 static int migrate_swap_stop(void *data) 3433 { 3434 struct migration_swap_arg *arg = data; 3435 struct rq *src_rq, *dst_rq; 3436 3437 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) 3438 return -EAGAIN; 3439 3440 src_rq = cpu_rq(arg->src_cpu); 3441 dst_rq = cpu_rq(arg->dst_cpu); 3442 3443 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock); 3444 guard(double_rq_lock)(src_rq, dst_rq); 3445 3446 if (task_cpu(arg->dst_task) != arg->dst_cpu) 3447 return -EAGAIN; 3448 3449 if (task_cpu(arg->src_task) != arg->src_cpu) 3450 return -EAGAIN; 3451 3452 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) 3453 return -EAGAIN; 3454 3455 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) 3456 return -EAGAIN; 3457 3458 __migrate_swap_task(arg->src_task, arg->dst_cpu); 3459 __migrate_swap_task(arg->dst_task, arg->src_cpu); 3460 3461 return 0; 3462 } 3463 3464 /* 3465 * Cross migrate two tasks 3466 */ 3467 int migrate_swap(struct task_struct *cur, struct task_struct *p, 3468 int target_cpu, int curr_cpu) 3469 { 3470 struct migration_swap_arg arg; 3471 int ret = -EINVAL; 3472 3473 arg = (struct migration_swap_arg){ 3474 .src_task = cur, 3475 .src_cpu = curr_cpu, 3476 .dst_task = p, 3477 .dst_cpu = target_cpu, 3478 }; 3479 3480 if (arg.src_cpu == arg.dst_cpu) 3481 goto out; 3482 3483 /* 3484 * These three tests are all lockless; this is OK since all of them 3485 * will be re-checked with proper locks held further down the line. 
3486 */ 3487 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 3488 goto out; 3489 3490 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) 3491 goto out; 3492 3493 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) 3494 goto out; 3495 3496 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 3497 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 3498 3499 out: 3500 return ret; 3501 } 3502 #endif /* CONFIG_NUMA_BALANCING */ 3503 3504 /*** 3505 * kick_process - kick a running thread to enter/exit the kernel 3506 * @p: the to-be-kicked thread 3507 * 3508 * Cause a process which is running on another CPU to enter 3509 * kernel-mode, without any delay. (to get signals handled.) 3510 * 3511 * NOTE: this function doesn't have to take the runqueue lock, 3512 * because all it wants to ensure is that the remote task enters 3513 * the kernel. If the IPI races and the task has been migrated 3514 * to another CPU then no harm is done and the purpose has been 3515 * achieved as well. 3516 */ 3517 void kick_process(struct task_struct *p) 3518 { 3519 int cpu; 3520 3521 preempt_disable(); 3522 cpu = task_cpu(p); 3523 if ((cpu != smp_processor_id()) && task_curr(p)) 3524 smp_send_reschedule(cpu); 3525 preempt_enable(); 3526 } 3527 EXPORT_SYMBOL_GPL(kick_process); 3528 3529 /* 3530 * ->cpus_ptr is protected by both rq->lock and p->pi_lock 3531 * 3532 * A few notes on cpu_active vs cpu_online: 3533 * 3534 * - cpu_active must be a subset of cpu_online 3535 * 3536 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, 3537 * see __set_cpus_allowed_ptr(). At this point the newly online 3538 * CPU isn't yet part of the sched domains, and balancing will not 3539 * see it. 3540 * 3541 * - on CPU-down we clear cpu_active() to mask the sched domains and 3542 * avoid the load balancer to place new tasks on the to be removed 3543 * CPU. Existing tasks will remain running there and will be taken 3544 * off. 3545 * 3546 * This means that fallback selection must not select !active CPUs. 3547 * And can assume that any active CPU must be online. Conversely 3548 * select_task_rq() below may allow selection of !active CPUs in order 3549 * to satisfy the above rules. 3550 */ 3551 static int select_fallback_rq(int cpu, struct task_struct *p) 3552 { 3553 int nid = cpu_to_node(cpu); 3554 const struct cpumask *nodemask = NULL; 3555 enum { cpuset, possible, fail } state = cpuset; 3556 int dest_cpu; 3557 3558 /* 3559 * If the node that the CPU is on has been offlined, cpu_to_node() 3560 * will return -1. There is no CPU on the node, and we should 3561 * select the CPU on the other node. 3562 */ 3563 if (nid != -1) { 3564 nodemask = cpumask_of_node(nid); 3565 3566 /* Look for allowed, online CPU in same node. */ 3567 for_each_cpu(dest_cpu, nodemask) { 3568 if (is_cpu_allowed(p, dest_cpu)) 3569 return dest_cpu; 3570 } 3571 } 3572 3573 for (;;) { 3574 /* Any allowed, online CPU? */ 3575 for_each_cpu(dest_cpu, p->cpus_ptr) { 3576 if (!is_cpu_allowed(p, dest_cpu)) 3577 continue; 3578 3579 goto out; 3580 } 3581 3582 /* No more Mr. Nice Guy. */ 3583 switch (state) { 3584 case cpuset: 3585 if (cpuset_cpus_allowed_fallback(p)) { 3586 state = possible; 3587 break; 3588 } 3589 fallthrough; 3590 case possible: 3591 /* 3592 * XXX When called from select_task_rq() we only 3593 * hold p->pi_lock and again violate locking order. 3594 * 3595 * More yuck to audit. 
3596 */ 3597 do_set_cpus_allowed(p, task_cpu_possible_mask(p)); 3598 state = fail; 3599 break; 3600 case fail: 3601 BUG(); 3602 break; 3603 } 3604 } 3605 3606 out: 3607 if (state != cpuset) { 3608 /* 3609 * Don't tell them about moving exiting tasks or 3610 * kernel threads (both mm NULL), since they never 3611 * leave kernel. 3612 */ 3613 if (p->mm && printk_ratelimit()) { 3614 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 3615 task_pid_nr(p), p->comm, cpu); 3616 } 3617 } 3618 3619 return dest_cpu; 3620 } 3621 3622 /* 3623 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 3624 */ 3625 static inline 3626 int select_task_rq(struct task_struct *p, int cpu, int wake_flags) 3627 { 3628 lockdep_assert_held(&p->pi_lock); 3629 3630 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) 3631 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags); 3632 else 3633 cpu = cpumask_any(p->cpus_ptr); 3634 3635 /* 3636 * In order not to call set_task_cpu() on a blocking task we need 3637 * to rely on ttwu() to place the task on a valid ->cpus_ptr 3638 * CPU. 3639 * 3640 * Since this is common to all placement strategies, this lives here. 3641 * 3642 * [ this allows ->select_task() to simply return task_cpu(p) and 3643 * not worry about this generic constraint ] 3644 */ 3645 if (unlikely(!is_cpu_allowed(p, cpu))) 3646 cpu = select_fallback_rq(task_cpu(p), p); 3647 3648 return cpu; 3649 } 3650 3651 void sched_set_stop_task(int cpu, struct task_struct *stop) 3652 { 3653 static struct lock_class_key stop_pi_lock; 3654 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 3655 struct task_struct *old_stop = cpu_rq(cpu)->stop; 3656 3657 if (stop) { 3658 /* 3659 * Make it appear like a SCHED_FIFO task, its something 3660 * userspace knows about and won't get confused about. 3661 * 3662 * Also, it will make PI more or less work without too 3663 * much confusion -- but then, stop work should not 3664 * rely on PI working anyway. 3665 */ 3666 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 3667 3668 stop->sched_class = &stop_sched_class; 3669 3670 /* 3671 * The PI code calls rt_mutex_setprio() with ->pi_lock held to 3672 * adjust the effective priority of a task. As a result, 3673 * rt_mutex_setprio() can trigger (RT) balancing operations, 3674 * which can then trigger wakeups of the stop thread to push 3675 * around the current task. 3676 * 3677 * The stop task itself will never be part of the PI-chain, it 3678 * never blocks, therefore that ->pi_lock recursion is safe. 3679 * Tell lockdep about this by placing the stop->pi_lock in its 3680 * own class. 3681 */ 3682 lockdep_set_class(&stop->pi_lock, &stop_pi_lock); 3683 } 3684 3685 cpu_rq(cpu)->stop = stop; 3686 3687 if (old_stop) { 3688 /* 3689 * Reset it back to a normal scheduling class so that 3690 * it can die in pieces. 
3691 */ 3692 old_stop->sched_class = &rt_sched_class; 3693 } 3694 } 3695 3696 #else /* CONFIG_SMP */ 3697 3698 static inline int __set_cpus_allowed_ptr(struct task_struct *p, 3699 struct affinity_context *ctx) 3700 { 3701 return set_cpus_allowed_ptr(p, ctx->new_mask); 3702 } 3703 3704 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } 3705 3706 static inline bool rq_has_pinned_tasks(struct rq *rq) 3707 { 3708 return false; 3709 } 3710 3711 static inline cpumask_t *alloc_user_cpus_ptr(int node) 3712 { 3713 return NULL; 3714 } 3715 3716 #endif /* !CONFIG_SMP */ 3717 3718 static void 3719 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 3720 { 3721 struct rq *rq; 3722 3723 if (!schedstat_enabled()) 3724 return; 3725 3726 rq = this_rq(); 3727 3728 #ifdef CONFIG_SMP 3729 if (cpu == rq->cpu) { 3730 __schedstat_inc(rq->ttwu_local); 3731 __schedstat_inc(p->stats.nr_wakeups_local); 3732 } else { 3733 struct sched_domain *sd; 3734 3735 __schedstat_inc(p->stats.nr_wakeups_remote); 3736 3737 guard(rcu)(); 3738 for_each_domain(rq->cpu, sd) { 3739 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 3740 __schedstat_inc(sd->ttwu_wake_remote); 3741 break; 3742 } 3743 } 3744 } 3745 3746 if (wake_flags & WF_MIGRATED) 3747 __schedstat_inc(p->stats.nr_wakeups_migrate); 3748 #endif /* CONFIG_SMP */ 3749 3750 __schedstat_inc(rq->ttwu_count); 3751 __schedstat_inc(p->stats.nr_wakeups); 3752 3753 if (wake_flags & WF_SYNC) 3754 __schedstat_inc(p->stats.nr_wakeups_sync); 3755 } 3756 3757 /* 3758 * Mark the task runnable. 3759 */ 3760 static inline void ttwu_do_wakeup(struct task_struct *p) 3761 { 3762 WRITE_ONCE(p->__state, TASK_RUNNING); 3763 trace_sched_wakeup(p); 3764 } 3765 3766 static void 3767 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 3768 struct rq_flags *rf) 3769 { 3770 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 3771 3772 lockdep_assert_rq_held(rq); 3773 3774 if (p->sched_contributes_to_load) 3775 rq->nr_uninterruptible--; 3776 3777 #ifdef CONFIG_SMP 3778 if (wake_flags & WF_MIGRATED) 3779 en_flags |= ENQUEUE_MIGRATED; 3780 else 3781 #endif 3782 if (p->in_iowait) { 3783 delayacct_blkio_end(p); 3784 atomic_dec(&task_rq(p)->nr_iowait); 3785 } 3786 3787 activate_task(rq, p, en_flags); 3788 check_preempt_curr(rq, p, wake_flags); 3789 3790 ttwu_do_wakeup(p); 3791 3792 #ifdef CONFIG_SMP 3793 if (p->sched_class->task_woken) { 3794 /* 3795 * Our task @p is fully woken up and running; so it's safe to 3796 * drop the rq->lock, hereafter rq is only used for statistics. 3797 */ 3798 rq_unpin_lock(rq, rf); 3799 p->sched_class->task_woken(rq, p); 3800 rq_repin_lock(rq, rf); 3801 } 3802 3803 if (rq->idle_stamp) { 3804 u64 delta = rq_clock(rq) - rq->idle_stamp; 3805 u64 max = 2*rq->max_idle_balance_cost; 3806 3807 update_avg(&rq->avg_idle, delta); 3808 3809 if (rq->avg_idle > max) 3810 rq->avg_idle = max; 3811 3812 rq->wake_stamp = jiffies; 3813 rq->wake_avg_idle = rq->avg_idle / 2; 3814 3815 rq->idle_stamp = 0; 3816 } 3817 #endif 3818 } 3819 3820 /* 3821 * Consider @p being inside a wait loop: 3822 * 3823 * for (;;) { 3824 * set_current_state(TASK_UNINTERRUPTIBLE); 3825 * 3826 * if (CONDITION) 3827 * break; 3828 * 3829 * schedule(); 3830 * } 3831 * __set_current_state(TASK_RUNNING); 3832 * 3833 * between set_current_state() and schedule(). In this case @p is still 3834 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in 3835 * an atomic manner. 
3836 * 3837 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq 3838 * then schedule() must still happen and p->state can be changed to 3839 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we 3840 * need to do a full wakeup with enqueue. 3841 * 3842 * Returns: %true when the wakeup is done, 3843 * %false otherwise. 3844 */ 3845 static int ttwu_runnable(struct task_struct *p, int wake_flags) 3846 { 3847 struct rq_flags rf; 3848 struct rq *rq; 3849 int ret = 0; 3850 3851 rq = __task_rq_lock(p, &rf); 3852 if (task_on_rq_queued(p)) { 3853 if (!task_on_cpu(rq, p)) { 3854 /* 3855 * When on_rq && !on_cpu the task is preempted, see if 3856 * it should preempt the task that is current now. 3857 */ 3858 update_rq_clock(rq); 3859 check_preempt_curr(rq, p, wake_flags); 3860 } 3861 ttwu_do_wakeup(p); 3862 ret = 1; 3863 } 3864 __task_rq_unlock(rq, &rf); 3865 3866 return ret; 3867 } 3868 3869 #ifdef CONFIG_SMP 3870 void sched_ttwu_pending(void *arg) 3871 { 3872 struct llist_node *llist = arg; 3873 struct rq *rq = this_rq(); 3874 struct task_struct *p, *t; 3875 struct rq_flags rf; 3876 3877 if (!llist) 3878 return; 3879 3880 rq_lock_irqsave(rq, &rf); 3881 update_rq_clock(rq); 3882 3883 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { 3884 if (WARN_ON_ONCE(p->on_cpu)) 3885 smp_cond_load_acquire(&p->on_cpu, !VAL); 3886 3887 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) 3888 set_task_cpu(p, cpu_of(rq)); 3889 3890 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); 3891 } 3892 3893 /* 3894 * Must be after enqueueing at least once task such that 3895 * idle_cpu() does not observe a false-negative -- if it does, 3896 * it is possible for select_idle_siblings() to stack a number 3897 * of tasks on this CPU during that window. 3898 * 3899 * It is ok to clear ttwu_pending when another task pending. 3900 * We will receive IPI after local irq enabled and then enqueue it. 3901 * Since now nr_running > 0, idle_cpu() will always get correct result. 3902 */ 3903 WRITE_ONCE(rq->ttwu_pending, 0); 3904 rq_unlock_irqrestore(rq, &rf); 3905 } 3906 3907 /* 3908 * Prepare the scene for sending an IPI for a remote smp_call 3909 * 3910 * Returns true if the caller can proceed with sending the IPI. 3911 * Returns false otherwise. 3912 */ 3913 bool call_function_single_prep_ipi(int cpu) 3914 { 3915 if (set_nr_if_polling(cpu_rq(cpu)->idle)) { 3916 trace_sched_wake_idle_without_ipi(cpu); 3917 return false; 3918 } 3919 3920 return true; 3921 } 3922 3923 /* 3924 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if 3925 * necessary. The wakee CPU on receipt of the IPI will queue the task 3926 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost 3927 * of the wakeup instead of the waker. 
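 *
 * The receive side, roughly (sketch):
 *
 *	IPI -> flush_smp_call_function_queue()
 *	    -> sched_ttwu_pending(llist of queued tasks)
 *	        -> ttwu_do_activate() for each task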
3928 */ 3929 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3930 { 3931 struct rq *rq = cpu_rq(cpu); 3932 3933 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 3934 3935 WRITE_ONCE(rq->ttwu_pending, 1); 3936 __smp_call_single_queue(cpu, &p->wake_entry.llist); 3937 } 3938 3939 void wake_up_if_idle(int cpu) 3940 { 3941 struct rq *rq = cpu_rq(cpu); 3942 3943 guard(rcu)(); 3944 if (is_idle_task(rcu_dereference(rq->curr))) { 3945 guard(rq_lock_irqsave)(rq); 3946 if (is_idle_task(rq->curr)) 3947 resched_curr(rq); 3948 } 3949 } 3950 3951 bool cpus_share_cache(int this_cpu, int that_cpu) 3952 { 3953 if (this_cpu == that_cpu) 3954 return true; 3955 3956 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 3957 } 3958 3959 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) 3960 { 3961 /* 3962 * Do not complicate things with the async wake_list while the CPU is 3963 * in hotplug state. 3964 */ 3965 if (!cpu_active(cpu)) 3966 return false; 3967 3968 /* Ensure the task will still be allowed to run on the CPU. */ 3969 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 3970 return false; 3971 3972 /* 3973 * If the CPU does not share cache, then queue the task on the 3974 * remote rqs wakelist to avoid accessing remote data. 3975 */ 3976 if (!cpus_share_cache(smp_processor_id(), cpu)) 3977 return true; 3978 3979 if (cpu == smp_processor_id()) 3980 return false; 3981 3982 /* 3983 * If the wakee cpu is idle, or the task is descheduling and the 3984 * only running task on the CPU, then use the wakelist to offload 3985 * the task activation to the idle (or soon-to-be-idle) CPU as 3986 * the current CPU is likely busy. nr_running is checked to 3987 * avoid unnecessary task stacking. 3988 * 3989 * Note that we can only get here with (wakee) p->on_rq=0, 3990 * p->on_cpu can be whatever, we've done the dequeue, so 3991 * the wakee has been accounted out of ->nr_running. 3992 */ 3993 if (!cpu_rq(cpu)->nr_running) 3994 return true; 3995 3996 return false; 3997 } 3998 3999 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 4000 { 4001 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) { 4002 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ 4003 __ttwu_queue_wakelist(p, cpu, wake_flags); 4004 return true; 4005 } 4006 4007 return false; 4008 } 4009 4010 #else /* !CONFIG_SMP */ 4011 4012 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 4013 { 4014 return false; 4015 } 4016 4017 #endif /* CONFIG_SMP */ 4018 4019 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 4020 { 4021 struct rq *rq = cpu_rq(cpu); 4022 struct rq_flags rf; 4023 4024 if (ttwu_queue_wakelist(p, cpu, wake_flags)) 4025 return; 4026 4027 rq_lock(rq, &rf); 4028 update_rq_clock(rq); 4029 ttwu_do_activate(rq, p, wake_flags, &rf); 4030 rq_unlock(rq, &rf); 4031 } 4032 4033 /* 4034 * Invoked from try_to_wake_up() to check whether the task can be woken up. 4035 * 4036 * The caller holds p::pi_lock if p != current or has preemption 4037 * disabled when p == current. 4038 * 4039 * The rules of PREEMPT_RT saved_state: 4040 * 4041 * The related locking code always holds p::pi_lock when updating 4042 * p::saved_state, which means the code is fully serialized in both cases. 4043 * 4044 * The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other 4045 * bits set. This allows to distinguish all wakeup scenarios. 
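 *
 * For reference, __task_state_match() above encodes the outcome as:
 *
 *	 1: p::__state matched		-> do a regular wakeup
 *	-1: p::saved_state matched	-> PREEMPT_RT, only fix up saved_state
 *	 0: no match			-> nothing to wake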
4046 */ 4047 static __always_inline 4048 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) 4049 { 4050 int match; 4051 4052 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) { 4053 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) && 4054 state != TASK_RTLOCK_WAIT); 4055 } 4056 4057 *success = !!(match = __task_state_match(p, state)); 4058 4059 #ifdef CONFIG_PREEMPT_RT 4060 /* 4061 * Saved state preserves the task state across blocking on 4062 * an RT lock. If the state matches, set p::saved_state to 4063 * TASK_RUNNING, but do not wake the task because it waits 4064 * for a lock wakeup. Also indicate success because from 4065 * the regular waker's point of view this has succeeded. 4066 * 4067 * After acquiring the lock the task will restore p::__state 4068 * from p::saved_state which ensures that the regular 4069 * wakeup is not lost. The restore will also set 4070 * p::saved_state to TASK_RUNNING so any further tests will 4071 * not result in false positives vs. @success 4072 */ 4073 if (match < 0) 4074 p->saved_state = TASK_RUNNING; 4075 #endif 4076 return match > 0; 4077 } 4078 4079 /* 4080 * Notes on Program-Order guarantees on SMP systems. 4081 * 4082 * MIGRATION 4083 * 4084 * The basic program-order guarantee on SMP systems is that when a task [t] 4085 * migrates, all its activity on its old CPU [c0] happens-before any subsequent 4086 * execution on its new CPU [c1]. 4087 * 4088 * For migration (of runnable tasks) this is provided by the following means: 4089 * 4090 * A) UNLOCK of the rq(c0)->lock scheduling out task t 4091 * B) migration for t is required to synchronize *both* rq(c0)->lock and 4092 * rq(c1)->lock (if not at the same time, then in that order). 4093 * C) LOCK of the rq(c1)->lock scheduling in task 4094 * 4095 * Release/acquire chaining guarantees that B happens after A and C after B. 4096 * Note: the CPU doing B need not be c0 or c1 4097 * 4098 * Example: 4099 * 4100 * CPU0 CPU1 CPU2 4101 * 4102 * LOCK rq(0)->lock 4103 * sched-out X 4104 * sched-in Y 4105 * UNLOCK rq(0)->lock 4106 * 4107 * LOCK rq(0)->lock // orders against CPU0 4108 * dequeue X 4109 * UNLOCK rq(0)->lock 4110 * 4111 * LOCK rq(1)->lock 4112 * enqueue X 4113 * UNLOCK rq(1)->lock 4114 * 4115 * LOCK rq(1)->lock // orders against CPU2 4116 * sched-out Z 4117 * sched-in X 4118 * UNLOCK rq(1)->lock 4119 * 4120 * 4121 * BLOCKING -- aka. SLEEP + WAKEUP 4122 * 4123 * For blocking we (obviously) need to provide the same guarantee as for 4124 * migration. However the means are completely different as there is no lock 4125 * chain to provide order. 
Instead we do: 4126 * 4127 * 1) smp_store_release(X->on_cpu, 0) -- finish_task() 4128 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up() 4129 * 4130 * Example: 4131 * 4132 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 4133 * 4134 * LOCK rq(0)->lock LOCK X->pi_lock 4135 * dequeue X 4136 * sched-out X 4137 * smp_store_release(X->on_cpu, 0); 4138 * 4139 * smp_cond_load_acquire(&X->on_cpu, !VAL); 4140 * X->state = WAKING 4141 * set_task_cpu(X,2) 4142 * 4143 * LOCK rq(2)->lock 4144 * enqueue X 4145 * X->state = RUNNING 4146 * UNLOCK rq(2)->lock 4147 * 4148 * LOCK rq(2)->lock // orders against CPU1 4149 * sched-out Z 4150 * sched-in X 4151 * UNLOCK rq(2)->lock 4152 * 4153 * UNLOCK X->pi_lock 4154 * UNLOCK rq(0)->lock 4155 * 4156 * 4157 * However, for wakeups there is a second guarantee we must provide, namely we 4158 * must ensure that CONDITION=1 done by the caller can not be reordered with 4159 * accesses to the task state; see try_to_wake_up() and set_current_state(). 4160 */ 4161 4162 /** 4163 * try_to_wake_up - wake up a thread 4164 * @p: the thread to be awakened 4165 * @state: the mask of task states that can be woken 4166 * @wake_flags: wake modifier flags (WF_*) 4167 * 4168 * Conceptually does: 4169 * 4170 * If (@state & @p->state) @p->state = TASK_RUNNING. 4171 * 4172 * If the task was not queued/runnable, also place it back on a runqueue. 4173 * 4174 * This function is atomic against schedule() which would dequeue the task. 4175 * 4176 * It issues a full memory barrier before accessing @p->state, see the comment 4177 * with set_current_state(). 4178 * 4179 * Uses p->pi_lock to serialize against concurrent wake-ups. 4180 * 4181 * Relies on p->pi_lock stabilizing: 4182 * - p->sched_class 4183 * - p->cpus_ptr 4184 * - p->sched_task_group 4185 * in order to do migration, see its use of select_task_rq()/set_task_cpu(). 4186 * 4187 * Tries really hard to only take one task_rq(p)->lock for performance. 4188 * Takes rq->lock in: 4189 * - ttwu_runnable() -- old rq, unavoidable, see comment there; 4190 * - ttwu_queue() -- new rq, for enqueue of the task; 4191 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us. 4192 * 4193 * As a consequence we race really badly with just about everything. See the 4194 * many memory barriers and their comments for details. 4195 * 4196 * Return: %true if @p->state changes (an actual wakeup was done), 4197 * %false otherwise. 4198 */ 4199 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 4200 { 4201 guard(preempt)(); 4202 int cpu, success = 0; 4203 4204 if (p == current) { 4205 /* 4206 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) 4207 * == smp_processor_id()'. Together this means we can special 4208 * case the whole 'p->on_rq && ttwu_runnable()' case below 4209 * without taking any locks. 4210 * 4211 * In particular: 4212 * - we rely on Program-Order guarantees for all the ordering, 4213 * - we're serialized against set_special_state() by virtue of 4214 * it disabling IRQs (this allows not taking ->pi_lock). 4215 */ 4216 if (!ttwu_state_match(p, state, &success)) 4217 goto out; 4218 4219 trace_sched_waking(p); 4220 ttwu_do_wakeup(p); 4221 goto out; 4222 } 4223 4224 /* 4225 * If we are going to wake up a thread waiting for CONDITION we 4226 * need to ensure that CONDITION=1 done by the caller can not be 4227 * reordered with p->state check below. This pairs with smp_store_mb() 4228 * in set_current_state() that the waiting thread does. 
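 *
 * A minimal sketch of the pattern being ordered here (illustrative):
 *
 *   waiter                                     waker
 *   set_current_state(TASK_UNINTERRUPTIBLE);   CONDITION = 1;
 *   if (!CONDITION)                            try_to_wake_up(p, TASK_NORMAL, 0);
 *           schedule();
 *
 * Either the waiter observes CONDITION == 1 and skips the sleep, or the
 * waker observes the sleeping p->state and performs the wakeup.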
4229 */
4230 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4231 smp_mb__after_spinlock();
4232 if (!ttwu_state_match(p, state, &success))
4233 break;
4234
4235 trace_sched_waking(p);
4236
4237 /*
4238 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4239 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4240 * in smp_cond_load_acquire() below.
4241 *
4242 * sched_ttwu_pending()			try_to_wake_up()
4243 *   STORE p->on_rq = 1			  LOAD p->state
4244 *   UNLOCK rq->lock
4245 *
4246 * __schedule() (switch to task 'p')
4247 *   LOCK rq->lock			  smp_rmb();
4248 *   smp_mb__after_spinlock();
4249 *   UNLOCK rq->lock
4250 *
4251 * [task p]
4252 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
4253 *
4254 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4255 * __schedule(). See the comment for smp_mb__after_spinlock().
4256 *
4257 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
4258 */
4259 smp_rmb();
4260 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4261 break;
4262
4263 #ifdef CONFIG_SMP
4264 /*
4265 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4266 * possible to, falsely, observe p->on_cpu == 0.
4267 *
4268 * One must be running (->on_cpu == 1) in order to remove oneself
4269 * from the runqueue.
4270 *
4271 * __schedule() (switch to task 'p')	try_to_wake_up()
4272 *   STORE p->on_cpu = 1		  LOAD p->on_rq
4273 *   UNLOCK rq->lock
4274 *
4275 * __schedule() (put 'p' to sleep)
4276 *   LOCK rq->lock			  smp_rmb();
4277 *   smp_mb__after_spinlock();
4278 *   STORE p->on_rq = 0			  LOAD p->on_cpu
4279 *
4280 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4281 * __schedule(). See the comment for smp_mb__after_spinlock().
4282 *
4283 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4284 * schedule()'s deactivate_task() has 'happened' and p will no longer
4285 * care about its own p->state. See the comment in __schedule().
4286 */
4287 smp_acquire__after_ctrl_dep();
4288
4289 /*
4290 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4291 * == 0), which means we need to do an enqueue, change p->state to
4292 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4293 * enqueue, such as ttwu_queue_wakelist().
4294 */
4295 WRITE_ONCE(p->__state, TASK_WAKING);
4296
4297 /*
4298 * If the owning (remote) CPU is still in the middle of schedule() with
4299 * this task as prev, consider queueing p on the remote CPU's wake_list,
4300 * which potentially sends an IPI instead of spinning on p->on_cpu to
4301 * let the waker make forward progress. This is safe because IRQs are
4302 * disabled and the IPI will deliver after on_cpu is cleared.
4303 *
4304 * Ensure we load task_cpu(p) after p->on_cpu:
4305 *
4306 * set_task_cpu(p, cpu);
4307 *   STORE p->cpu = @cpu
4308 * __schedule() (switch to task 'p')
4309 *   LOCK rq->lock
4310 *   smp_mb__after_spinlock()		smp_cond_load_acquire(&p->on_cpu)
4311 *   STORE p->on_cpu = 1		  LOAD p->cpu
4312 *
4313 * to ensure we observe the correct CPU on which the task is currently
4314 * scheduling.
4315 */
4316 if (smp_load_acquire(&p->on_cpu) &&
4317 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4318 break;
4319
4320 /*
4321 * If the owning (remote) CPU is still in the middle of schedule() with
4322 * this task as prev, wait until it's done referencing the task.
4323 *
4324 * Pairs with the smp_store_release() in finish_task().
4325 *
4326 * This ensures that tasks getting woken will be fully ordered against
4327 * their previous state and preserve Program Order.
4328 */ 4329 smp_cond_load_acquire(&p->on_cpu, !VAL); 4330 4331 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU); 4332 if (task_cpu(p) != cpu) { 4333 if (p->in_iowait) { 4334 delayacct_blkio_end(p); 4335 atomic_dec(&task_rq(p)->nr_iowait); 4336 } 4337 4338 wake_flags |= WF_MIGRATED; 4339 psi_ttwu_dequeue(p); 4340 set_task_cpu(p, cpu); 4341 } 4342 #else 4343 cpu = task_cpu(p); 4344 #endif /* CONFIG_SMP */ 4345 4346 ttwu_queue(p, cpu, wake_flags); 4347 } 4348 out: 4349 if (success) 4350 ttwu_stat(p, task_cpu(p), wake_flags); 4351 4352 return success; 4353 } 4354 4355 static bool __task_needs_rq_lock(struct task_struct *p) 4356 { 4357 unsigned int state = READ_ONCE(p->__state); 4358 4359 /* 4360 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when 4361 * the task is blocked. Make sure to check @state since ttwu() can drop 4362 * locks at the end, see ttwu_queue_wakelist(). 4363 */ 4364 if (state == TASK_RUNNING || state == TASK_WAKING) 4365 return true; 4366 4367 /* 4368 * Ensure we load p->on_rq after p->__state, otherwise it would be 4369 * possible to, falsely, observe p->on_rq == 0. 4370 * 4371 * See try_to_wake_up() for a longer comment. 4372 */ 4373 smp_rmb(); 4374 if (p->on_rq) 4375 return true; 4376 4377 #ifdef CONFIG_SMP 4378 /* 4379 * Ensure the task has finished __schedule() and will not be referenced 4380 * anymore. Again, see try_to_wake_up() for a longer comment. 4381 */ 4382 smp_rmb(); 4383 smp_cond_load_acquire(&p->on_cpu, !VAL); 4384 #endif 4385 4386 return false; 4387 } 4388 4389 /** 4390 * task_call_func - Invoke a function on task in fixed state 4391 * @p: Process for which the function is to be invoked, can be @current. 4392 * @func: Function to invoke. 4393 * @arg: Argument to function. 4394 * 4395 * Fix the task in it's current state by avoiding wakeups and or rq operations 4396 * and call @func(@arg) on it. This function can use ->on_rq and task_curr() 4397 * to work out what the state is, if required. Given that @func can be invoked 4398 * with a runqueue lock held, it had better be quite lightweight. 4399 * 4400 * Returns: 4401 * Whatever @func returns 4402 */ 4403 int task_call_func(struct task_struct *p, task_call_f func, void *arg) 4404 { 4405 struct rq *rq = NULL; 4406 struct rq_flags rf; 4407 int ret; 4408 4409 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 4410 4411 if (__task_needs_rq_lock(p)) 4412 rq = __task_rq_lock(p, &rf); 4413 4414 /* 4415 * At this point the task is pinned; either: 4416 * - blocked and we're holding off wakeups (pi->lock) 4417 * - woken, and we're holding off enqueue (rq->lock) 4418 * - queued, and we're holding off schedule (rq->lock) 4419 * - running, and we're holding off de-schedule (rq->lock) 4420 * 4421 * The called function (@func) can use: task_curr(), p->on_rq and 4422 * p->__state to differentiate between these states. 4423 */ 4424 ret = func(p, arg); 4425 4426 if (rq) 4427 rq_unlock(rq, &rf); 4428 4429 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); 4430 return ret; 4431 } 4432 4433 /** 4434 * cpu_curr_snapshot - Return a snapshot of the currently running task 4435 * @cpu: The CPU on which to snapshot the task. 4436 * 4437 * Returns the task_struct pointer of the task "currently" running on 4438 * the specified CPU. 4439 * 4440 * If the specified CPU was offline, the return value is whatever it 4441 * is, perhaps a pointer to the task_struct structure of that CPU's idle 4442 * task, but there is no guarantee. 
Callers wishing a useful return 4443 * value must take some action to ensure that the specified CPU remains 4444 * online throughout. 4445 * 4446 * This function executes full memory barriers before and after fetching 4447 * the pointer, which permits the caller to confine this function's fetch 4448 * with respect to the caller's accesses to other shared variables. 4449 */ 4450 struct task_struct *cpu_curr_snapshot(int cpu) 4451 { 4452 struct rq *rq = cpu_rq(cpu); 4453 struct task_struct *t; 4454 struct rq_flags rf; 4455 4456 rq_lock_irqsave(rq, &rf); 4457 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */ 4458 t = rcu_dereference(cpu_curr(cpu)); 4459 rq_unlock_irqrestore(rq, &rf); 4460 smp_mb(); /* Pairing determined by caller's synchronization design. */ 4461 4462 return t; 4463 } 4464 4465 /** 4466 * wake_up_process - Wake up a specific process 4467 * @p: The process to be woken up. 4468 * 4469 * Attempt to wake up the nominated process and move it to the set of runnable 4470 * processes. 4471 * 4472 * Return: 1 if the process was woken up, 0 if it was already running. 4473 * 4474 * This function executes a full memory barrier before accessing the task state. 4475 */ 4476 int wake_up_process(struct task_struct *p) 4477 { 4478 return try_to_wake_up(p, TASK_NORMAL, 0); 4479 } 4480 EXPORT_SYMBOL(wake_up_process); 4481 4482 int wake_up_state(struct task_struct *p, unsigned int state) 4483 { 4484 return try_to_wake_up(p, state, 0); 4485 } 4486 4487 /* 4488 * Perform scheduler related setup for a newly forked process p. 4489 * p is forked by current. 4490 * 4491 * __sched_fork() is basic setup used by init_idle() too: 4492 */ 4493 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 4494 { 4495 p->on_rq = 0; 4496 4497 p->se.on_rq = 0; 4498 p->se.exec_start = 0; 4499 p->se.sum_exec_runtime = 0; 4500 p->se.prev_sum_exec_runtime = 0; 4501 p->se.nr_migrations = 0; 4502 p->se.vruntime = 0; 4503 p->se.vlag = 0; 4504 p->se.slice = sysctl_sched_base_slice; 4505 INIT_LIST_HEAD(&p->se.group_node); 4506 4507 #ifdef CONFIG_FAIR_GROUP_SCHED 4508 p->se.cfs_rq = NULL; 4509 #endif 4510 4511 #ifdef CONFIG_SCHEDSTATS 4512 /* Even if schedstat is disabled, there should not be garbage */ 4513 memset(&p->stats, 0, sizeof(p->stats)); 4514 #endif 4515 4516 RB_CLEAR_NODE(&p->dl.rb_node); 4517 init_dl_task_timer(&p->dl); 4518 init_dl_inactive_task_timer(&p->dl); 4519 __dl_clear_params(p); 4520 4521 INIT_LIST_HEAD(&p->rt.run_list); 4522 p->rt.timeout = 0; 4523 p->rt.time_slice = sched_rr_timeslice; 4524 p->rt.on_rq = 0; 4525 p->rt.on_list = 0; 4526 4527 #ifdef CONFIG_PREEMPT_NOTIFIERS 4528 INIT_HLIST_HEAD(&p->preempt_notifiers); 4529 #endif 4530 4531 #ifdef CONFIG_COMPACTION 4532 p->capture_control = NULL; 4533 #endif 4534 init_numa_balancing(clone_flags, p); 4535 #ifdef CONFIG_SMP 4536 p->wake_entry.u_flags = CSD_TYPE_TTWU; 4537 p->migration_pending = NULL; 4538 #endif 4539 init_sched_mm_cid(p); 4540 } 4541 4542 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 4543 4544 #ifdef CONFIG_NUMA_BALANCING 4545 4546 int sysctl_numa_balancing_mode; 4547 4548 static void __set_numabalancing_state(bool enabled) 4549 { 4550 if (enabled) 4551 static_branch_enable(&sched_numa_balancing); 4552 else 4553 static_branch_disable(&sched_numa_balancing); 4554 } 4555 4556 void set_numabalancing_state(bool enabled) 4557 { 4558 if (enabled) 4559 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL; 4560 else 4561 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED; 4562 
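	/* Propagate the new mode to the sched_numa_balancing static key. */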
__set_numabalancing_state(enabled); 4563 } 4564 4565 #ifdef CONFIG_PROC_SYSCTL 4566 static void reset_memory_tiering(void) 4567 { 4568 struct pglist_data *pgdat; 4569 4570 for_each_online_pgdat(pgdat) { 4571 pgdat->nbp_threshold = 0; 4572 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); 4573 pgdat->nbp_th_start = jiffies_to_msecs(jiffies); 4574 } 4575 } 4576 4577 static int sysctl_numa_balancing(struct ctl_table *table, int write, 4578 void *buffer, size_t *lenp, loff_t *ppos) 4579 { 4580 struct ctl_table t; 4581 int err; 4582 int state = sysctl_numa_balancing_mode; 4583 4584 if (write && !capable(CAP_SYS_ADMIN)) 4585 return -EPERM; 4586 4587 t = *table; 4588 t.data = &state; 4589 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 4590 if (err < 0) 4591 return err; 4592 if (write) { 4593 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && 4594 (state & NUMA_BALANCING_MEMORY_TIERING)) 4595 reset_memory_tiering(); 4596 sysctl_numa_balancing_mode = state; 4597 __set_numabalancing_state(state); 4598 } 4599 return err; 4600 } 4601 #endif 4602 #endif 4603 4604 #ifdef CONFIG_SCHEDSTATS 4605 4606 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 4607 4608 static void set_schedstats(bool enabled) 4609 { 4610 if (enabled) 4611 static_branch_enable(&sched_schedstats); 4612 else 4613 static_branch_disable(&sched_schedstats); 4614 } 4615 4616 void force_schedstat_enabled(void) 4617 { 4618 if (!schedstat_enabled()) { 4619 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 4620 static_branch_enable(&sched_schedstats); 4621 } 4622 } 4623 4624 static int __init setup_schedstats(char *str) 4625 { 4626 int ret = 0; 4627 if (!str) 4628 goto out; 4629 4630 if (!strcmp(str, "enable")) { 4631 set_schedstats(true); 4632 ret = 1; 4633 } else if (!strcmp(str, "disable")) { 4634 set_schedstats(false); 4635 ret = 1; 4636 } 4637 out: 4638 if (!ret) 4639 pr_warn("Unable to parse schedstats=\n"); 4640 4641 return ret; 4642 } 4643 __setup("schedstats=", setup_schedstats); 4644 4645 #ifdef CONFIG_PROC_SYSCTL 4646 static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, 4647 size_t *lenp, loff_t *ppos) 4648 { 4649 struct ctl_table t; 4650 int err; 4651 int state = static_branch_likely(&sched_schedstats); 4652 4653 if (write && !capable(CAP_SYS_ADMIN)) 4654 return -EPERM; 4655 4656 t = *table; 4657 t.data = &state; 4658 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 4659 if (err < 0) 4660 return err; 4661 if (write) 4662 set_schedstats(state); 4663 return err; 4664 } 4665 #endif /* CONFIG_PROC_SYSCTL */ 4666 #endif /* CONFIG_SCHEDSTATS */ 4667 4668 #ifdef CONFIG_SYSCTL 4669 static struct ctl_table sched_core_sysctls[] = { 4670 #ifdef CONFIG_SCHEDSTATS 4671 { 4672 .procname = "sched_schedstats", 4673 .data = NULL, 4674 .maxlen = sizeof(unsigned int), 4675 .mode = 0644, 4676 .proc_handler = sysctl_schedstats, 4677 .extra1 = SYSCTL_ZERO, 4678 .extra2 = SYSCTL_ONE, 4679 }, 4680 #endif /* CONFIG_SCHEDSTATS */ 4681 #ifdef CONFIG_UCLAMP_TASK 4682 { 4683 .procname = "sched_util_clamp_min", 4684 .data = &sysctl_sched_uclamp_util_min, 4685 .maxlen = sizeof(unsigned int), 4686 .mode = 0644, 4687 .proc_handler = sysctl_sched_uclamp_handler, 4688 }, 4689 { 4690 .procname = "sched_util_clamp_max", 4691 .data = &sysctl_sched_uclamp_util_max, 4692 .maxlen = sizeof(unsigned int), 4693 .mode = 0644, 4694 .proc_handler = sysctl_sched_uclamp_handler, 4695 }, 4696 { 4697 .procname = "sched_util_clamp_min_rt_default", 4698 .data = 
&sysctl_sched_uclamp_util_min_rt_default, 4699 .maxlen = sizeof(unsigned int), 4700 .mode = 0644, 4701 .proc_handler = sysctl_sched_uclamp_handler, 4702 }, 4703 #endif /* CONFIG_UCLAMP_TASK */ 4704 #ifdef CONFIG_NUMA_BALANCING 4705 { 4706 .procname = "numa_balancing", 4707 .data = NULL, /* filled in by handler */ 4708 .maxlen = sizeof(unsigned int), 4709 .mode = 0644, 4710 .proc_handler = sysctl_numa_balancing, 4711 .extra1 = SYSCTL_ZERO, 4712 .extra2 = SYSCTL_FOUR, 4713 }, 4714 #endif /* CONFIG_NUMA_BALANCING */ 4715 {} 4716 }; 4717 static int __init sched_core_sysctl_init(void) 4718 { 4719 register_sysctl_init("kernel", sched_core_sysctls); 4720 return 0; 4721 } 4722 late_initcall(sched_core_sysctl_init); 4723 #endif /* CONFIG_SYSCTL */ 4724 4725 /* 4726 * fork()/clone()-time setup: 4727 */ 4728 int sched_fork(unsigned long clone_flags, struct task_struct *p) 4729 { 4730 __sched_fork(clone_flags, p); 4731 /* 4732 * We mark the process as NEW here. This guarantees that 4733 * nobody will actually run it, and a signal or other external 4734 * event cannot wake it up and insert it on the runqueue either. 4735 */ 4736 p->__state = TASK_NEW; 4737 4738 /* 4739 * Make sure we do not leak PI boosting priority to the child. 4740 */ 4741 p->prio = current->normal_prio; 4742 4743 uclamp_fork(p); 4744 4745 /* 4746 * Revert to default priority/policy on fork if requested. 4747 */ 4748 if (unlikely(p->sched_reset_on_fork)) { 4749 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 4750 p->policy = SCHED_NORMAL; 4751 p->static_prio = NICE_TO_PRIO(0); 4752 p->rt_priority = 0; 4753 } else if (PRIO_TO_NICE(p->static_prio) < 0) 4754 p->static_prio = NICE_TO_PRIO(0); 4755 4756 p->prio = p->normal_prio = p->static_prio; 4757 set_load_weight(p, false); 4758 4759 /* 4760 * We don't need the reset flag anymore after the fork. It has 4761 * fulfilled its duty: 4762 */ 4763 p->sched_reset_on_fork = 0; 4764 } 4765 4766 if (dl_prio(p->prio)) 4767 return -EAGAIN; 4768 else if (rt_prio(p->prio)) 4769 p->sched_class = &rt_sched_class; 4770 else 4771 p->sched_class = &fair_sched_class; 4772 4773 init_entity_runnable_average(&p->se); 4774 4775 4776 #ifdef CONFIG_SCHED_INFO 4777 if (likely(sched_info_on())) 4778 memset(&p->sched_info, 0, sizeof(p->sched_info)); 4779 #endif 4780 #if defined(CONFIG_SMP) 4781 p->on_cpu = 0; 4782 #endif 4783 init_task_preempt_count(p); 4784 #ifdef CONFIG_SMP 4785 plist_node_init(&p->pushable_tasks, MAX_PRIO); 4786 RB_CLEAR_NODE(&p->pushable_dl_tasks); 4787 #endif 4788 return 0; 4789 } 4790 4791 void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) 4792 { 4793 unsigned long flags; 4794 4795 /* 4796 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly 4797 * required yet, but lockdep gets upset if rules are violated. 4798 */ 4799 raw_spin_lock_irqsave(&p->pi_lock, flags); 4800 #ifdef CONFIG_CGROUP_SCHED 4801 if (1) { 4802 struct task_group *tg; 4803 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], 4804 struct task_group, css); 4805 tg = autogroup_task_group(p, tg); 4806 p->sched_task_group = tg; 4807 } 4808 #endif 4809 rseq_migrate(p); 4810 /* 4811 * We're setting the CPU for the first time, we don't migrate, 4812 * so use __set_task_cpu(). 
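 * (__set_task_cpu() only updates the task's CPU bookkeeping; unlike
 * set_task_cpu() it does not invoke the sched_class migration hooks, see
 * also the comment in wake_up_new_task().)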
4813 */ 4814 __set_task_cpu(p, smp_processor_id()); 4815 if (p->sched_class->task_fork) 4816 p->sched_class->task_fork(p); 4817 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4818 } 4819 4820 void sched_post_fork(struct task_struct *p) 4821 { 4822 uclamp_post_fork(p); 4823 } 4824 4825 unsigned long to_ratio(u64 period, u64 runtime) 4826 { 4827 if (runtime == RUNTIME_INF) 4828 return BW_UNIT; 4829 4830 /* 4831 * Doing this here saves a lot of checks in all 4832 * the calling paths, and returning zero seems 4833 * safe for them anyway. 4834 */ 4835 if (period == 0) 4836 return 0; 4837 4838 return div64_u64(runtime << BW_SHIFT, period); 4839 } 4840 4841 /* 4842 * wake_up_new_task - wake up a newly created task for the first time. 4843 * 4844 * This function will do some initial scheduler statistics housekeeping 4845 * that must be done for every newly created context, then puts the task 4846 * on the runqueue and wakes it. 4847 */ 4848 void wake_up_new_task(struct task_struct *p) 4849 { 4850 struct rq_flags rf; 4851 struct rq *rq; 4852 4853 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 4854 WRITE_ONCE(p->__state, TASK_RUNNING); 4855 #ifdef CONFIG_SMP 4856 /* 4857 * Fork balancing, do it here and not earlier because: 4858 * - cpus_ptr can change in the fork path 4859 * - any previously selected CPU might disappear through hotplug 4860 * 4861 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 4862 * as we're not fully set-up yet. 4863 */ 4864 p->recent_used_cpu = task_cpu(p); 4865 rseq_migrate(p); 4866 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK)); 4867 #endif 4868 rq = __task_rq_lock(p, &rf); 4869 update_rq_clock(rq); 4870 post_init_entity_util_avg(p); 4871 4872 activate_task(rq, p, ENQUEUE_NOCLOCK); 4873 trace_sched_wakeup_new(p); 4874 check_preempt_curr(rq, p, WF_FORK); 4875 #ifdef CONFIG_SMP 4876 if (p->sched_class->task_woken) { 4877 /* 4878 * Nothing relies on rq->lock after this, so it's fine to 4879 * drop it. 4880 */ 4881 rq_unpin_lock(rq, &rf); 4882 p->sched_class->task_woken(rq, p); 4883 rq_repin_lock(rq, &rf); 4884 } 4885 #endif 4886 task_rq_unlock(rq, p, &rf); 4887 } 4888 4889 #ifdef CONFIG_PREEMPT_NOTIFIERS 4890 4891 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); 4892 4893 void preempt_notifier_inc(void) 4894 { 4895 static_branch_inc(&preempt_notifier_key); 4896 } 4897 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 4898 4899 void preempt_notifier_dec(void) 4900 { 4901 static_branch_dec(&preempt_notifier_key); 4902 } 4903 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 4904 4905 /** 4906 * preempt_notifier_register - tell me when current is being preempted & rescheduled 4907 * @notifier: notifier struct to register 4908 */ 4909 void preempt_notifier_register(struct preempt_notifier *notifier) 4910 { 4911 if (!static_branch_unlikely(&preempt_notifier_key)) 4912 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 4913 4914 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 4915 } 4916 EXPORT_SYMBOL_GPL(preempt_notifier_register); 4917 4918 /** 4919 * preempt_notifier_unregister - no longer interested in preemption notifications 4920 * @notifier: notifier struct to unregister 4921 * 4922 * This is *not* safe to call from within a preemption notifier. 
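 *
 * Illustrative pairing with the registration side (a sketch; @notifier and
 * @ops are caller-provided):
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&notifier, &ops);
 *	preempt_notifier_register(&notifier);
 *	...
 *	preempt_notifier_unregister(&notifier);
 *	preempt_notifier_dec();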
4923 */ 4924 void preempt_notifier_unregister(struct preempt_notifier *notifier) 4925 { 4926 hlist_del(¬ifier->link); 4927 } 4928 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 4929 4930 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 4931 { 4932 struct preempt_notifier *notifier; 4933 4934 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 4935 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 4936 } 4937 4938 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 4939 { 4940 if (static_branch_unlikely(&preempt_notifier_key)) 4941 __fire_sched_in_preempt_notifiers(curr); 4942 } 4943 4944 static void 4945 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 4946 struct task_struct *next) 4947 { 4948 struct preempt_notifier *notifier; 4949 4950 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 4951 notifier->ops->sched_out(notifier, next); 4952 } 4953 4954 static __always_inline void 4955 fire_sched_out_preempt_notifiers(struct task_struct *curr, 4956 struct task_struct *next) 4957 { 4958 if (static_branch_unlikely(&preempt_notifier_key)) 4959 __fire_sched_out_preempt_notifiers(curr, next); 4960 } 4961 4962 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 4963 4964 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 4965 { 4966 } 4967 4968 static inline void 4969 fire_sched_out_preempt_notifiers(struct task_struct *curr, 4970 struct task_struct *next) 4971 { 4972 } 4973 4974 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 4975 4976 static inline void prepare_task(struct task_struct *next) 4977 { 4978 #ifdef CONFIG_SMP 4979 /* 4980 * Claim the task as running, we do this before switching to it 4981 * such that any running task will have this set. 4982 * 4983 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and 4984 * its ordering comment. 4985 */ 4986 WRITE_ONCE(next->on_cpu, 1); 4987 #endif 4988 } 4989 4990 static inline void finish_task(struct task_struct *prev) 4991 { 4992 #ifdef CONFIG_SMP 4993 /* 4994 * This must be the very last reference to @prev from this CPU. After 4995 * p->on_cpu is cleared, the task can be moved to a different CPU. We 4996 * must ensure this doesn't happen until the switch is completely 4997 * finished. 4998 * 4999 * In particular, the load of prev->state in finish_task_switch() must 5000 * happen before this. 5001 * 5002 * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 5003 */ 5004 smp_store_release(&prev->on_cpu, 0); 5005 #endif 5006 } 5007 5008 #ifdef CONFIG_SMP 5009 5010 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head) 5011 { 5012 void (*func)(struct rq *rq); 5013 struct balance_callback *next; 5014 5015 lockdep_assert_rq_held(rq); 5016 5017 while (head) { 5018 func = (void (*)(struct rq *))head->func; 5019 next = head->next; 5020 head->next = NULL; 5021 head = next; 5022 5023 func(rq); 5024 } 5025 } 5026 5027 static void balance_push(struct rq *rq); 5028 5029 /* 5030 * balance_push_callback is a right abuse of the callback interface and plays 5031 * by significantly different rules. 5032 * 5033 * Where the normal balance_callback's purpose is to be ran in the same context 5034 * that queued it (only later, when it's safe to drop rq->lock again), 5035 * balance_push_callback is specifically targeted at __schedule(). 5036 * 5037 * This abuse is tolerated because it places all the unlikely/odd cases behind 5038 * a single test, namely: rq->balance_callback == NULL. 
5039 */ 5040 struct balance_callback balance_push_callback = { 5041 .next = NULL, 5042 .func = balance_push, 5043 }; 5044 5045 static inline struct balance_callback * 5046 __splice_balance_callbacks(struct rq *rq, bool split) 5047 { 5048 struct balance_callback *head = rq->balance_callback; 5049 5050 if (likely(!head)) 5051 return NULL; 5052 5053 lockdep_assert_rq_held(rq); 5054 /* 5055 * Must not take balance_push_callback off the list when 5056 * splice_balance_callbacks() and balance_callbacks() are not 5057 * in the same rq->lock section. 5058 * 5059 * In that case it would be possible for __schedule() to interleave 5060 * and observe the list empty. 5061 */ 5062 if (split && head == &balance_push_callback) 5063 head = NULL; 5064 else 5065 rq->balance_callback = NULL; 5066 5067 return head; 5068 } 5069 5070 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) 5071 { 5072 return __splice_balance_callbacks(rq, true); 5073 } 5074 5075 static void __balance_callbacks(struct rq *rq) 5076 { 5077 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false)); 5078 } 5079 5080 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) 5081 { 5082 unsigned long flags; 5083 5084 if (unlikely(head)) { 5085 raw_spin_rq_lock_irqsave(rq, flags); 5086 do_balance_callbacks(rq, head); 5087 raw_spin_rq_unlock_irqrestore(rq, flags); 5088 } 5089 } 5090 5091 #else 5092 5093 static inline void __balance_callbacks(struct rq *rq) 5094 { 5095 } 5096 5097 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) 5098 { 5099 return NULL; 5100 } 5101 5102 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) 5103 { 5104 } 5105 5106 #endif 5107 5108 static inline void 5109 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) 5110 { 5111 /* 5112 * Since the runqueue lock will be released by the next 5113 * task (which is an invalid locking op but in the case 5114 * of the scheduler it's an obvious special-case), so we 5115 * do an early lockdep release here: 5116 */ 5117 rq_unpin_lock(rq, rf); 5118 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); 5119 #ifdef CONFIG_DEBUG_SPINLOCK 5120 /* this is a valid case when another task releases the spinlock */ 5121 rq_lockp(rq)->owner = next; 5122 #endif 5123 } 5124 5125 static inline void finish_lock_switch(struct rq *rq) 5126 { 5127 /* 5128 * If we are tracking spinlock dependencies then we have to 5129 * fix up the runqueue lock - which gets 'carried over' from 5130 * prev into current: 5131 */ 5132 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); 5133 __balance_callbacks(rq); 5134 raw_spin_rq_unlock_irq(rq); 5135 } 5136 5137 /* 5138 * NOP if the arch has not defined these: 5139 */ 5140 5141 #ifndef prepare_arch_switch 5142 # define prepare_arch_switch(next) do { } while (0) 5143 #endif 5144 5145 #ifndef finish_arch_post_lock_switch 5146 # define finish_arch_post_lock_switch() do { } while (0) 5147 #endif 5148 5149 static inline void kmap_local_sched_out(void) 5150 { 5151 #ifdef CONFIG_KMAP_LOCAL 5152 if (unlikely(current->kmap_ctrl.idx)) 5153 __kmap_local_sched_out(); 5154 #endif 5155 } 5156 5157 static inline void kmap_local_sched_in(void) 5158 { 5159 #ifdef CONFIG_KMAP_LOCAL 5160 if (unlikely(current->kmap_ctrl.idx)) 5161 __kmap_local_sched_in(); 5162 #endif 5163 } 5164 5165 /** 5166 * prepare_task_switch - prepare to switch tasks 5167 * @rq: the runqueue preparing to switch 5168 * @prev: the current task that is being switched out 
5169 * @next: the task we are going to switch to.
5170 *
5171 * This is called with the rq lock held and interrupts off. It must
5172 * be paired with a subsequent finish_task_switch after the context
5173 * switch.
5174 *
5175 * prepare_task_switch sets up locking and calls architecture-specific
5176 * hooks.
5177 */
5178 static inline void
5179 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5180 struct task_struct *next)
5181 {
5182 kcov_prepare_switch(prev);
5183 sched_info_switch(rq, prev, next);
5184 perf_event_task_sched_out(prev, next);
5185 rseq_preempt(prev);
5186 fire_sched_out_preempt_notifiers(prev, next);
5187 kmap_local_sched_out();
5188 prepare_task(next);
5189 prepare_arch_switch(next);
5190 }
5191
5192 /**
5193 * finish_task_switch - clean up after a task-switch
5194 * @prev: the thread we just switched away from.
5195 *
5196 * finish_task_switch must be called after the context switch, paired
5197 * with a prepare_task_switch call before the context switch.
5198 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5199 * and do any other architecture-specific cleanup actions.
5200 *
5201 * Note that we may have delayed dropping an mm in context_switch(). If
5202 * so, we finish that here outside of the runqueue lock. (Doing it
5203 * with the lock held can cause deadlocks; see schedule() for
5204 * details.)
5205 *
5206 * The context switch has flipped the stack from under us and restored the
5207 * local variables which were saved when this task called schedule() in the
5208 * past. prev == current is still correct but we need to recalculate this_rq
5209 * because prev may have moved to another CPU.
5210 */
5211 static struct rq *finish_task_switch(struct task_struct *prev)
5212 __releases(rq->lock)
5213 {
5214 struct rq *rq = this_rq();
5215 struct mm_struct *mm = rq->prev_mm;
5216 unsigned int prev_state;
5217
5218 /*
5219 * The previous task will have left us with a preempt_count of 2
5220 * because it left us after:
5221 *
5222 * schedule()
5223 *   preempt_disable();			// 1
5224 *   __schedule()
5225 *     raw_spin_lock_irq(&rq->lock)	// 2
5226 *
5227 * Also, see FORK_PREEMPT_COUNT.
5228 */
5229 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5230 "corrupted preempt_count: %s/%d/0x%x\n",
5231 current->comm, current->pid, preempt_count()))
5232 preempt_count_set(FORK_PREEMPT_COUNT);
5233
5234 rq->prev_mm = NULL;
5235
5236 /*
5237 * A task struct has one reference for the use as "current".
5238 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5239 * schedule one last time. The schedule call will never return, and
5240 * the scheduled task must drop that reference.
5241 *
5242 * We must observe prev->state before clearing prev->on_cpu (in
5243 * finish_task), otherwise a concurrent wakeup can get prev
5244 * running on another CPU and we could race with its RUNNING -> DEAD
5245 * transition, resulting in a double drop.
5246 */
5247 prev_state = READ_ONCE(prev->__state);
5248 vtime_task_switch(prev);
5249 perf_event_task_sched_in(prev, current);
5250 finish_task(prev);
5251 tick_nohz_task_switch();
5252 finish_lock_switch(rq);
5253 finish_arch_post_lock_switch();
5254 kcov_finish_switch(current);
5255 /*
5256 * kmap_local_sched_out() is invoked with rq::lock held and
5257 * interrupts disabled. There is no requirement for that, but the
5258 * sched out code does not have an interrupt enabled section.
5259 * Restoring the maps on sched in does not require interrupts being
5260 * disabled either.
5261 */ 5262 kmap_local_sched_in(); 5263 5264 fire_sched_in_preempt_notifiers(current); 5265 /* 5266 * When switching through a kernel thread, the loop in 5267 * membarrier_{private,global}_expedited() may have observed that 5268 * kernel thread and not issued an IPI. It is therefore possible to 5269 * schedule between user->kernel->user threads without passing though 5270 * switch_mm(). Membarrier requires a barrier after storing to 5271 * rq->curr, before returning to userspace, so provide them here: 5272 * 5273 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly 5274 * provided by mmdrop_lazy_tlb(), 5275 * - a sync_core for SYNC_CORE. 5276 */ 5277 if (mm) { 5278 membarrier_mm_sync_core_before_usermode(mm); 5279 mmdrop_lazy_tlb_sched(mm); 5280 } 5281 5282 if (unlikely(prev_state == TASK_DEAD)) { 5283 if (prev->sched_class->task_dead) 5284 prev->sched_class->task_dead(prev); 5285 5286 /* Task is done with its stack. */ 5287 put_task_stack(prev); 5288 5289 put_task_struct_rcu_user(prev); 5290 } 5291 5292 return rq; 5293 } 5294 5295 /** 5296 * schedule_tail - first thing a freshly forked thread must call. 5297 * @prev: the thread we just switched away from. 5298 */ 5299 asmlinkage __visible void schedule_tail(struct task_struct *prev) 5300 __releases(rq->lock) 5301 { 5302 /* 5303 * New tasks start with FORK_PREEMPT_COUNT, see there and 5304 * finish_task_switch() for details. 5305 * 5306 * finish_task_switch() will drop rq->lock() and lower preempt_count 5307 * and the preempt_enable() will end up enabling preemption (on 5308 * PREEMPT_COUNT kernels). 5309 */ 5310 5311 finish_task_switch(prev); 5312 preempt_enable(); 5313 5314 if (current->set_child_tid) 5315 put_user(task_pid_vnr(current), current->set_child_tid); 5316 5317 calculate_sigpending(); 5318 } 5319 5320 /* 5321 * context_switch - switch to the new MM and the new thread's register state. 5322 */ 5323 static __always_inline struct rq * 5324 context_switch(struct rq *rq, struct task_struct *prev, 5325 struct task_struct *next, struct rq_flags *rf) 5326 { 5327 prepare_task_switch(rq, prev, next); 5328 5329 /* 5330 * For paravirt, this is coupled with an exit in switch_to to 5331 * combine the page table reload and the switch backend into 5332 * one hypercall. 5333 */ 5334 arch_start_context_switch(prev); 5335 5336 /* 5337 * kernel -> kernel lazy + transfer active 5338 * user -> kernel lazy + mmgrab_lazy_tlb() active 5339 * 5340 * kernel -> user switch + mmdrop_lazy_tlb() active 5341 * user -> user switch 5342 * 5343 * switch_mm_cid() needs to be updated if the barriers provided 5344 * by context_switch() are modified. 5345 */ 5346 if (!next->mm) { // to kernel 5347 enter_lazy_tlb(prev->active_mm, next); 5348 5349 next->active_mm = prev->active_mm; 5350 if (prev->mm) // from user 5351 mmgrab_lazy_tlb(prev->active_mm); 5352 else 5353 prev->active_mm = NULL; 5354 } else { // to user 5355 membarrier_switch_mm(rq, prev->active_mm, next->mm); 5356 /* 5357 * sys_membarrier() requires an smp_mb() between setting 5358 * rq->curr / membarrier_switch_mm() and returning to userspace. 5359 * 5360 * The below provides this either through switch_mm(), or in 5361 * case 'prev->active_mm == next->mm' through 5362 * finish_task_switch()'s mmdrop(). 5363 */ 5364 switch_mm_irqs_off(prev->active_mm, next->mm, next); 5365 lru_gen_use_mm(next->mm); 5366 5367 if (!prev->mm) { // from kernel 5368 /* will mmdrop_lazy_tlb() in finish_task_switch(). 
*/
5369 rq->prev_mm = prev->active_mm;
5370 prev->active_mm = NULL;
5371 }
5372 }
5373
5374 /* switch_mm_cid() requires the memory barriers above. */
5375 switch_mm_cid(rq, prev, next);
5376
5377 prepare_lock_switch(rq, next, rf);
5378
5379 /* Here we just switch the register state and the stack. */
5380 switch_to(prev, next, prev);
5381 barrier();
5382
5383 return finish_task_switch(prev);
5384 }
5385
5386 /*
5387 * nr_running and nr_context_switches:
5388 *
5389 * externally visible scheduler statistics: current number of runnable
5390 * threads, total number of context switches performed since bootup.
5391 */
5392 unsigned int nr_running(void)
5393 {
5394 unsigned int i, sum = 0;
5395
5396 for_each_online_cpu(i)
5397 sum += cpu_rq(i)->nr_running;
5398
5399 return sum;
5400 }
5401
5402 /*
5403 * Check if only the current task is running on the CPU.
5404 *
5405 * Caution: this function does not check that the caller has disabled
5406 * preemption, thus the result might have a time-of-check-to-time-of-use
5407 * race. The caller is responsible for using it correctly, for example:
5408 *
5409 * - from a non-preemptible section (of course)
5410 *
5411 * - from a thread that is bound to a single CPU
5412 *
5413 * - in a loop with very short iterations (e.g. a polling loop)
5414 */
5415 bool single_task_running(void)
5416 {
5417 return raw_rq()->nr_running == 1;
5418 }
5419 EXPORT_SYMBOL(single_task_running);
5420
5421 unsigned long long nr_context_switches_cpu(int cpu)
5422 {
5423 return cpu_rq(cpu)->nr_switches;
5424 }
5425
5426 unsigned long long nr_context_switches(void)
5427 {
5428 int i;
5429 unsigned long long sum = 0;
5430
5431 for_each_possible_cpu(i)
5432 sum += cpu_rq(i)->nr_switches;
5433
5434 return sum;
5435 }
5436
5437 /*
5438 * Consumers of these two interfaces, like for example the cpuidle menu
5439 * governor, are using nonsensical data, preferring shallow idle state selection
5440 * for a CPU that has IO-wait which might not even end up running the task when
5441 * it does become runnable.
5442 */
5443
5444 unsigned int nr_iowait_cpu(int cpu)
5445 {
5446 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5447 }
5448
5449 /*
5450 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5451 *
5452 * The idea behind IO-wait accounting is to account the idle time that we could
5453 * have spent running if it were not for IO. That is, if we were to improve the
5454 * storage performance, we'd have a proportional reduction in IO-wait time.
5455 *
5456 * This all works nicely on UP, where, when a task blocks on IO, we account
5457 * idle time as IO-wait, because if the storage were faster, it could've been
5458 * running and we'd not be idle.
5459 *
5460 * This has been extended to SMP, by doing the same for each CPU. This however
5461 * is broken.
5462 *
5463 * Imagine for instance the case where two tasks block on one CPU; only that
5464 * CPU will have IO-wait accounted, while the other has regular idle. Even
5465 * though, if the storage were faster, both could've run at the same time,
5466 * utilising both CPUs.
5467 *
5468 * This means that, when looking globally, the current IO-wait accounting on
5469 * SMP is a lower bound, due to under-accounting.
5470 *
5471 * Worse, since the numbers are provided per CPU, they are sometimes
5472 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5473 * associated with any one particular CPU; it can wake up on a different CPU
5474 * than the one it blocked on. This means the per-CPU IO-wait number is meaningless.
5475 * 5476 * Task CPU affinities can make all that even more 'interesting'. 5477 */ 5478 5479 unsigned int nr_iowait(void) 5480 { 5481 unsigned int i, sum = 0; 5482 5483 for_each_possible_cpu(i) 5484 sum += nr_iowait_cpu(i); 5485 5486 return sum; 5487 } 5488 5489 #ifdef CONFIG_SMP 5490 5491 /* 5492 * sched_exec - execve() is a valuable balancing opportunity, because at 5493 * this point the task has the smallest effective memory and cache footprint. 5494 */ 5495 void sched_exec(void) 5496 { 5497 struct task_struct *p = current; 5498 struct migration_arg arg; 5499 int dest_cpu; 5500 5501 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { 5502 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); 5503 if (dest_cpu == smp_processor_id()) 5504 return; 5505 5506 if (unlikely(!cpu_active(dest_cpu))) 5507 return; 5508 5509 arg = (struct migration_arg){ p, dest_cpu }; 5510 } 5511 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 5512 } 5513 5514 #endif 5515 5516 DEFINE_PER_CPU(struct kernel_stat, kstat); 5517 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 5518 5519 EXPORT_PER_CPU_SYMBOL(kstat); 5520 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 5521 5522 /* 5523 * The function fair_sched_class.update_curr accesses the struct curr 5524 * and its field curr->exec_start; when called from task_sched_runtime(), 5525 * we observe a high rate of cache misses in practice. 5526 * Prefetching this data results in improved performance. 5527 */ 5528 static inline void prefetch_curr_exec_start(struct task_struct *p) 5529 { 5530 #ifdef CONFIG_FAIR_GROUP_SCHED 5531 struct sched_entity *curr = (&p->se)->cfs_rq->curr; 5532 #else 5533 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; 5534 #endif 5535 prefetch(curr); 5536 prefetch(&curr->exec_start); 5537 } 5538 5539 /* 5540 * Return accounted runtime for the task. 5541 * In case the task is currently running, return the runtime plus current's 5542 * pending runtime that have not been accounted yet. 5543 */ 5544 unsigned long long task_sched_runtime(struct task_struct *p) 5545 { 5546 struct rq_flags rf; 5547 struct rq *rq; 5548 u64 ns; 5549 5550 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 5551 /* 5552 * 64-bit doesn't need locks to atomically read a 64-bit value. 5553 * So we have a optimization chance when the task's delta_exec is 0. 5554 * Reading ->on_cpu is racy, but this is ok. 5555 * 5556 * If we race with it leaving CPU, we'll take a lock. So we're correct. 5557 * If we race with it entering CPU, unaccounted time is 0. This is 5558 * indistinguishable from the read occurring a few cycles earlier. 5559 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 5560 * been accounted, so we're correct here as well. 5561 */ 5562 if (!p->on_cpu || !task_on_rq_queued(p)) 5563 return p->se.sum_exec_runtime; 5564 #endif 5565 5566 rq = task_rq_lock(p, &rf); 5567 /* 5568 * Must be ->curr _and_ ->on_rq. If dequeued, we would 5569 * project cycles that may never be accounted to this 5570 * thread, breaking clock_gettime(). 
5571 */ 5572 if (task_current(rq, p) && task_on_rq_queued(p)) { 5573 prefetch_curr_exec_start(p); 5574 update_rq_clock(rq); 5575 p->sched_class->update_curr(rq); 5576 } 5577 ns = p->se.sum_exec_runtime; 5578 task_rq_unlock(rq, p, &rf); 5579 5580 return ns; 5581 } 5582 5583 #ifdef CONFIG_SCHED_DEBUG 5584 static u64 cpu_resched_latency(struct rq *rq) 5585 { 5586 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms); 5587 u64 resched_latency, now = rq_clock(rq); 5588 static bool warned_once; 5589 5590 if (sysctl_resched_latency_warn_once && warned_once) 5591 return 0; 5592 5593 if (!need_resched() || !latency_warn_ms) 5594 return 0; 5595 5596 if (system_state == SYSTEM_BOOTING) 5597 return 0; 5598 5599 if (!rq->last_seen_need_resched_ns) { 5600 rq->last_seen_need_resched_ns = now; 5601 rq->ticks_without_resched = 0; 5602 return 0; 5603 } 5604 5605 rq->ticks_without_resched++; 5606 resched_latency = now - rq->last_seen_need_resched_ns; 5607 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC) 5608 return 0; 5609 5610 warned_once = true; 5611 5612 return resched_latency; 5613 } 5614 5615 static int __init setup_resched_latency_warn_ms(char *str) 5616 { 5617 long val; 5618 5619 if ((kstrtol(str, 0, &val))) { 5620 pr_warn("Unable to set resched_latency_warn_ms\n"); 5621 return 1; 5622 } 5623 5624 sysctl_resched_latency_warn_ms = val; 5625 return 1; 5626 } 5627 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms); 5628 #else 5629 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; } 5630 #endif /* CONFIG_SCHED_DEBUG */ 5631 5632 /* 5633 * This function gets called by the timer code, with HZ frequency. 5634 * We call it with interrupts disabled. 5635 */ 5636 void scheduler_tick(void) 5637 { 5638 int cpu = smp_processor_id(); 5639 struct rq *rq = cpu_rq(cpu); 5640 struct task_struct *curr; 5641 struct rq_flags rf; 5642 unsigned long thermal_pressure; 5643 u64 resched_latency; 5644 5645 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5646 arch_scale_freq_tick(); 5647 5648 sched_clock_tick(); 5649 5650 rq_lock(rq, &rf); 5651 5652 curr = rq->curr; 5653 psi_account_irqtime(rq, curr, NULL); 5654 5655 update_rq_clock(rq); 5656 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); 5657 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); 5658 curr->sched_class->task_tick(rq, curr, 0); 5659 if (sched_feat(LATENCY_WARN)) 5660 resched_latency = cpu_resched_latency(rq); 5661 calc_global_load_tick(rq); 5662 sched_core_tick(rq); 5663 task_tick_mm_cid(rq, curr); 5664 5665 rq_unlock(rq, &rf); 5666 5667 if (sched_feat(LATENCY_WARN) && resched_latency) 5668 resched_latency_warn(cpu, resched_latency); 5669 5670 perf_event_task_tick(); 5671 5672 if (curr->flags & PF_WQ_WORKER) 5673 wq_worker_tick(curr); 5674 5675 #ifdef CONFIG_SMP 5676 rq->idle_balance = idle_cpu(cpu); 5677 trigger_load_balance(rq); 5678 #endif 5679 } 5680 5681 #ifdef CONFIG_NO_HZ_FULL 5682 5683 struct tick_work { 5684 int cpu; 5685 atomic_t state; 5686 struct delayed_work work; 5687 }; 5688 /* Values for ->state, see diagram below. 
*/ 5689 #define TICK_SCHED_REMOTE_OFFLINE 0 5690 #define TICK_SCHED_REMOTE_OFFLINING 1 5691 #define TICK_SCHED_REMOTE_RUNNING 2 5692 5693 /* 5694 * State diagram for ->state: 5695 * 5696 * 5697 * TICK_SCHED_REMOTE_OFFLINE 5698 * | ^ 5699 * | | 5700 * | | sched_tick_remote() 5701 * | | 5702 * | | 5703 * +--TICK_SCHED_REMOTE_OFFLINING 5704 * | ^ 5705 * | | 5706 * sched_tick_start() | | sched_tick_stop() 5707 * | | 5708 * V | 5709 * TICK_SCHED_REMOTE_RUNNING 5710 * 5711 * 5712 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() 5713 * and sched_tick_start() are happy to leave the state in RUNNING. 5714 */ 5715 5716 static struct tick_work __percpu *tick_work_cpu; 5717 5718 static void sched_tick_remote(struct work_struct *work) 5719 { 5720 struct delayed_work *dwork = to_delayed_work(work); 5721 struct tick_work *twork = container_of(dwork, struct tick_work, work); 5722 int cpu = twork->cpu; 5723 struct rq *rq = cpu_rq(cpu); 5724 int os; 5725 5726 /* 5727 * Handle the tick only if it appears the remote CPU is running in full 5728 * dynticks mode. The check is racy by nature, but missing a tick or 5729 * having one too much is no big deal because the scheduler tick updates 5730 * statistics and checks timeslices in a time-independent way, regardless 5731 * of when exactly it is running. 5732 */ 5733 if (tick_nohz_tick_stopped_cpu(cpu)) { 5734 guard(rq_lock_irq)(rq); 5735 struct task_struct *curr = rq->curr; 5736 5737 if (cpu_online(cpu)) { 5738 update_rq_clock(rq); 5739 5740 if (!is_idle_task(curr)) { 5741 /* 5742 * Make sure the next tick runs within a 5743 * reasonable amount of time. 5744 */ 5745 u64 delta = rq_clock_task(rq) - curr->se.exec_start; 5746 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); 5747 } 5748 curr->sched_class->task_tick(rq, curr, 0); 5749 5750 calc_load_nohz_remote(rq); 5751 } 5752 } 5753 5754 /* 5755 * Run the remote tick once per second (1Hz). This arbitrary 5756 * frequency is large enough to avoid overload but short enough 5757 * to keep scheduler internal stats reasonably up to date. But 5758 * first update state to reflect hotplug activity if required. 5759 */ 5760 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); 5761 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); 5762 if (os == TICK_SCHED_REMOTE_RUNNING) 5763 queue_delayed_work(system_unbound_wq, dwork, HZ); 5764 } 5765 5766 static void sched_tick_start(int cpu) 5767 { 5768 int os; 5769 struct tick_work *twork; 5770 5771 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5772 return; 5773 5774 WARN_ON_ONCE(!tick_work_cpu); 5775 5776 twork = per_cpu_ptr(tick_work_cpu, cpu); 5777 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); 5778 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); 5779 if (os == TICK_SCHED_REMOTE_OFFLINE) { 5780 twork->cpu = cpu; 5781 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); 5782 queue_delayed_work(system_unbound_wq, &twork->work, HZ); 5783 } 5784 } 5785 5786 #ifdef CONFIG_HOTPLUG_CPU 5787 static void sched_tick_stop(int cpu) 5788 { 5789 struct tick_work *twork; 5790 int os; 5791 5792 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5793 return; 5794 5795 WARN_ON_ONCE(!tick_work_cpu); 5796 5797 twork = per_cpu_ptr(tick_work_cpu, cpu); 5798 /* There cannot be competing actions, but don't rely on stop-machine. */ 5799 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); 5800 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); 5801 /* Don't cancel, as this would mess up the state machine. 
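 * The next sched_tick_remote() invocation will observe
 * TICK_SCHED_REMOTE_OFFLINING, step the state down to
 * TICK_SCHED_REMOTE_OFFLINE and not re-queue itself, which is how the work
 * eventually stops.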
*/ 5802 } 5803 #endif /* CONFIG_HOTPLUG_CPU */ 5804 5805 int __init sched_tick_offload_init(void) 5806 { 5807 tick_work_cpu = alloc_percpu(struct tick_work); 5808 BUG_ON(!tick_work_cpu); 5809 return 0; 5810 } 5811 5812 #else /* !CONFIG_NO_HZ_FULL */ 5813 static inline void sched_tick_start(int cpu) { } 5814 static inline void sched_tick_stop(int cpu) { } 5815 #endif 5816 5817 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ 5818 defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 5819 /* 5820 * If the value passed in is equal to the current preempt count 5821 * then we just disabled preemption. Start timing the latency. 5822 */ 5823 static inline void preempt_latency_start(int val) 5824 { 5825 if (preempt_count() == val) { 5826 unsigned long ip = get_lock_parent_ip(); 5827 #ifdef CONFIG_DEBUG_PREEMPT 5828 current->preempt_disable_ip = ip; 5829 #endif 5830 trace_preempt_off(CALLER_ADDR0, ip); 5831 } 5832 } 5833 5834 void preempt_count_add(int val) 5835 { 5836 #ifdef CONFIG_DEBUG_PREEMPT 5837 /* 5838 * Underflow? 5839 */ 5840 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 5841 return; 5842 #endif 5843 __preempt_count_add(val); 5844 #ifdef CONFIG_DEBUG_PREEMPT 5845 /* 5846 * Spinlock count overflowing soon? 5847 */ 5848 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 5849 PREEMPT_MASK - 10); 5850 #endif 5851 preempt_latency_start(val); 5852 } 5853 EXPORT_SYMBOL(preempt_count_add); 5854 NOKPROBE_SYMBOL(preempt_count_add); 5855 5856 /* 5857 * If the value passed in equals to the current preempt count 5858 * then we just enabled preemption. Stop timing the latency. 5859 */ 5860 static inline void preempt_latency_stop(int val) 5861 { 5862 if (preempt_count() == val) 5863 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 5864 } 5865 5866 void preempt_count_sub(int val) 5867 { 5868 #ifdef CONFIG_DEBUG_PREEMPT 5869 /* 5870 * Underflow? 5871 */ 5872 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 5873 return; 5874 /* 5875 * Is the spinlock portion underflowing? 
5876 */ 5877 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 5878 !(preempt_count() & PREEMPT_MASK))) 5879 return; 5880 #endif 5881 5882 preempt_latency_stop(val); 5883 __preempt_count_sub(val); 5884 } 5885 EXPORT_SYMBOL(preempt_count_sub); 5886 NOKPROBE_SYMBOL(preempt_count_sub); 5887 5888 #else 5889 static inline void preempt_latency_start(int val) { } 5890 static inline void preempt_latency_stop(int val) { } 5891 #endif 5892 5893 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 5894 { 5895 #ifdef CONFIG_DEBUG_PREEMPT 5896 return p->preempt_disable_ip; 5897 #else 5898 return 0; 5899 #endif 5900 } 5901 5902 /* 5903 * Print scheduling while atomic bug: 5904 */ 5905 static noinline void __schedule_bug(struct task_struct *prev) 5906 { 5907 /* Save this before calling printk(), since that will clobber it */ 5908 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 5909 5910 if (oops_in_progress) 5911 return; 5912 5913 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 5914 prev->comm, prev->pid, preempt_count()); 5915 5916 debug_show_held_locks(prev); 5917 print_modules(); 5918 if (irqs_disabled()) 5919 print_irqtrace_events(prev); 5920 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 5921 && in_atomic_preempt_off()) { 5922 pr_err("Preemption disabled at:"); 5923 print_ip_sym(KERN_ERR, preempt_disable_ip); 5924 } 5925 check_panic_on_warn("scheduling while atomic"); 5926 5927 dump_stack(); 5928 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 5929 } 5930 5931 /* 5932 * Various schedule()-time debugging checks and statistics: 5933 */ 5934 static inline void schedule_debug(struct task_struct *prev, bool preempt) 5935 { 5936 #ifdef CONFIG_SCHED_STACK_END_CHECK 5937 if (task_stack_end_corrupted(prev)) 5938 panic("corrupted stack end detected inside scheduler\n"); 5939 5940 if (task_scs_end_corrupted(prev)) 5941 panic("corrupted shadow stack detected inside scheduler\n"); 5942 #endif 5943 5944 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 5945 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { 5946 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", 5947 prev->comm, prev->pid, prev->non_block_count); 5948 dump_stack(); 5949 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 5950 } 5951 #endif 5952 5953 if (unlikely(in_atomic_preempt_off())) { 5954 __schedule_bug(prev); 5955 preempt_count_set(PREEMPT_DISABLED); 5956 } 5957 rcu_sleep_check(); 5958 SCHED_WARN_ON(ct_state() == CONTEXT_USER); 5959 5960 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 5961 5962 schedstat_inc(this_rq()->sched_count); 5963 } 5964 5965 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, 5966 struct rq_flags *rf) 5967 { 5968 #ifdef CONFIG_SMP 5969 const struct sched_class *class; 5970 /* 5971 * We must do the balancing pass before put_prev_task(), such 5972 * that when we release the rq->lock the task is in the same 5973 * state as before we took rq->lock. 5974 * 5975 * We can terminate the balance pass as soon as we know there is 5976 * a runnable task of @class priority or higher. 
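 *
 * For example (illustrative): if @prev is an RT task and the RT class
 * balance callback reports a runnable task, running the fair class callback
 * as well is pointless -- the subsequent pick cannot select a fair task
 * anyway.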
5977 */ 5978 for_class_range(class, prev->sched_class, &idle_sched_class) { 5979 if (class->balance(rq, prev, rf)) 5980 break; 5981 } 5982 #endif 5983 5984 put_prev_task(rq, prev); 5985 } 5986 5987 /* 5988 * Pick up the highest-prio task: 5989 */ 5990 static inline struct task_struct * 5991 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 5992 { 5993 const struct sched_class *class; 5994 struct task_struct *p; 5995 5996 /* 5997 * Optimization: we know that if all tasks are in the fair class we can 5998 * call that function directly, but only if the @prev task wasn't of a 5999 * higher scheduling class, because otherwise those lose the 6000 * opportunity to pull in more work from other CPUs. 6001 */ 6002 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && 6003 rq->nr_running == rq->cfs.h_nr_running)) { 6004 6005 p = pick_next_task_fair(rq, prev, rf); 6006 if (unlikely(p == RETRY_TASK)) 6007 goto restart; 6008 6009 /* Assume the next prioritized class is idle_sched_class */ 6010 if (!p) { 6011 put_prev_task(rq, prev); 6012 p = pick_next_task_idle(rq); 6013 } 6014 6015 return p; 6016 } 6017 6018 restart: 6019 put_prev_task_balance(rq, prev, rf); 6020 6021 for_each_class(class) { 6022 p = class->pick_next_task(rq); 6023 if (p) 6024 return p; 6025 } 6026 6027 BUG(); /* The idle class should always have a runnable task. */ 6028 } 6029 6030 #ifdef CONFIG_SCHED_CORE 6031 static inline bool is_task_rq_idle(struct task_struct *t) 6032 { 6033 return (task_rq(t)->idle == t); 6034 } 6035 6036 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie) 6037 { 6038 return is_task_rq_idle(a) || (a->core_cookie == cookie); 6039 } 6040 6041 static inline bool cookie_match(struct task_struct *a, struct task_struct *b) 6042 { 6043 if (is_task_rq_idle(a) || is_task_rq_idle(b)) 6044 return true; 6045 6046 return a->core_cookie == b->core_cookie; 6047 } 6048 6049 static inline struct task_struct *pick_task(struct rq *rq) 6050 { 6051 const struct sched_class *class; 6052 struct task_struct *p; 6053 6054 for_each_class(class) { 6055 p = class->pick_task(rq); 6056 if (p) 6057 return p; 6058 } 6059 6060 BUG(); /* The idle class should always have a runnable task. */ 6061 } 6062 6063 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi); 6064 6065 static void queue_core_balance(struct rq *rq); 6066 6067 static struct task_struct * 6068 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6069 { 6070 struct task_struct *next, *p, *max = NULL; 6071 const struct cpumask *smt_mask; 6072 bool fi_before = false; 6073 bool core_clock_updated = (rq == rq->core); 6074 unsigned long cookie; 6075 int i, cpu, occ = 0; 6076 struct rq *rq_i; 6077 bool need_sync; 6078 6079 if (!sched_core_enabled(rq)) 6080 return __pick_next_task(rq, prev, rf); 6081 6082 cpu = cpu_of(rq); 6083 6084 /* Stopper task is switching into idle, no need core-wide selection. */ 6085 if (cpu_is_offline(cpu)) { 6086 /* 6087 * Reset core_pick so that we don't enter the fastpath when 6088 * coming online. core_pick would already be migrated to 6089 * another cpu during offline. 6090 */ 6091 rq->core_pick = NULL; 6092 return __pick_next_task(rq, prev, rf); 6093 } 6094 6095 /* 6096 * If there were no {en,de}queues since we picked (IOW, the task 6097 * pointers are all still valid), and we haven't scheduled the last 6098 * pick yet, do so now. 
6099 * 6100 * rq->core_pick can be NULL if no selection was made for a CPU because 6101 * it was either offline or went offline during a sibling's core-wide 6102 * selection. In this case, do a core-wide selection. 6103 */ 6104 if (rq->core->core_pick_seq == rq->core->core_task_seq && 6105 rq->core->core_pick_seq != rq->core_sched_seq && 6106 rq->core_pick) { 6107 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); 6108 6109 next = rq->core_pick; 6110 if (next != prev) { 6111 put_prev_task(rq, prev); 6112 set_next_task(rq, next); 6113 } 6114 6115 rq->core_pick = NULL; 6116 goto out; 6117 } 6118 6119 put_prev_task_balance(rq, prev, rf); 6120 6121 smt_mask = cpu_smt_mask(cpu); 6122 need_sync = !!rq->core->core_cookie; 6123 6124 /* reset state */ 6125 rq->core->core_cookie = 0UL; 6126 if (rq->core->core_forceidle_count) { 6127 if (!core_clock_updated) { 6128 update_rq_clock(rq->core); 6129 core_clock_updated = true; 6130 } 6131 sched_core_account_forceidle(rq); 6132 /* reset after accounting force idle */ 6133 rq->core->core_forceidle_start = 0; 6134 rq->core->core_forceidle_count = 0; 6135 rq->core->core_forceidle_occupation = 0; 6136 need_sync = true; 6137 fi_before = true; 6138 } 6139 6140 /* 6141 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq 6142 * 6143 * @task_seq guards the task state ({en,de}queues) 6144 * @pick_seq is the @task_seq we did a selection on 6145 * @sched_seq is the @pick_seq we scheduled 6146 * 6147 * However, preemptions can cause multiple picks on the same task set. 6148 * 'Fix' this by also increasing @task_seq for every pick. 6149 */ 6150 rq->core->core_task_seq++; 6151 6152 /* 6153 * Optimize for common case where this CPU has no cookies 6154 * and there are no cookied tasks running on siblings. 6155 */ 6156 if (!need_sync) { 6157 next = pick_task(rq); 6158 if (!next->core_cookie) { 6159 rq->core_pick = NULL; 6160 /* 6161 * For robustness, update the min_vruntime_fi for 6162 * unconstrained picks as well. 6163 */ 6164 WARN_ON_ONCE(fi_before); 6165 task_vruntime_update(rq, next, false); 6166 goto out_set_next; 6167 } 6168 } 6169 6170 /* 6171 * For each thread: do the regular task pick and find the max prio task 6172 * amongst them. 6173 * 6174 * Tie-break prio towards the current CPU 6175 */ 6176 for_each_cpu_wrap(i, smt_mask, cpu) { 6177 rq_i = cpu_rq(i); 6178 6179 /* 6180 * Current cpu always has its clock updated on entrance to 6181 * pick_next_task(). If the current cpu is not the core, 6182 * the core may also have been updated above. 6183 */ 6184 if (i != cpu && (rq_i != rq->core || !core_clock_updated)) 6185 update_rq_clock(rq_i); 6186 6187 p = rq_i->core_pick = pick_task(rq_i); 6188 if (!max || prio_less(max, p, fi_before)) 6189 max = p; 6190 } 6191 6192 cookie = rq->core->core_cookie = max->core_cookie; 6193 6194 /* 6195 * For each thread: try and find a runnable task that matches @max or 6196 * force idle. 
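 * (A sibling whose own pick does not share @max's cookie gets either a cookie-matching task via sched_core_find() or, failing that, its idle task.)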
6197 */ 6198 for_each_cpu(i, smt_mask) { 6199 rq_i = cpu_rq(i); 6200 p = rq_i->core_pick; 6201 6202 if (!cookie_equals(p, cookie)) { 6203 p = NULL; 6204 if (cookie) 6205 p = sched_core_find(rq_i, cookie); 6206 if (!p) 6207 p = idle_sched_class.pick_task(rq_i); 6208 } 6209 6210 rq_i->core_pick = p; 6211 6212 if (p == rq_i->idle) { 6213 if (rq_i->nr_running) { 6214 rq->core->core_forceidle_count++; 6215 if (!fi_before) 6216 rq->core->core_forceidle_seq++; 6217 } 6218 } else { 6219 occ++; 6220 } 6221 } 6222 6223 if (schedstat_enabled() && rq->core->core_forceidle_count) { 6224 rq->core->core_forceidle_start = rq_clock(rq->core); 6225 rq->core->core_forceidle_occupation = occ; 6226 } 6227 6228 rq->core->core_pick_seq = rq->core->core_task_seq; 6229 next = rq->core_pick; 6230 rq->core_sched_seq = rq->core->core_pick_seq; 6231 6232 /* Something should have been selected for current CPU */ 6233 WARN_ON_ONCE(!next); 6234 6235 /* 6236 * Reschedule siblings 6237 * 6238 * NOTE: L1TF -- at this point we're no longer running the old task and 6239 * sending an IPI (below) ensures the sibling will no longer be running 6240 * their task. This ensures there is no inter-sibling overlap between 6241 * non-matching user state. 6242 */ 6243 for_each_cpu(i, smt_mask) { 6244 rq_i = cpu_rq(i); 6245 6246 /* 6247 * An online sibling might have gone offline before a task 6248 * could be picked for it, or it might be offline but later 6249 * happen to come online, but its too late and nothing was 6250 * picked for it. That's Ok - it will pick tasks for itself, 6251 * so ignore it. 6252 */ 6253 if (!rq_i->core_pick) 6254 continue; 6255 6256 /* 6257 * Update for new !FI->FI transitions, or if continuing to be in !FI: 6258 * fi_before fi update? 6259 * 0 0 1 6260 * 0 1 1 6261 * 1 0 1 6262 * 1 1 0 6263 */ 6264 if (!(fi_before && rq->core->core_forceidle_count)) 6265 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); 6266 6267 rq_i->core_pick->core_occupation = occ; 6268 6269 if (i == cpu) { 6270 rq_i->core_pick = NULL; 6271 continue; 6272 } 6273 6274 /* Did we break L1TF mitigation requirements? */ 6275 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick)); 6276 6277 if (rq_i->curr == rq_i->core_pick) { 6278 rq_i->core_pick = NULL; 6279 continue; 6280 } 6281 6282 resched_curr(rq_i); 6283 } 6284 6285 out_set_next: 6286 set_next_task(rq, next); 6287 out: 6288 if (rq->core->core_forceidle_count && next == rq->idle) 6289 queue_core_balance(rq); 6290 6291 return next; 6292 } 6293 6294 static bool try_steal_cookie(int this, int that) 6295 { 6296 struct rq *dst = cpu_rq(this), *src = cpu_rq(that); 6297 struct task_struct *p; 6298 unsigned long cookie; 6299 bool success = false; 6300 6301 guard(irq)(); 6302 guard(double_rq_lock)(dst, src); 6303 6304 cookie = dst->core->core_cookie; 6305 if (!cookie) 6306 return false; 6307 6308 if (dst->curr != dst->idle) 6309 return false; 6310 6311 p = sched_core_find(src, cookie); 6312 if (!p) 6313 return false; 6314 6315 do { 6316 if (p == src->core_pick || p == src->curr) 6317 goto next; 6318 6319 if (!is_cpu_allowed(p, this)) 6320 goto next; 6321 6322 if (p->core_occupation > dst->idle->core_occupation) 6323 goto next; 6324 /* 6325 * sched_core_find() and sched_core_next() will ensure 6326 * that task @p is not throttled now, we also need to 6327 * check whether the runqueue of the destination CPU is 6328 * being throttled. 
6329 */ 6330 if (sched_task_is_throttled(p, this)) 6331 goto next; 6332 6333 deactivate_task(src, p, 0); 6334 set_task_cpu(p, this); 6335 activate_task(dst, p, 0); 6336 6337 resched_curr(dst); 6338 6339 success = true; 6340 break; 6341 6342 next: 6343 p = sched_core_next(p, cookie); 6344 } while (p); 6345 6346 return success; 6347 } 6348 6349 static bool steal_cookie_task(int cpu, struct sched_domain *sd) 6350 { 6351 int i; 6352 6353 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) { 6354 if (i == cpu) 6355 continue; 6356 6357 if (need_resched()) 6358 break; 6359 6360 if (try_steal_cookie(cpu, i)) 6361 return true; 6362 } 6363 6364 return false; 6365 } 6366 6367 static void sched_core_balance(struct rq *rq) 6368 { 6369 struct sched_domain *sd; 6370 int cpu = cpu_of(rq); 6371 6372 preempt_disable(); 6373 rcu_read_lock(); 6374 raw_spin_rq_unlock_irq(rq); 6375 for_each_domain(cpu, sd) { 6376 if (need_resched()) 6377 break; 6378 6379 if (steal_cookie_task(cpu, sd)) 6380 break; 6381 } 6382 raw_spin_rq_lock_irq(rq); 6383 rcu_read_unlock(); 6384 preempt_enable(); 6385 } 6386 6387 static DEFINE_PER_CPU(struct balance_callback, core_balance_head); 6388 6389 static void queue_core_balance(struct rq *rq) 6390 { 6391 if (!sched_core_enabled(rq)) 6392 return; 6393 6394 if (!rq->core->core_cookie) 6395 return; 6396 6397 if (!rq->nr_running) /* not forced idle */ 6398 return; 6399 6400 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); 6401 } 6402 6403 DEFINE_LOCK_GUARD_1(core_lock, int, 6404 sched_core_lock(*_T->lock, &_T->flags), 6405 sched_core_unlock(*_T->lock, &_T->flags), 6406 unsigned long flags) 6407 6408 static void sched_core_cpu_starting(unsigned int cpu) 6409 { 6410 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 6411 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; 6412 int t; 6413 6414 guard(core_lock)(&cpu); 6415 6416 WARN_ON_ONCE(rq->core != rq); 6417 6418 /* if we're the first, we'll be our own leader */ 6419 if (cpumask_weight(smt_mask) == 1) 6420 return; 6421 6422 /* find the leader */ 6423 for_each_cpu(t, smt_mask) { 6424 if (t == cpu) 6425 continue; 6426 rq = cpu_rq(t); 6427 if (rq->core == rq) { 6428 core_rq = rq; 6429 break; 6430 } 6431 } 6432 6433 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */ 6434 return; 6435 6436 /* install and validate core_rq */ 6437 for_each_cpu(t, smt_mask) { 6438 rq = cpu_rq(t); 6439 6440 if (t == cpu) 6441 rq->core = core_rq; 6442 6443 WARN_ON_ONCE(rq->core != core_rq); 6444 } 6445 } 6446 6447 static void sched_core_cpu_deactivate(unsigned int cpu) 6448 { 6449 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 6450 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; 6451 int t; 6452 6453 guard(core_lock)(&cpu); 6454 6455 /* if we're the last man standing, nothing to do */ 6456 if (cpumask_weight(smt_mask) == 1) { 6457 WARN_ON_ONCE(rq->core != rq); 6458 return; 6459 } 6460 6461 /* if we're not the leader, nothing to do */ 6462 if (rq->core != rq) 6463 return; 6464 6465 /* find a new leader */ 6466 for_each_cpu(t, smt_mask) { 6467 if (t == cpu) 6468 continue; 6469 core_rq = cpu_rq(t); 6470 break; 6471 } 6472 6473 if (WARN_ON_ONCE(!core_rq)) /* impossible */ 6474 return; 6475 6476 /* copy the shared state to the new leader */ 6477 core_rq->core_task_seq = rq->core_task_seq; 6478 core_rq->core_pick_seq = rq->core_pick_seq; 6479 core_rq->core_cookie = rq->core_cookie; 6480 core_rq->core_forceidle_count = rq->core_forceidle_count; 6481 core_rq->core_forceidle_seq = rq->core_forceidle_seq; 6482 core_rq->core_forceidle_occupation = 
rq->core_forceidle_occupation; 6483 6484 /* 6485 * Accounting edge for forced idle is handled in pick_next_task(). 6486 * Don't need another one here, since the hotplug thread shouldn't 6487 * have a cookie. 6488 */ 6489 core_rq->core_forceidle_start = 0; 6490 6491 /* install new leader */ 6492 for_each_cpu(t, smt_mask) { 6493 rq = cpu_rq(t); 6494 rq->core = core_rq; 6495 } 6496 } 6497 6498 static inline void sched_core_cpu_dying(unsigned int cpu) 6499 { 6500 struct rq *rq = cpu_rq(cpu); 6501 6502 if (rq->core != rq) 6503 rq->core = rq; 6504 } 6505 6506 #else /* !CONFIG_SCHED_CORE */ 6507 6508 static inline void sched_core_cpu_starting(unsigned int cpu) {} 6509 static inline void sched_core_cpu_deactivate(unsigned int cpu) {} 6510 static inline void sched_core_cpu_dying(unsigned int cpu) {} 6511 6512 static struct task_struct * 6513 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6514 { 6515 return __pick_next_task(rq, prev, rf); 6516 } 6517 6518 #endif /* CONFIG_SCHED_CORE */ 6519 6520 /* 6521 * Constants for the sched_mode argument of __schedule(). 6522 * 6523 * The mode argument allows RT enabled kernels to differentiate a 6524 * preemption from blocking on an 'sleeping' spin/rwlock. Note that 6525 * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to 6526 * optimize the AND operation out and just check for zero. 6527 */ 6528 #define SM_NONE 0x0 6529 #define SM_PREEMPT 0x1 6530 #define SM_RTLOCK_WAIT 0x2 6531 6532 #ifndef CONFIG_PREEMPT_RT 6533 # define SM_MASK_PREEMPT (~0U) 6534 #else 6535 # define SM_MASK_PREEMPT SM_PREEMPT 6536 #endif 6537 6538 /* 6539 * __schedule() is the main scheduler function. 6540 * 6541 * The main means of driving the scheduler and thus entering this function are: 6542 * 6543 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 6544 * 6545 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 6546 * paths. For example, see arch/x86/entry_64.S. 6547 * 6548 * To drive preemption between tasks, the scheduler sets the flag in timer 6549 * interrupt handler scheduler_tick(). 6550 * 6551 * 3. Wakeups don't really cause entry into schedule(). They add a 6552 * task to the run-queue and that's it. 6553 * 6554 * Now, if the new task added to the run-queue preempts the current 6555 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 6556 * called on the nearest possible occasion: 6557 * 6558 * - If the kernel is preemptible (CONFIG_PREEMPTION=y): 6559 * 6560 * - in syscall or exception context, at the next outmost 6561 * preempt_enable(). (this might be as soon as the wake_up()'s 6562 * spin_unlock()!) 6563 * 6564 * - in IRQ context, return from interrupt-handler to 6565 * preemptible context 6566 * 6567 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) 6568 * then at the next: 6569 * 6570 * - cond_resched() call 6571 * - explicit schedule() call 6572 * - return from syscall or exception to user-space 6573 * - return from interrupt-handler to user-space 6574 * 6575 * WARNING: must be called with preemption disabled! 
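 * The @sched_mode argument distinguishes a preemption (SM_PREEMPT) and, on PREEMPT_RT, a sleeping-lock wait (SM_RTLOCK_WAIT) from a plain call (SM_NONE); see the SM_* constants above.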
6576 */ 6577 static void __sched notrace __schedule(unsigned int sched_mode) 6578 { 6579 struct task_struct *prev, *next; 6580 unsigned long *switch_count; 6581 unsigned long prev_state; 6582 struct rq_flags rf; 6583 struct rq *rq; 6584 int cpu; 6585 6586 cpu = smp_processor_id(); 6587 rq = cpu_rq(cpu); 6588 prev = rq->curr; 6589 6590 schedule_debug(prev, !!sched_mode); 6591 6592 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL)) 6593 hrtick_clear(rq); 6594 6595 local_irq_disable(); 6596 rcu_note_context_switch(!!sched_mode); 6597 6598 /* 6599 * Make sure that signal_pending_state()->signal_pending() below 6600 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 6601 * done by the caller to avoid the race with signal_wake_up(): 6602 * 6603 * __set_current_state(@state) signal_wake_up() 6604 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) 6605 * wake_up_state(p, state) 6606 * LOCK rq->lock LOCK p->pi_state 6607 * smp_mb__after_spinlock() smp_mb__after_spinlock() 6608 * if (signal_pending_state()) if (p->state & @state) 6609 * 6610 * Also, the membarrier system call requires a full memory barrier 6611 * after coming from user-space, before storing to rq->curr. 6612 */ 6613 rq_lock(rq, &rf); 6614 smp_mb__after_spinlock(); 6615 6616 /* Promote REQ to ACT */ 6617 rq->clock_update_flags <<= 1; 6618 update_rq_clock(rq); 6619 rq->clock_update_flags = RQCF_UPDATED; 6620 6621 switch_count = &prev->nivcsw; 6622 6623 /* 6624 * We must load prev->state once (task_struct::state is volatile), such 6625 * that we form a control dependency vs deactivate_task() below. 6626 */ 6627 prev_state = READ_ONCE(prev->__state); 6628 if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) { 6629 if (signal_pending_state(prev_state, prev)) { 6630 WRITE_ONCE(prev->__state, TASK_RUNNING); 6631 } else { 6632 prev->sched_contributes_to_load = 6633 (prev_state & TASK_UNINTERRUPTIBLE) && 6634 !(prev_state & TASK_NOLOAD) && 6635 !(prev_state & TASK_FROZEN); 6636 6637 if (prev->sched_contributes_to_load) 6638 rq->nr_uninterruptible++; 6639 6640 /* 6641 * __schedule() ttwu() 6642 * prev_state = prev->state; if (p->on_rq && ...) 6643 * if (prev_state) goto out; 6644 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); 6645 * p->state = TASK_WAKING 6646 * 6647 * Where __schedule() and ttwu() have matching control dependencies. 6648 * 6649 * After this, schedule() must not care about p->state any more. 6650 */ 6651 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 6652 6653 if (prev->in_iowait) { 6654 atomic_inc(&rq->nr_iowait); 6655 delayacct_blkio_start(); 6656 } 6657 } 6658 switch_count = &prev->nvcsw; 6659 } 6660 6661 next = pick_next_task(rq, prev, &rf); 6662 clear_tsk_need_resched(prev); 6663 clear_preempt_need_resched(); 6664 #ifdef CONFIG_SCHED_DEBUG 6665 rq->last_seen_need_resched_ns = 0; 6666 #endif 6667 6668 if (likely(prev != next)) { 6669 rq->nr_switches++; 6670 /* 6671 * RCU users of rcu_dereference(rq->curr) may not see 6672 * changes to task_struct made by pick_next_task(). 6673 */ 6674 RCU_INIT_POINTER(rq->curr, next); 6675 /* 6676 * The membarrier system call requires each architecture 6677 * to have a full memory barrier after updating 6678 * rq->curr, before returning to user-space. 6679 * 6680 * Here are the schemes providing that barrier on the 6681 * various architectures: 6682 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC, 6683 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm() 6684 * on PowerPC and on RISC-V. 
6685 * - finish_lock_switch() for weakly-ordered 6686 * architectures where spin_unlock is a full barrier, 6687 * - switch_to() for arm64 (weakly-ordered, spin_unlock 6688 * is a RELEASE barrier), 6689 */ 6690 ++*switch_count; 6691 6692 migrate_disable_switch(rq, prev); 6693 psi_account_irqtime(rq, prev, next); 6694 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); 6695 6696 trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state); 6697 6698 /* Also unlocks the rq: */ 6699 rq = context_switch(rq, prev, next, &rf); 6700 } else { 6701 rq_unpin_lock(rq, &rf); 6702 __balance_callbacks(rq); 6703 raw_spin_rq_unlock_irq(rq); 6704 } 6705 } 6706 6707 void __noreturn do_task_dead(void) 6708 { 6709 /* Causes final put_task_struct in finish_task_switch(): */ 6710 set_special_state(TASK_DEAD); 6711 6712 /* Tell freezer to ignore us: */ 6713 current->flags |= PF_NOFREEZE; 6714 6715 __schedule(SM_NONE); 6716 BUG(); 6717 6718 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 6719 for (;;) 6720 cpu_relax(); 6721 } 6722 6723 static inline void sched_submit_work(struct task_struct *tsk) 6724 { 6725 unsigned int task_flags; 6726 6727 if (task_is_running(tsk)) 6728 return; 6729 6730 task_flags = tsk->flags; 6731 /* 6732 * If a worker goes to sleep, notify and ask workqueue whether it 6733 * wants to wake up a task to maintain concurrency. 6734 */ 6735 if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 6736 if (task_flags & PF_WQ_WORKER) 6737 wq_worker_sleeping(tsk); 6738 else 6739 io_wq_worker_sleeping(tsk); 6740 } 6741 6742 /* 6743 * spinlock and rwlock must not flush block requests. This will 6744 * deadlock if the callback attempts to acquire a lock which is 6745 * already acquired. 6746 */ 6747 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT); 6748 6749 /* 6750 * If we are going to sleep and we have plugged IO queued, 6751 * make sure to submit it to avoid deadlocks. 6752 */ 6753 blk_flush_plug(tsk->plug, true); 6754 } 6755 6756 static void sched_update_worker(struct task_struct *tsk) 6757 { 6758 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 6759 if (tsk->flags & PF_WQ_WORKER) 6760 wq_worker_running(tsk); 6761 else 6762 io_wq_worker_running(tsk); 6763 } 6764 } 6765 6766 asmlinkage __visible void __sched schedule(void) 6767 { 6768 struct task_struct *tsk = current; 6769 6770 sched_submit_work(tsk); 6771 do { 6772 preempt_disable(); 6773 __schedule(SM_NONE); 6774 sched_preempt_enable_no_resched(); 6775 } while (need_resched()); 6776 sched_update_worker(tsk); 6777 } 6778 EXPORT_SYMBOL(schedule); 6779 6780 /* 6781 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted 6782 * state (have scheduled out non-voluntarily) by making sure that all 6783 * tasks have either left the run queue or have gone into user space. 6784 * As idle tasks do not do either, they must not ever be preempted 6785 * (schedule out non-voluntarily). 6786 * 6787 * schedule_idle() is similar to schedule_preempt_disable() except that it 6788 * never enables preemption because it does not call sched_submit_work(). 6789 */ 6790 void __sched schedule_idle(void) 6791 { 6792 /* 6793 * As this skips calling sched_submit_work(), which the idle task does 6794 * regardless because that function is a nop when the task is in a 6795 * TASK_RUNNING state, make sure this isn't used someplace that the 6796 * current task can be in any other state. Note, idle is always in the 6797 * TASK_RUNNING state. 
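 * (Hence the WARN_ON_ONCE(current->__state) below: a non-zero __state would indicate a caller that is not in TASK_RUNNING.)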
6798 */ 6799 WARN_ON_ONCE(current->__state); 6800 do { 6801 __schedule(SM_NONE); 6802 } while (need_resched()); 6803 } 6804 6805 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) 6806 asmlinkage __visible void __sched schedule_user(void) 6807 { 6808 /* 6809 * If we come here after a random call to set_need_resched(), 6810 * or we have been woken up remotely but the IPI has not yet arrived, 6811 * we haven't yet exited the RCU idle mode. Do it here manually until 6812 * we find a better solution. 6813 * 6814 * NB: There are buggy callers of this function. Ideally we 6815 * should warn if prev_state != CONTEXT_USER, but that will trigger 6816 * too frequently to make sense yet. 6817 */ 6818 enum ctx_state prev_state = exception_enter(); 6819 schedule(); 6820 exception_exit(prev_state); 6821 } 6822 #endif 6823 6824 /** 6825 * schedule_preempt_disabled - called with preemption disabled 6826 * 6827 * Returns with preemption disabled. Note: preempt_count must be 1 6828 */ 6829 void __sched schedule_preempt_disabled(void) 6830 { 6831 sched_preempt_enable_no_resched(); 6832 schedule(); 6833 preempt_disable(); 6834 } 6835 6836 #ifdef CONFIG_PREEMPT_RT 6837 void __sched notrace schedule_rtlock(void) 6838 { 6839 do { 6840 preempt_disable(); 6841 __schedule(SM_RTLOCK_WAIT); 6842 sched_preempt_enable_no_resched(); 6843 } while (need_resched()); 6844 } 6845 NOKPROBE_SYMBOL(schedule_rtlock); 6846 #endif 6847 6848 static void __sched notrace preempt_schedule_common(void) 6849 { 6850 do { 6851 /* 6852 * Because the function tracer can trace preempt_count_sub() 6853 * and it also uses preempt_enable/disable_notrace(), if 6854 * NEED_RESCHED is set, the preempt_enable_notrace() called 6855 * by the function tracer will call this function again and 6856 * cause infinite recursion. 6857 * 6858 * Preemption must be disabled here before the function 6859 * tracer can trace. Break up preempt_disable() into two 6860 * calls. One to disable preemption without fear of being 6861 * traced. The other to still record the preemption latency, 6862 * which can also be traced by the function tracer. 6863 */ 6864 preempt_disable_notrace(); 6865 preempt_latency_start(1); 6866 __schedule(SM_PREEMPT); 6867 preempt_latency_stop(1); 6868 preempt_enable_no_resched_notrace(); 6869 6870 /* 6871 * Check again in case we missed a preemption opportunity 6872 * between schedule and now. 6873 */ 6874 } while (need_resched()); 6875 } 6876 6877 #ifdef CONFIG_PREEMPTION 6878 /* 6879 * This is the entry point to schedule() from in-kernel preemption 6880 * off of preempt_enable. 6881 */ 6882 asmlinkage __visible void __sched notrace preempt_schedule(void) 6883 { 6884 /* 6885 * If there is a non-zero preempt_count or interrupts are disabled, 6886 * we do not want to preempt the current task. Just return.. 
6887 */ 6888 if (likely(!preemptible())) 6889 return; 6890 preempt_schedule_common(); 6891 } 6892 NOKPROBE_SYMBOL(preempt_schedule); 6893 EXPORT_SYMBOL(preempt_schedule); 6894 6895 #ifdef CONFIG_PREEMPT_DYNAMIC 6896 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 6897 #ifndef preempt_schedule_dynamic_enabled 6898 #define preempt_schedule_dynamic_enabled preempt_schedule 6899 #define preempt_schedule_dynamic_disabled NULL 6900 #endif 6901 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled); 6902 EXPORT_STATIC_CALL_TRAMP(preempt_schedule); 6903 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 6904 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule); 6905 void __sched notrace dynamic_preempt_schedule(void) 6906 { 6907 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule)) 6908 return; 6909 preempt_schedule(); 6910 } 6911 NOKPROBE_SYMBOL(dynamic_preempt_schedule); 6912 EXPORT_SYMBOL(dynamic_preempt_schedule); 6913 #endif 6914 #endif 6915 6916 /** 6917 * preempt_schedule_notrace - preempt_schedule called by tracing 6918 * 6919 * The tracing infrastructure uses preempt_enable_notrace to prevent 6920 * recursion and tracing preempt enabling caused by the tracing 6921 * infrastructure itself. But as tracing can happen in areas coming 6922 * from userspace or just about to enter userspace, a preempt enable 6923 * can occur before user_exit() is called. This will cause the scheduler 6924 * to be called when the system is still in usermode. 6925 * 6926 * To prevent this, the preempt_enable_notrace will use this function 6927 * instead of preempt_schedule() to exit user context if needed before 6928 * calling the scheduler. 6929 */ 6930 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 6931 { 6932 enum ctx_state prev_ctx; 6933 6934 if (likely(!preemptible())) 6935 return; 6936 6937 do { 6938 /* 6939 * Because the function tracer can trace preempt_count_sub() 6940 * and it also uses preempt_enable/disable_notrace(), if 6941 * NEED_RESCHED is set, the preempt_enable_notrace() called 6942 * by the function tracer will call this function again and 6943 * cause infinite recursion. 6944 * 6945 * Preemption must be disabled here before the function 6946 * tracer can trace. Break up preempt_disable() into two 6947 * calls. One to disable preemption without fear of being 6948 * traced. The other to still record the preemption latency, 6949 * which can also be traced by the function tracer. 6950 */ 6951 preempt_disable_notrace(); 6952 preempt_latency_start(1); 6953 /* 6954 * Needs preempt disabled in case user_exit() is traced 6955 * and the tracer calls preempt_enable_notrace() causing 6956 * an infinite recursion. 
6957 */ 6958 prev_ctx = exception_enter(); 6959 __schedule(SM_PREEMPT); 6960 exception_exit(prev_ctx); 6961 6962 preempt_latency_stop(1); 6963 preempt_enable_no_resched_notrace(); 6964 } while (need_resched()); 6965 } 6966 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 6967 6968 #ifdef CONFIG_PREEMPT_DYNAMIC 6969 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 6970 #ifndef preempt_schedule_notrace_dynamic_enabled 6971 #define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace 6972 #define preempt_schedule_notrace_dynamic_disabled NULL 6973 #endif 6974 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled); 6975 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace); 6976 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 6977 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace); 6978 void __sched notrace dynamic_preempt_schedule_notrace(void) 6979 { 6980 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace)) 6981 return; 6982 preempt_schedule_notrace(); 6983 } 6984 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace); 6985 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace); 6986 #endif 6987 #endif 6988 6989 #endif /* CONFIG_PREEMPTION */ 6990 6991 /* 6992 * This is the entry point to schedule() from kernel preemption 6993 * off of irq context. 6994 * Note, that this is called and return with irqs disabled. This will 6995 * protect us against recursive calling from irq. 6996 */ 6997 asmlinkage __visible void __sched preempt_schedule_irq(void) 6998 { 6999 enum ctx_state prev_state; 7000 7001 /* Catch callers which need to be fixed */ 7002 BUG_ON(preempt_count() || !irqs_disabled()); 7003 7004 prev_state = exception_enter(); 7005 7006 do { 7007 preempt_disable(); 7008 local_irq_enable(); 7009 __schedule(SM_PREEMPT); 7010 local_irq_disable(); 7011 sched_preempt_enable_no_resched(); 7012 } while (need_resched()); 7013 7014 exception_exit(prev_state); 7015 } 7016 7017 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 7018 void *key) 7019 { 7020 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU)); 7021 return try_to_wake_up(curr->private, mode, wake_flags); 7022 } 7023 EXPORT_SYMBOL(default_wake_function); 7024 7025 static void __setscheduler_prio(struct task_struct *p, int prio) 7026 { 7027 if (dl_prio(prio)) 7028 p->sched_class = &dl_sched_class; 7029 else if (rt_prio(prio)) 7030 p->sched_class = &rt_sched_class; 7031 else 7032 p->sched_class = &fair_sched_class; 7033 7034 p->prio = prio; 7035 } 7036 7037 #ifdef CONFIG_RT_MUTEXES 7038 7039 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) 7040 { 7041 if (pi_task) 7042 prio = min(prio, pi_task->prio); 7043 7044 return prio; 7045 } 7046 7047 static inline int rt_effective_prio(struct task_struct *p, int prio) 7048 { 7049 struct task_struct *pi_task = rt_mutex_get_top_task(p); 7050 7051 return __rt_effective_prio(pi_task, prio); 7052 } 7053 7054 /* 7055 * rt_mutex_setprio - set the current priority of a task 7056 * @p: task to boost 7057 * @pi_task: donor task 7058 * 7059 * This function changes the 'effective' priority of a task. It does 7060 * not touch ->normal_prio like __setscheduler(). 7061 * 7062 * Used by the rt_mutex code to implement priority inheritance 7063 * logic. Call site only calls if the priority of the task changed. 
7064 */ 7065 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) 7066 { 7067 int prio, oldprio, queued, running, queue_flag = 7068 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 7069 const struct sched_class *prev_class; 7070 struct rq_flags rf; 7071 struct rq *rq; 7072 7073 /* XXX used to be waiter->prio, not waiter->task->prio */ 7074 prio = __rt_effective_prio(pi_task, p->normal_prio); 7075 7076 /* 7077 * If nothing changed; bail early. 7078 */ 7079 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) 7080 return; 7081 7082 rq = __task_rq_lock(p, &rf); 7083 update_rq_clock(rq); 7084 /* 7085 * Set under pi_lock && rq->lock, such that the value can be used under 7086 * either lock. 7087 * 7088 * Note that there is loads of tricky to make this pointer cache work 7089 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to 7090 * ensure a task is de-boosted (pi_task is set to NULL) before the 7091 * task is allowed to run again (and can exit). This ensures the pointer 7092 * points to a blocked task -- which guarantees the task is present. 7093 */ 7094 p->pi_top_task = pi_task; 7095 7096 /* 7097 * For FIFO/RR we only need to set prio, if that matches we're done. 7098 */ 7099 if (prio == p->prio && !dl_prio(prio)) 7100 goto out_unlock; 7101 7102 /* 7103 * Idle task boosting is a nono in general. There is one 7104 * exception, when PREEMPT_RT and NOHZ is active: 7105 * 7106 * The idle task calls get_next_timer_interrupt() and holds 7107 * the timer wheel base->lock on the CPU and another CPU wants 7108 * to access the timer (probably to cancel it). We can safely 7109 * ignore the boosting request, as the idle CPU runs this code 7110 * with interrupts disabled and will complete the lock 7111 * protected section without being interrupted. So there is no 7112 * real need to boost. 7113 */ 7114 if (unlikely(p == rq->idle)) { 7115 WARN_ON(p != rq->curr); 7116 WARN_ON(p->pi_blocked_on); 7117 goto out_unlock; 7118 } 7119 7120 trace_sched_pi_setprio(p, pi_task); 7121 oldprio = p->prio; 7122 7123 if (oldprio == prio) 7124 queue_flag &= ~DEQUEUE_MOVE; 7125 7126 prev_class = p->sched_class; 7127 queued = task_on_rq_queued(p); 7128 running = task_current(rq, p); 7129 if (queued) 7130 dequeue_task(rq, p, queue_flag); 7131 if (running) 7132 put_prev_task(rq, p); 7133 7134 /* 7135 * Boosting condition are: 7136 * 1. -rt task is running and holds mutex A 7137 * --> -dl task blocks on mutex A 7138 * 7139 * 2. 
-dl task is running and holds mutex A 7140 * --> -dl task blocks on mutex A and could preempt the 7141 * running task 7142 */ 7143 if (dl_prio(prio)) { 7144 if (!dl_prio(p->normal_prio) || 7145 (pi_task && dl_prio(pi_task->prio) && 7146 dl_entity_preempt(&pi_task->dl, &p->dl))) { 7147 p->dl.pi_se = pi_task->dl.pi_se; 7148 queue_flag |= ENQUEUE_REPLENISH; 7149 } else { 7150 p->dl.pi_se = &p->dl; 7151 } 7152 } else if (rt_prio(prio)) { 7153 if (dl_prio(oldprio)) 7154 p->dl.pi_se = &p->dl; 7155 if (oldprio < prio) 7156 queue_flag |= ENQUEUE_HEAD; 7157 } else { 7158 if (dl_prio(oldprio)) 7159 p->dl.pi_se = &p->dl; 7160 if (rt_prio(oldprio)) 7161 p->rt.timeout = 0; 7162 } 7163 7164 __setscheduler_prio(p, prio); 7165 7166 if (queued) 7167 enqueue_task(rq, p, queue_flag); 7168 if (running) 7169 set_next_task(rq, p); 7170 7171 check_class_changed(rq, p, prev_class, oldprio); 7172 out_unlock: 7173 /* Avoid rq from going away on us: */ 7174 preempt_disable(); 7175 7176 rq_unpin_lock(rq, &rf); 7177 __balance_callbacks(rq); 7178 raw_spin_rq_unlock(rq); 7179 7180 preempt_enable(); 7181 } 7182 #else 7183 static inline int rt_effective_prio(struct task_struct *p, int prio) 7184 { 7185 return prio; 7186 } 7187 #endif 7188 7189 void set_user_nice(struct task_struct *p, long nice) 7190 { 7191 bool queued, running; 7192 int old_prio; 7193 struct rq_flags rf; 7194 struct rq *rq; 7195 7196 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 7197 return; 7198 /* 7199 * We have to be careful, if called from sys_setpriority(), 7200 * the task might be in the middle of scheduling on another CPU. 7201 */ 7202 rq = task_rq_lock(p, &rf); 7203 update_rq_clock(rq); 7204 7205 /* 7206 * The RT priorities are set via sched_setscheduler(), but we still 7207 * allow the 'normal' nice value to be set - but as expected 7208 * it won't have any effect on scheduling until the task is 7209 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 7210 */ 7211 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 7212 p->static_prio = NICE_TO_PRIO(nice); 7213 goto out_unlock; 7214 } 7215 queued = task_on_rq_queued(p); 7216 running = task_current(rq, p); 7217 if (queued) 7218 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 7219 if (running) 7220 put_prev_task(rq, p); 7221 7222 p->static_prio = NICE_TO_PRIO(nice); 7223 set_load_weight(p, true); 7224 old_prio = p->prio; 7225 p->prio = effective_prio(p); 7226 7227 if (queued) 7228 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 7229 if (running) 7230 set_next_task(rq, p); 7231 7232 /* 7233 * If the task increased its priority or is running and 7234 * lowered its priority, then reschedule its CPU: 7235 */ 7236 p->sched_class->prio_changed(rq, p, old_prio); 7237 7238 out_unlock: 7239 task_rq_unlock(rq, p, &rf); 7240 } 7241 EXPORT_SYMBOL(set_user_nice); 7242 7243 /* 7244 * is_nice_reduction - check if nice value is an actual reduction 7245 * 7246 * Similar to can_nice() but does not perform a capability check. 
7247 * 7248 * @p: task 7249 * @nice: nice value 7250 */ 7251 static bool is_nice_reduction(const struct task_struct *p, const int nice) 7252 { 7253 /* Convert nice value [19,-20] to rlimit style value [1,40]: */ 7254 int nice_rlim = nice_to_rlimit(nice); 7255 7256 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE)); 7257 } 7258 7259 /* 7260 * can_nice - check if a task can reduce its nice value 7261 * @p: task 7262 * @nice: nice value 7263 */ 7264 int can_nice(const struct task_struct *p, const int nice) 7265 { 7266 return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE); 7267 } 7268 7269 #ifdef __ARCH_WANT_SYS_NICE 7270 7271 /* 7272 * sys_nice - change the priority of the current process. 7273 * @increment: priority increment 7274 * 7275 * sys_setpriority is a more generic, but much slower function that 7276 * does similar things. 7277 */ 7278 SYSCALL_DEFINE1(nice, int, increment) 7279 { 7280 long nice, retval; 7281 7282 /* 7283 * Setpriority might change our priority at the same moment. 7284 * We don't have to worry. Conceptually one call occurs first 7285 * and we have a single winner. 7286 */ 7287 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 7288 nice = task_nice(current) + increment; 7289 7290 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 7291 if (increment < 0 && !can_nice(current, nice)) 7292 return -EPERM; 7293 7294 retval = security_task_setnice(current, nice); 7295 if (retval) 7296 return retval; 7297 7298 set_user_nice(current, nice); 7299 return 0; 7300 } 7301 7302 #endif 7303 7304 /** 7305 * task_prio - return the priority value of a given task. 7306 * @p: the task in question. 7307 * 7308 * Return: The priority value as seen by users in /proc. 7309 * 7310 * sched policy return value kernel prio user prio/nice 7311 * 7312 * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19] 7313 * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99] 7314 * deadline -101 -1 0 7315 */ 7316 int task_prio(const struct task_struct *p) 7317 { 7318 return p->prio - MAX_RT_PRIO; 7319 } 7320 7321 /** 7322 * idle_cpu - is a given CPU idle currently? 7323 * @cpu: the processor in question. 7324 * 7325 * Return: 1 if the CPU is currently idle. 0 otherwise. 7326 */ 7327 int idle_cpu(int cpu) 7328 { 7329 struct rq *rq = cpu_rq(cpu); 7330 7331 if (rq->curr != rq->idle) 7332 return 0; 7333 7334 if (rq->nr_running) 7335 return 0; 7336 7337 #ifdef CONFIG_SMP 7338 if (rq->ttwu_pending) 7339 return 0; 7340 #endif 7341 7342 return 1; 7343 } 7344 7345 /** 7346 * available_idle_cpu - is a given CPU idle for enqueuing work. 7347 * @cpu: the CPU in question. 7348 * 7349 * Return: 1 if the CPU is currently idle. 0 otherwise. 7350 */ 7351 int available_idle_cpu(int cpu) 7352 { 7353 if (!idle_cpu(cpu)) 7354 return 0; 7355 7356 if (vcpu_is_preempted(cpu)) 7357 return 0; 7358 7359 return 1; 7360 } 7361 7362 /** 7363 * idle_task - return the idle task for a given CPU. 7364 * @cpu: the processor in question. 7365 * 7366 * Return: The idle task for the CPU @cpu. 7367 */ 7368 struct task_struct *idle_task(int cpu) 7369 { 7370 return cpu_rq(cpu)->idle; 7371 } 7372 7373 #ifdef CONFIG_SCHED_CORE 7374 int sched_core_idle_cpu(int cpu) 7375 { 7376 struct rq *rq = cpu_rq(cpu); 7377 7378 if (sched_core_enabled(rq) && rq->curr == rq->idle) 7379 return 1; 7380 7381 return idle_cpu(cpu); 7382 } 7383 7384 #endif 7385 7386 #ifdef CONFIG_SMP 7387 /* 7388 * This function computes an effective utilization for the given CPU, to be 7389 * used for frequency selection given the linear relation: f = u * f_max. 
7390 * 7391 * The scheduler tracks the following metrics: 7392 * 7393 * cpu_util_{cfs,rt,dl,irq}() 7394 * cpu_bw_dl() 7395 * 7396 * Where the cfs, rt and dl util numbers are tracked with the same metric and 7397 * synchronized windows and are thus directly comparable. 7398 * 7399 * The cfs, rt and dl utilization numbers are the running times measured with rq->clock_task 7400 * which excludes things like IRQ and steal-time. The latter are then accrued 7401 * in the irq utilization. 7402 * 7403 * The DL bandwidth number, on the other hand, is not a measured metric but a value computed 7404 * based on the task model parameters and gives the minimal utilization 7405 * required to meet deadlines. 7406 */ 7407 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 7408 enum cpu_util_type type, 7409 struct task_struct *p) 7410 { 7411 unsigned long dl_util, util, irq, max; 7412 struct rq *rq = cpu_rq(cpu); 7413 7414 max = arch_scale_cpu_capacity(cpu); 7415 7416 if (!uclamp_is_used() && 7417 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) { 7418 return max; 7419 } 7420 7421 /* 7422 * Early check to see if IRQ/steal time saturates the CPU; this can happen 7423 * because of inaccuracies in how we track these -- see 7424 * update_irq_load_avg(). 7425 */ 7426 irq = cpu_util_irq(rq); 7427 if (unlikely(irq >= max)) 7428 return max; 7429 7430 /* 7431 * Because the time spent on RT/DL tasks is visible as 'lost' time to 7432 * CFS tasks and we use the same metric to track the effective 7433 * utilization (PELT windows are synchronized) we can directly add them 7434 * to obtain the CPU's actual utilization. 7435 * 7436 * CFS and RT utilization can be boosted or capped, depending on 7437 * utilization clamp constraints requested by currently RUNNABLE 7438 * tasks. 7439 * When there are no CFS RUNNABLE tasks, clamps are released and 7440 * frequency will be gracefully reduced with the utilization decay. 7441 */ 7442 util = util_cfs + cpu_util_rt(rq); 7443 if (type == FREQUENCY_UTIL) 7444 util = uclamp_rq_util_with(rq, util, p); 7445 7446 dl_util = cpu_util_dl(rq); 7447 7448 /* 7449 * For frequency selection we do not make cpu_util_dl() a permanent part 7450 * of this sum because we want to use cpu_bw_dl() later on, but we need 7451 * to check if the CFS+RT+DL sum is saturated (i.e. no idle time) such 7452 * that we select f_max when there is no idle time. 7453 * 7454 * NOTE: numerical errors or stop class might cause us to not quite hit 7455 * saturation when we should -- something for later. 7456 */ 7457 if (util + dl_util >= max) 7458 return max; 7459 7460 /* 7461 * OTOH, for energy computation we need the estimated running time, so 7462 * include util_dl and ignore dl_bw. 7463 */ 7464 if (type == ENERGY_UTIL) 7465 util += dl_util; 7466 7467 /* 7468 * There is still idle time; further improve the number by using the 7469 * irq metric. Because IRQ/steal time is hidden from the task clock we 7470 * need to scale the task numbers: 7471 * 7472 * U' = irq + ((max - irq) / max) * U 7475 */ 7476 util = scale_irq_capacity(util, irq, max); 7477 util += irq; 7478 7479 /* 7480 * Bandwidth required by DEADLINE must always be granted while, for 7481 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism 7482 * to gracefully reduce the frequency when no tasks show up for longer 7483 * periods of time. 7484 * 7485 * Ideally we would like to set bw_dl as min/guaranteed freq and util + 7486 * bw_dl as requested freq. However, cpufreq is not yet ready for such 7487 * an interface. 
So, we only do the latter for now. 7488 */ 7489 if (type == FREQUENCY_UTIL) 7490 util += cpu_bw_dl(rq); 7491 7492 return min(max, util); 7493 } 7494 7495 unsigned long sched_cpu_util(int cpu) 7496 { 7497 return effective_cpu_util(cpu, cpu_util_cfs(cpu), ENERGY_UTIL, NULL); 7498 } 7499 #endif /* CONFIG_SMP */ 7500 7501 /** 7502 * find_process_by_pid - find a process with a matching PID value. 7503 * @pid: the pid in question. 7504 * 7505 * The task of @pid, if found. %NULL otherwise. 7506 */ 7507 static struct task_struct *find_process_by_pid(pid_t pid) 7508 { 7509 return pid ? find_task_by_vpid(pid) : current; 7510 } 7511 7512 /* 7513 * sched_setparam() passes in -1 for its policy, to let the functions 7514 * it calls know not to change it. 7515 */ 7516 #define SETPARAM_POLICY -1 7517 7518 static void __setscheduler_params(struct task_struct *p, 7519 const struct sched_attr *attr) 7520 { 7521 int policy = attr->sched_policy; 7522 7523 if (policy == SETPARAM_POLICY) 7524 policy = p->policy; 7525 7526 p->policy = policy; 7527 7528 if (dl_policy(policy)) 7529 __setparam_dl(p, attr); 7530 else if (fair_policy(policy)) 7531 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 7532 7533 /* 7534 * __sched_setscheduler() ensures attr->sched_priority == 0 when 7535 * !rt_policy. Always setting this ensures that things like 7536 * getparam()/getattr() don't report silly values for !rt tasks. 7537 */ 7538 p->rt_priority = attr->sched_priority; 7539 p->normal_prio = normal_prio(p); 7540 set_load_weight(p, true); 7541 } 7542 7543 /* 7544 * Check the target process has a UID that matches the current process's: 7545 */ 7546 static bool check_same_owner(struct task_struct *p) 7547 { 7548 const struct cred *cred = current_cred(), *pcred; 7549 bool match; 7550 7551 rcu_read_lock(); 7552 pcred = __task_cred(p); 7553 match = (uid_eq(cred->euid, pcred->euid) || 7554 uid_eq(cred->euid, pcred->uid)); 7555 rcu_read_unlock(); 7556 return match; 7557 } 7558 7559 /* 7560 * Allow unprivileged RT tasks to decrease priority. 7561 * Only issue a capable test if needed and only once to avoid an audit 7562 * event on permitted non-privileged operations: 7563 */ 7564 static int user_check_sched_setscheduler(struct task_struct *p, 7565 const struct sched_attr *attr, 7566 int policy, int reset_on_fork) 7567 { 7568 if (fair_policy(policy)) { 7569 if (attr->sched_nice < task_nice(p) && 7570 !is_nice_reduction(p, attr->sched_nice)) 7571 goto req_priv; 7572 } 7573 7574 if (rt_policy(policy)) { 7575 unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); 7576 7577 /* Can't set/change the rt policy: */ 7578 if (policy != p->policy && !rlim_rtprio) 7579 goto req_priv; 7580 7581 /* Can't increase priority: */ 7582 if (attr->sched_priority > p->rt_priority && 7583 attr->sched_priority > rlim_rtprio) 7584 goto req_priv; 7585 } 7586 7587 /* 7588 * Can't set/change SCHED_DEADLINE policy at all for now 7589 * (safest behavior); in the future we would like to allow 7590 * unprivileged DL tasks to increase their relative deadline 7591 * or reduce their runtime (both ways reducing utilization) 7592 */ 7593 if (dl_policy(policy)) 7594 goto req_priv; 7595 7596 /* 7597 * Treat SCHED_IDLE as nice 20. Only allow a switch to 7598 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 
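 * (That is: leaving SCHED_IDLE without CAP_SYS_NICE requires that RLIMIT_NICE permits the task's current nice value.)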
7599 */ 7600 if (task_has_idle_policy(p) && !idle_policy(policy)) { 7601 if (!is_nice_reduction(p, task_nice(p))) 7602 goto req_priv; 7603 } 7604 7605 /* Can't change other user's priorities: */ 7606 if (!check_same_owner(p)) 7607 goto req_priv; 7608 7609 /* Normal users shall not reset the sched_reset_on_fork flag: */ 7610 if (p->sched_reset_on_fork && !reset_on_fork) 7611 goto req_priv; 7612 7613 return 0; 7614 7615 req_priv: 7616 if (!capable(CAP_SYS_NICE)) 7617 return -EPERM; 7618 7619 return 0; 7620 } 7621 7622 static int __sched_setscheduler(struct task_struct *p, 7623 const struct sched_attr *attr, 7624 bool user, bool pi) 7625 { 7626 int oldpolicy = -1, policy = attr->sched_policy; 7627 int retval, oldprio, newprio, queued, running; 7628 const struct sched_class *prev_class; 7629 struct balance_callback *head; 7630 struct rq_flags rf; 7631 int reset_on_fork; 7632 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 7633 struct rq *rq; 7634 bool cpuset_locked = false; 7635 7636 /* The pi code expects interrupts enabled */ 7637 BUG_ON(pi && in_interrupt()); 7638 recheck: 7639 /* Double check policy once rq lock held: */ 7640 if (policy < 0) { 7641 reset_on_fork = p->sched_reset_on_fork; 7642 policy = oldpolicy = p->policy; 7643 } else { 7644 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 7645 7646 if (!valid_policy(policy)) 7647 return -EINVAL; 7648 } 7649 7650 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) 7651 return -EINVAL; 7652 7653 /* 7654 * Valid priorities for SCHED_FIFO and SCHED_RR are 7655 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL, 7656 * SCHED_BATCH and SCHED_IDLE is 0. 7657 */ 7658 if (attr->sched_priority > MAX_RT_PRIO-1) 7659 return -EINVAL; 7660 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 7661 (rt_policy(policy) != (attr->sched_priority != 0))) 7662 return -EINVAL; 7663 7664 if (user) { 7665 retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork); 7666 if (retval) 7667 return retval; 7668 7669 if (attr->sched_flags & SCHED_FLAG_SUGOV) 7670 return -EINVAL; 7671 7672 retval = security_task_setscheduler(p); 7673 if (retval) 7674 return retval; 7675 } 7676 7677 /* Update task specific "requested" clamps */ 7678 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { 7679 retval = uclamp_validate(p, attr); 7680 if (retval) 7681 return retval; 7682 } 7683 7684 /* 7685 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets 7686 * information. 7687 */ 7688 if (dl_policy(policy) || dl_policy(p->policy)) { 7689 cpuset_locked = true; 7690 cpuset_lock(); 7691 } 7692 7693 /* 7694 * Make sure no PI-waiters arrive (or leave) while we are 7695 * changing the priority of the task: 7696 * 7697 * To be able to change p->policy safely, the appropriate 7698 * runqueue lock must be held. 7699 */ 7700 rq = task_rq_lock(p, &rf); 7701 update_rq_clock(rq); 7702 7703 /* 7704 * Changing the policy of the stop threads its a very bad idea: 7705 */ 7706 if (p == rq->stop) { 7707 retval = -EINVAL; 7708 goto unlock; 7709 } 7710 7711 /* 7712 * If not changing anything there's no need to proceed further, 7713 * but store a possible modification of reset_on_fork. 
7714 */ 7715 if (unlikely(policy == p->policy)) { 7716 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 7717 goto change; 7718 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 7719 goto change; 7720 if (dl_policy(policy) && dl_param_changed(p, attr)) 7721 goto change; 7722 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) 7723 goto change; 7724 7725 p->sched_reset_on_fork = reset_on_fork; 7726 retval = 0; 7727 goto unlock; 7728 } 7729 change: 7730 7731 if (user) { 7732 #ifdef CONFIG_RT_GROUP_SCHED 7733 /* 7734 * Do not allow realtime tasks into groups that have no runtime 7735 * assigned. 7736 */ 7737 if (rt_bandwidth_enabled() && rt_policy(policy) && 7738 task_group(p)->rt_bandwidth.rt_runtime == 0 && 7739 !task_group_is_autogroup(task_group(p))) { 7740 retval = -EPERM; 7741 goto unlock; 7742 } 7743 #endif 7744 #ifdef CONFIG_SMP 7745 if (dl_bandwidth_enabled() && dl_policy(policy) && 7746 !(attr->sched_flags & SCHED_FLAG_SUGOV)) { 7747 cpumask_t *span = rq->rd->span; 7748 7749 /* 7750 * Don't allow tasks with an affinity mask smaller than 7751 * the entire root_domain to become SCHED_DEADLINE. We 7752 * will also fail if there's no bandwidth available. 7753 */ 7754 if (!cpumask_subset(span, p->cpus_ptr) || 7755 rq->rd->dl_bw.bw == 0) { 7756 retval = -EPERM; 7757 goto unlock; 7758 } 7759 } 7760 #endif 7761 } 7762 7763 /* Re-check policy now with rq lock held: */ 7764 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 7765 policy = oldpolicy = -1; 7766 task_rq_unlock(rq, p, &rf); 7767 if (cpuset_locked) 7768 cpuset_unlock(); 7769 goto recheck; 7770 } 7771 7772 /* 7773 * If setscheduling to SCHED_DEADLINE (or changing the parameters 7774 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 7775 * is available. 7776 */ 7777 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { 7778 retval = -EBUSY; 7779 goto unlock; 7780 } 7781 7782 p->sched_reset_on_fork = reset_on_fork; 7783 oldprio = p->prio; 7784 7785 newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice); 7786 if (pi) { 7787 /* 7788 * Take priority boosted tasks into account. If the new 7789 * effective priority is unchanged, we just store the new 7790 * normal parameters and do not touch the scheduler class and 7791 * the runqueue. This will be done when the task deboost 7792 * itself. 7793 */ 7794 newprio = rt_effective_prio(p, newprio); 7795 if (newprio == oldprio) 7796 queue_flags &= ~DEQUEUE_MOVE; 7797 } 7798 7799 queued = task_on_rq_queued(p); 7800 running = task_current(rq, p); 7801 if (queued) 7802 dequeue_task(rq, p, queue_flags); 7803 if (running) 7804 put_prev_task(rq, p); 7805 7806 prev_class = p->sched_class; 7807 7808 if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) { 7809 __setscheduler_params(p, attr); 7810 __setscheduler_prio(p, newprio); 7811 } 7812 __setscheduler_uclamp(p, attr); 7813 7814 if (queued) { 7815 /* 7816 * We enqueue to tail when the priority of a task is 7817 * increased (user space view). 
7818 */ 7819 if (oldprio < p->prio) 7820 queue_flags |= ENQUEUE_HEAD; 7821 7822 enqueue_task(rq, p, queue_flags); 7823 } 7824 if (running) 7825 set_next_task(rq, p); 7826 7827 check_class_changed(rq, p, prev_class, oldprio); 7828 7829 /* Avoid rq from going away on us: */ 7830 preempt_disable(); 7831 head = splice_balance_callbacks(rq); 7832 task_rq_unlock(rq, p, &rf); 7833 7834 if (pi) { 7835 if (cpuset_locked) 7836 cpuset_unlock(); 7837 rt_mutex_adjust_pi(p); 7838 } 7839 7840 /* Run balance callbacks after we've adjusted the PI chain: */ 7841 balance_callbacks(rq, head); 7842 preempt_enable(); 7843 7844 return 0; 7845 7846 unlock: 7847 task_rq_unlock(rq, p, &rf); 7848 if (cpuset_locked) 7849 cpuset_unlock(); 7850 return retval; 7851 } 7852 7853 static int _sched_setscheduler(struct task_struct *p, int policy, 7854 const struct sched_param *param, bool check) 7855 { 7856 struct sched_attr attr = { 7857 .sched_policy = policy, 7858 .sched_priority = param->sched_priority, 7859 .sched_nice = PRIO_TO_NICE(p->static_prio), 7860 }; 7861 7862 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 7863 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 7864 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 7865 policy &= ~SCHED_RESET_ON_FORK; 7866 attr.sched_policy = policy; 7867 } 7868 7869 return __sched_setscheduler(p, &attr, check, true); 7870 } 7871 /** 7872 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 7873 * @p: the task in question. 7874 * @policy: new policy. 7875 * @param: structure containing the new RT priority. 7876 * 7877 * Use sched_set_fifo(), read its comment. 7878 * 7879 * Return: 0 on success. An error code otherwise. 7880 * 7881 * NOTE that the task may be already dead. 7882 */ 7883 int sched_setscheduler(struct task_struct *p, int policy, 7884 const struct sched_param *param) 7885 { 7886 return _sched_setscheduler(p, policy, param, true); 7887 } 7888 7889 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 7890 { 7891 return __sched_setscheduler(p, attr, true, true); 7892 } 7893 7894 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) 7895 { 7896 return __sched_setscheduler(p, attr, false, true); 7897 } 7898 EXPORT_SYMBOL_GPL(sched_setattr_nocheck); 7899 7900 /** 7901 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 7902 * @p: the task in question. 7903 * @policy: new policy. 7904 * @param: structure containing the new RT priority. 7905 * 7906 * Just like sched_setscheduler, only don't bother checking if the 7907 * current context has permission. For example, this is needed in 7908 * stop_machine(): we create temporary high priority worker threads, 7909 * but our caller might not have that capability. 7910 * 7911 * Return: 0 on success. An error code otherwise. 7912 */ 7913 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 7914 const struct sched_param *param) 7915 { 7916 return _sched_setscheduler(p, policy, param, false); 7917 } 7918 7919 /* 7920 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally 7921 * incapable of resource management, which is the one thing an OS really should 7922 * be doing. 7923 * 7924 * This is of course the reason it is limited to privileged users only. 7925 * 7926 * Worse still; it is fundamentally impossible to compose static priority 7927 * workloads. 
You cannot take two correctly working static prio workloads 7928 * and smash them together and still expect them to work. 7929 * 7930 * For this reason 'all' FIFO tasks the kernel creates are basically at: 7931 * 7932 * MAX_RT_PRIO / 2 7933 * 7934 * The administrator _MUST_ configure the system, the kernel simply doesn't 7935 * know enough information to make a sensible choice. 7936 */ 7937 void sched_set_fifo(struct task_struct *p) 7938 { 7939 struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; 7940 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 7941 } 7942 EXPORT_SYMBOL_GPL(sched_set_fifo); 7943 7944 /* 7945 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. 7946 */ 7947 void sched_set_fifo_low(struct task_struct *p) 7948 { 7949 struct sched_param sp = { .sched_priority = 1 }; 7950 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 7951 } 7952 EXPORT_SYMBOL_GPL(sched_set_fifo_low); 7953 7954 void sched_set_normal(struct task_struct *p, int nice) 7955 { 7956 struct sched_attr attr = { 7957 .sched_policy = SCHED_NORMAL, 7958 .sched_nice = nice, 7959 }; 7960 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); 7961 } 7962 EXPORT_SYMBOL_GPL(sched_set_normal); 7963 7964 static int 7965 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 7966 { 7967 struct sched_param lparam; 7968 struct task_struct *p; 7969 int retval; 7970 7971 if (!param || pid < 0) 7972 return -EINVAL; 7973 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 7974 return -EFAULT; 7975 7976 rcu_read_lock(); 7977 retval = -ESRCH; 7978 p = find_process_by_pid(pid); 7979 if (likely(p)) 7980 get_task_struct(p); 7981 rcu_read_unlock(); 7982 7983 if (likely(p)) { 7984 retval = sched_setscheduler(p, policy, &lparam); 7985 put_task_struct(p); 7986 } 7987 7988 return retval; 7989 } 7990 7991 /* 7992 * Mimics kernel/events/core.c perf_copy_attr(). 7993 */ 7994 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) 7995 { 7996 u32 size; 7997 int ret; 7998 7999 /* Zero the full structure, so that a short copy will be nice: */ 8000 memset(attr, 0, sizeof(*attr)); 8001 8002 ret = get_user(size, &uattr->size); 8003 if (ret) 8004 return ret; 8005 8006 /* ABI compatibility quirk: */ 8007 if (!size) 8008 size = SCHED_ATTR_SIZE_VER0; 8009 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) 8010 goto err_size; 8011 8012 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); 8013 if (ret) { 8014 if (ret == -E2BIG) 8015 goto err_size; 8016 return ret; 8017 } 8018 8019 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 8020 size < SCHED_ATTR_SIZE_VER1) 8021 return -EINVAL; 8022 8023 /* 8024 * XXX: Do we want to be lenient like existing syscalls; or do we want 8025 * to be strict and return an error on out-of-bounds values? 8026 */ 8027 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 8028 8029 return 0; 8030 8031 err_size: 8032 put_user(sizeof(*attr), &uattr->size); 8033 return -E2BIG; 8034 } 8035 8036 static void get_params(struct task_struct *p, struct sched_attr *attr) 8037 { 8038 if (task_has_dl_policy(p)) 8039 __getparam_dl(p, attr); 8040 else if (task_has_rt_policy(p)) 8041 attr->sched_priority = p->rt_priority; 8042 else 8043 attr->sched_nice = task_nice(p); 8044 } 8045 8046 /** 8047 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 8048 * @pid: the pid in question. 8049 * @policy: new policy. 8050 * @param: structure containing the new RT priority. 
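 *
 * Illustrative user-space sketch (assuming the glibc wrapper of the same
 * name from <sched.h>, and CAP_SYS_NICE for RT policies; the wrapper
 * returns 0 on success and -1 with errno set on failure):
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	if (sched_setscheduler(pid, SCHED_FIFO, &sp) == -1)
 *		perror("sched_setscheduler");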
8051 * 8052 * Return: 0 on success. An error code otherwise. 8053 */ 8054 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) 8055 { 8056 if (policy < 0) 8057 return -EINVAL; 8058 8059 return do_sched_setscheduler(pid, policy, param); 8060 } 8061 8062 /** 8063 * sys_sched_setparam - set/change the RT priority of a thread 8064 * @pid: the pid in question. 8065 * @param: structure containing the new RT priority. 8066 * 8067 * Return: 0 on success. An error code otherwise. 8068 */ 8069 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 8070 { 8071 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 8072 } 8073 8074 /** 8075 * sys_sched_setattr - same as above, but with extended sched_attr 8076 * @pid: the pid in question. 8077 * @uattr: structure containing the extended parameters. 8078 * @flags: for future extension. 8079 */ 8080 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 8081 unsigned int, flags) 8082 { 8083 struct sched_attr attr; 8084 struct task_struct *p; 8085 int retval; 8086 8087 if (!uattr || pid < 0 || flags) 8088 return -EINVAL; 8089 8090 retval = sched_copy_attr(uattr, &attr); 8091 if (retval) 8092 return retval; 8093 8094 if ((int)attr.sched_policy < 0) 8095 return -EINVAL; 8096 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) 8097 attr.sched_policy = SETPARAM_POLICY; 8098 8099 rcu_read_lock(); 8100 retval = -ESRCH; 8101 p = find_process_by_pid(pid); 8102 if (likely(p)) 8103 get_task_struct(p); 8104 rcu_read_unlock(); 8105 8106 if (likely(p)) { 8107 if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS) 8108 get_params(p, &attr); 8109 retval = sched_setattr(p, &attr); 8110 put_task_struct(p); 8111 } 8112 8113 return retval; 8114 } 8115 8116 /** 8117 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 8118 * @pid: the pid in question. 8119 * 8120 * Return: On success, the policy of the thread. Otherwise, a negative error 8121 * code. 8122 */ 8123 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 8124 { 8125 struct task_struct *p; 8126 int retval; 8127 8128 if (pid < 0) 8129 return -EINVAL; 8130 8131 retval = -ESRCH; 8132 rcu_read_lock(); 8133 p = find_process_by_pid(pid); 8134 if (p) { 8135 retval = security_task_getscheduler(p); 8136 if (!retval) 8137 retval = p->policy 8138 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 8139 } 8140 rcu_read_unlock(); 8141 return retval; 8142 } 8143 8144 /** 8145 * sys_sched_getparam - get the RT priority of a thread 8146 * @pid: the pid in question. 8147 * @param: structure containing the RT priority. 8148 * 8149 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 8150 * code. 8151 */ 8152 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 8153 { 8154 struct sched_param lp = { .sched_priority = 0 }; 8155 struct task_struct *p; 8156 int retval; 8157 8158 if (!param || pid < 0) 8159 return -EINVAL; 8160 8161 rcu_read_lock(); 8162 p = find_process_by_pid(pid); 8163 retval = -ESRCH; 8164 if (!p) 8165 goto out_unlock; 8166 8167 retval = security_task_getscheduler(p); 8168 if (retval) 8169 goto out_unlock; 8170 8171 if (task_has_rt_policy(p)) 8172 lp.sched_priority = p->rt_priority; 8173 rcu_read_unlock(); 8174 8175 /* 8176 * This one might sleep, we cannot do it with a spinlock held ... 8177 */ 8178 retval = copy_to_user(param, &lp, sizeof(*param)) ? 
-EFAULT : 0; 8179 8180 return retval; 8181 8182 out_unlock: 8183 rcu_read_unlock(); 8184 return retval; 8185 } 8186 8187 /* 8188 * Copy the kernel size attribute structure (which might be larger 8189 * than what user-space knows about) to user-space. 8190 * 8191 * Note that all cases are valid: user-space buffer can be larger or 8192 * smaller than the kernel-space buffer. The usual case is that both 8193 * have the same size. 8194 */ 8195 static int 8196 sched_attr_copy_to_user(struct sched_attr __user *uattr, 8197 struct sched_attr *kattr, 8198 unsigned int usize) 8199 { 8200 unsigned int ksize = sizeof(*kattr); 8201 8202 if (!access_ok(uattr, usize)) 8203 return -EFAULT; 8204 8205 /* 8206 * sched_getattr() ABI forwards and backwards compatibility: 8207 * 8208 * If usize == ksize then we just copy everything to user-space and all is good. 8209 * 8210 * If usize < ksize then we only copy as much as user-space has space for, 8211 * this keeps ABI compatibility as well. We skip the rest. 8212 * 8213 * If usize > ksize then user-space is using a newer version of the ABI, 8214 * which part the kernel doesn't know about. Just ignore it - tooling can 8215 * detect the kernel's knowledge of attributes from the attr->size value 8216 * which is set to ksize in this case. 8217 */ 8218 kattr->size = min(usize, ksize); 8219 8220 if (copy_to_user(uattr, kattr, kattr->size)) 8221 return -EFAULT; 8222 8223 return 0; 8224 } 8225 8226 /** 8227 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 8228 * @pid: the pid in question. 8229 * @uattr: structure containing the extended parameters. 8230 * @usize: sizeof(attr) for fwd/bwd comp. 8231 * @flags: for future extension. 8232 */ 8233 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 8234 unsigned int, usize, unsigned int, flags) 8235 { 8236 struct sched_attr kattr = { }; 8237 struct task_struct *p; 8238 int retval; 8239 8240 if (!uattr || pid < 0 || usize > PAGE_SIZE || 8241 usize < SCHED_ATTR_SIZE_VER0 || flags) 8242 return -EINVAL; 8243 8244 rcu_read_lock(); 8245 p = find_process_by_pid(pid); 8246 retval = -ESRCH; 8247 if (!p) 8248 goto out_unlock; 8249 8250 retval = security_task_getscheduler(p); 8251 if (retval) 8252 goto out_unlock; 8253 8254 kattr.sched_policy = p->policy; 8255 if (p->sched_reset_on_fork) 8256 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 8257 get_params(p, &kattr); 8258 kattr.sched_flags &= SCHED_FLAG_ALL; 8259 8260 #ifdef CONFIG_UCLAMP_TASK 8261 /* 8262 * This could race with another potential updater, but this is fine 8263 * because it'll correctly read the old or the new value. We don't need 8264 * to guarantee who wins the race as long as it doesn't return garbage. 8265 */ 8266 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 8267 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 8268 #endif 8269 8270 rcu_read_unlock(); 8271 8272 return sched_attr_copy_to_user(uattr, &kattr, usize); 8273 8274 out_unlock: 8275 rcu_read_unlock(); 8276 return retval; 8277 } 8278 8279 #ifdef CONFIG_SMP 8280 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) 8281 { 8282 int ret = 0; 8283 8284 /* 8285 * If the task isn't a deadline task or admission control is 8286 * disabled then we don't care about affinity changes. 
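 *
 * Illustrative consequence (a hypothetical user-space sketch using the
 * glibc sched_setaffinity() wrapper; 'deadline_tid' is an admitted
 * SCHED_DEADLINE task on a multi-CPU root domain):
 *
 *	cpu_set_t set;
 *	int err;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	err = sched_setaffinity(deadline_tid, sizeof(set), &set);
 *		-> err == -1 with errno == EBUSY, since the new mask no
 *		   longer spans the root domain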
8287 */ 8288 if (!task_has_dl_policy(p) || !dl_bandwidth_enabled()) 8289 return 0; 8290 8291 /* 8292 * Since bandwidth control happens on root_domain basis, 8293 * if admission test is enabled, we only admit -deadline 8294 * tasks allowed to run on all the CPUs in the task's 8295 * root_domain. 8296 */ 8297 rcu_read_lock(); 8298 if (!cpumask_subset(task_rq(p)->rd->span, mask)) 8299 ret = -EBUSY; 8300 rcu_read_unlock(); 8301 return ret; 8302 } 8303 #endif 8304 8305 static int 8306 __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) 8307 { 8308 int retval; 8309 cpumask_var_t cpus_allowed, new_mask; 8310 8311 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) 8312 return -ENOMEM; 8313 8314 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 8315 retval = -ENOMEM; 8316 goto out_free_cpus_allowed; 8317 } 8318 8319 cpuset_cpus_allowed(p, cpus_allowed); 8320 cpumask_and(new_mask, ctx->new_mask, cpus_allowed); 8321 8322 ctx->new_mask = new_mask; 8323 ctx->flags |= SCA_CHECK; 8324 8325 retval = dl_task_check_affinity(p, new_mask); 8326 if (retval) 8327 goto out_free_new_mask; 8328 8329 retval = __set_cpus_allowed_ptr(p, ctx); 8330 if (retval) 8331 goto out_free_new_mask; 8332 8333 cpuset_cpus_allowed(p, cpus_allowed); 8334 if (!cpumask_subset(new_mask, cpus_allowed)) { 8335 /* 8336 * We must have raced with a concurrent cpuset update. 8337 * Just reset the cpumask to the cpuset's cpus_allowed. 8338 */ 8339 cpumask_copy(new_mask, cpus_allowed); 8340 8341 /* 8342 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr() 8343 * will restore the previous user_cpus_ptr value. 8344 * 8345 * In the unlikely event a previous user_cpus_ptr exists, 8346 * we need to further restrict the mask to what is allowed 8347 * by that old user_cpus_ptr. 8348 */ 8349 if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) { 8350 bool empty = !cpumask_and(new_mask, new_mask, 8351 ctx->user_mask); 8352 8353 if (WARN_ON_ONCE(empty)) 8354 cpumask_copy(new_mask, cpus_allowed); 8355 } 8356 __set_cpus_allowed_ptr(p, ctx); 8357 retval = -EINVAL; 8358 } 8359 8360 out_free_new_mask: 8361 free_cpumask_var(new_mask); 8362 out_free_cpus_allowed: 8363 free_cpumask_var(cpus_allowed); 8364 return retval; 8365 } 8366 8367 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 8368 { 8369 struct affinity_context ac; 8370 struct cpumask *user_mask; 8371 struct task_struct *p; 8372 int retval; 8373 8374 rcu_read_lock(); 8375 8376 p = find_process_by_pid(pid); 8377 if (!p) { 8378 rcu_read_unlock(); 8379 return -ESRCH; 8380 } 8381 8382 /* Prevent p going away */ 8383 get_task_struct(p); 8384 rcu_read_unlock(); 8385 8386 if (p->flags & PF_NO_SETAFFINITY) { 8387 retval = -EINVAL; 8388 goto out_put_task; 8389 } 8390 8391 if (!check_same_owner(p)) { 8392 rcu_read_lock(); 8393 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 8394 rcu_read_unlock(); 8395 retval = -EPERM; 8396 goto out_put_task; 8397 } 8398 rcu_read_unlock(); 8399 } 8400 8401 retval = security_task_setscheduler(p); 8402 if (retval) 8403 goto out_put_task; 8404 8405 /* 8406 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and 8407 * alloc_user_cpus_ptr() returns NULL. 
8408 */ 8409 user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE); 8410 if (user_mask) { 8411 cpumask_copy(user_mask, in_mask); 8412 } else if (IS_ENABLED(CONFIG_SMP)) { 8413 retval = -ENOMEM; 8414 goto out_put_task; 8415 } 8416 8417 ac = (struct affinity_context){ 8418 .new_mask = in_mask, 8419 .user_mask = user_mask, 8420 .flags = SCA_USER, 8421 }; 8422 8423 retval = __sched_setaffinity(p, &ac); 8424 kfree(ac.user_mask); 8425 8426 out_put_task: 8427 put_task_struct(p); 8428 return retval; 8429 } 8430 8431 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 8432 struct cpumask *new_mask) 8433 { 8434 if (len < cpumask_size()) 8435 cpumask_clear(new_mask); 8436 else if (len > cpumask_size()) 8437 len = cpumask_size(); 8438 8439 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 8440 } 8441 8442 /** 8443 * sys_sched_setaffinity - set the CPU affinity of a process 8444 * @pid: pid of the process 8445 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 8446 * @user_mask_ptr: user-space pointer to the new CPU mask 8447 * 8448 * Return: 0 on success. An error code otherwise. 8449 */ 8450 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 8451 unsigned long __user *, user_mask_ptr) 8452 { 8453 cpumask_var_t new_mask; 8454 int retval; 8455 8456 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 8457 return -ENOMEM; 8458 8459 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 8460 if (retval == 0) 8461 retval = sched_setaffinity(pid, new_mask); 8462 free_cpumask_var(new_mask); 8463 return retval; 8464 } 8465 8466 long sched_getaffinity(pid_t pid, struct cpumask *mask) 8467 { 8468 struct task_struct *p; 8469 unsigned long flags; 8470 int retval; 8471 8472 rcu_read_lock(); 8473 8474 retval = -ESRCH; 8475 p = find_process_by_pid(pid); 8476 if (!p) 8477 goto out_unlock; 8478 8479 retval = security_task_getscheduler(p); 8480 if (retval) 8481 goto out_unlock; 8482 8483 raw_spin_lock_irqsave(&p->pi_lock, flags); 8484 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); 8485 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 8486 8487 out_unlock: 8488 rcu_read_unlock(); 8489 8490 return retval; 8491 } 8492 8493 /** 8494 * sys_sched_getaffinity - get the CPU affinity of a process 8495 * @pid: pid of the process 8496 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 8497 * @user_mask_ptr: user-space pointer to hold the current CPU mask 8498 * 8499 * Return: size of CPU mask copied to user_mask_ptr on success. An 8500 * error code otherwise. 
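 *
 * The raw syscall thus returns the size of the mask copied, while the
 * glibc sched_getaffinity() wrapper hides this and returns 0 on success.
 * A hypothetical raw invocation for illustration:
 *
 *	unsigned long bits[1024 / (8 * sizeof(unsigned long))] = { 0 };
 *	long n = syscall(SYS_sched_getaffinity, 0, sizeof(bits), bits);
 *		-> n > 0 is the number of bytes of the affinity mask
 *		   actually copied into 'bits'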
8501 */ 8502 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 8503 unsigned long __user *, user_mask_ptr) 8504 { 8505 int ret; 8506 cpumask_var_t mask; 8507 8508 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 8509 return -EINVAL; 8510 if (len & (sizeof(unsigned long)-1)) 8511 return -EINVAL; 8512 8513 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 8514 return -ENOMEM; 8515 8516 ret = sched_getaffinity(pid, mask); 8517 if (ret == 0) { 8518 unsigned int retlen = min(len, cpumask_size()); 8519 8520 if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen)) 8521 ret = -EFAULT; 8522 else 8523 ret = retlen; 8524 } 8525 free_cpumask_var(mask); 8526 8527 return ret; 8528 } 8529 8530 static void do_sched_yield(void) 8531 { 8532 struct rq_flags rf; 8533 struct rq *rq; 8534 8535 rq = this_rq_lock_irq(&rf); 8536 8537 schedstat_inc(rq->yld_count); 8538 current->sched_class->yield_task(rq); 8539 8540 preempt_disable(); 8541 rq_unlock_irq(rq, &rf); 8542 sched_preempt_enable_no_resched(); 8543 8544 schedule(); 8545 } 8546 8547 /** 8548 * sys_sched_yield - yield the current processor to other threads. 8549 * 8550 * This function yields the current CPU to other tasks. If there are no 8551 * other threads running on this CPU then this function will return. 8552 * 8553 * Return: 0. 8554 */ 8555 SYSCALL_DEFINE0(sched_yield) 8556 { 8557 do_sched_yield(); 8558 return 0; 8559 } 8560 8561 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) 8562 int __sched __cond_resched(void) 8563 { 8564 if (should_resched(0)) { 8565 preempt_schedule_common(); 8566 return 1; 8567 } 8568 /* 8569 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick 8570 * whether the current CPU is in an RCU read-side critical section, 8571 * so the tick can report quiescent states even for CPUs looping 8572 * in kernel context. In contrast, in non-preemptible kernels, 8573 * RCU readers leave no in-memory hints, which means that CPU-bound 8574 * processes executing in kernel context might never report an 8575 * RCU quiescent state. Therefore, the following code causes 8576 * cond_resched() to report a quiescent state, but only when RCU 8577 * is in urgent need of one. 
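 *
 * Illustrative caller pattern (an assumption about typical users, with a
 * hypothetical helper): long-running loops in process context sprinkle
 * cond_resched() so that both the scheduler and RCU can make progress:
 *
 *	list_for_each_entry(obj, &long_list, node) {
 *		expensive_work(obj);
 *		cond_resched();
 *	}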
8578 */ 8579 #ifndef CONFIG_PREEMPT_RCU 8580 rcu_all_qs(); 8581 #endif 8582 return 0; 8583 } 8584 EXPORT_SYMBOL(__cond_resched); 8585 #endif 8586 8587 #ifdef CONFIG_PREEMPT_DYNAMIC 8588 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 8589 #define cond_resched_dynamic_enabled __cond_resched 8590 #define cond_resched_dynamic_disabled ((void *)&__static_call_return0) 8591 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched); 8592 EXPORT_STATIC_CALL_TRAMP(cond_resched); 8593 8594 #define might_resched_dynamic_enabled __cond_resched 8595 #define might_resched_dynamic_disabled ((void *)&__static_call_return0) 8596 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched); 8597 EXPORT_STATIC_CALL_TRAMP(might_resched); 8598 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 8599 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched); 8600 int __sched dynamic_cond_resched(void) 8601 { 8602 klp_sched_try_switch(); 8603 if (!static_branch_unlikely(&sk_dynamic_cond_resched)) 8604 return 0; 8605 return __cond_resched(); 8606 } 8607 EXPORT_SYMBOL(dynamic_cond_resched); 8608 8609 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched); 8610 int __sched dynamic_might_resched(void) 8611 { 8612 if (!static_branch_unlikely(&sk_dynamic_might_resched)) 8613 return 0; 8614 return __cond_resched(); 8615 } 8616 EXPORT_SYMBOL(dynamic_might_resched); 8617 #endif 8618 #endif 8619 8620 /* 8621 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 8622 * call schedule, and on return reacquire the lock. 8623 * 8624 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level 8625 * operations here to prevent schedule() from being called twice (once via 8626 * spin_unlock(), once by hand). 8627 */ 8628 int __cond_resched_lock(spinlock_t *lock) 8629 { 8630 int resched = should_resched(PREEMPT_LOCK_OFFSET); 8631 int ret = 0; 8632 8633 lockdep_assert_held(lock); 8634 8635 if (spin_needbreak(lock) || resched) { 8636 spin_unlock(lock); 8637 if (!_cond_resched()) 8638 cpu_relax(); 8639 ret = 1; 8640 spin_lock(lock); 8641 } 8642 return ret; 8643 } 8644 EXPORT_SYMBOL(__cond_resched_lock); 8645 8646 int __cond_resched_rwlock_read(rwlock_t *lock) 8647 { 8648 int resched = should_resched(PREEMPT_LOCK_OFFSET); 8649 int ret = 0; 8650 8651 lockdep_assert_held_read(lock); 8652 8653 if (rwlock_needbreak(lock) || resched) { 8654 read_unlock(lock); 8655 if (!_cond_resched()) 8656 cpu_relax(); 8657 ret = 1; 8658 read_lock(lock); 8659 } 8660 return ret; 8661 } 8662 EXPORT_SYMBOL(__cond_resched_rwlock_read); 8663 8664 int __cond_resched_rwlock_write(rwlock_t *lock) 8665 { 8666 int resched = should_resched(PREEMPT_LOCK_OFFSET); 8667 int ret = 0; 8668 8669 lockdep_assert_held_write(lock); 8670 8671 if (rwlock_needbreak(lock) || resched) { 8672 write_unlock(lock); 8673 if (!_cond_resched()) 8674 cpu_relax(); 8675 ret = 1; 8676 write_lock(lock); 8677 } 8678 return ret; 8679 } 8680 EXPORT_SYMBOL(__cond_resched_rwlock_write); 8681 8682 #ifdef CONFIG_PREEMPT_DYNAMIC 8683 8684 #ifdef CONFIG_GENERIC_ENTRY 8685 #include <linux/entry-common.h> 8686 #endif 8687 8688 /* 8689 * SC:cond_resched 8690 * SC:might_resched 8691 * SC:preempt_schedule 8692 * SC:preempt_schedule_notrace 8693 * SC:irqentry_exit_cond_resched 8694 * 8695 * 8696 * NONE: 8697 * cond_resched <- __cond_resched 8698 * might_resched <- RET0 8699 * preempt_schedule <- NOP 8700 * preempt_schedule_notrace <- NOP 8701 * irqentry_exit_cond_resched <- NOP 8702 * 8703 * VOLUNTARY: 8704 * cond_resched <- __cond_resched 8705 * might_resched <- __cond_resched 8706 
* preempt_schedule <- NOP 8707 * preempt_schedule_notrace <- NOP 8708 * irqentry_exit_cond_resched <- NOP 8709 * 8710 * FULL: 8711 * cond_resched <- RET0 8712 * might_resched <- RET0 8713 * preempt_schedule <- preempt_schedule 8714 * preempt_schedule_notrace <- preempt_schedule_notrace 8715 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched 8716 */ 8717 8718 enum { 8719 preempt_dynamic_undefined = -1, 8720 preempt_dynamic_none, 8721 preempt_dynamic_voluntary, 8722 preempt_dynamic_full, 8723 }; 8724 8725 int preempt_dynamic_mode = preempt_dynamic_undefined; 8726 8727 int sched_dynamic_mode(const char *str) 8728 { 8729 if (!strcmp(str, "none")) 8730 return preempt_dynamic_none; 8731 8732 if (!strcmp(str, "voluntary")) 8733 return preempt_dynamic_voluntary; 8734 8735 if (!strcmp(str, "full")) 8736 return preempt_dynamic_full; 8737 8738 return -EINVAL; 8739 } 8740 8741 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 8742 #define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled) 8743 #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled) 8744 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 8745 #define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key) 8746 #define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key) 8747 #else 8748 #error "Unsupported PREEMPT_DYNAMIC mechanism" 8749 #endif 8750 8751 static DEFINE_MUTEX(sched_dynamic_mutex); 8752 static bool klp_override; 8753 8754 static void __sched_dynamic_update(int mode) 8755 { 8756 /* 8757 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in 8758 * the ZERO state, which is invalid. 8759 */ 8760 if (!klp_override) 8761 preempt_dynamic_enable(cond_resched); 8762 preempt_dynamic_enable(might_resched); 8763 preempt_dynamic_enable(preempt_schedule); 8764 preempt_dynamic_enable(preempt_schedule_notrace); 8765 preempt_dynamic_enable(irqentry_exit_cond_resched); 8766 8767 switch (mode) { 8768 case preempt_dynamic_none: 8769 if (!klp_override) 8770 preempt_dynamic_enable(cond_resched); 8771 preempt_dynamic_disable(might_resched); 8772 preempt_dynamic_disable(preempt_schedule); 8773 preempt_dynamic_disable(preempt_schedule_notrace); 8774 preempt_dynamic_disable(irqentry_exit_cond_resched); 8775 if (mode != preempt_dynamic_mode) 8776 pr_info("Dynamic Preempt: none\n"); 8777 break; 8778 8779 case preempt_dynamic_voluntary: 8780 if (!klp_override) 8781 preempt_dynamic_enable(cond_resched); 8782 preempt_dynamic_enable(might_resched); 8783 preempt_dynamic_disable(preempt_schedule); 8784 preempt_dynamic_disable(preempt_schedule_notrace); 8785 preempt_dynamic_disable(irqentry_exit_cond_resched); 8786 if (mode != preempt_dynamic_mode) 8787 pr_info("Dynamic Preempt: voluntary\n"); 8788 break; 8789 8790 case preempt_dynamic_full: 8791 if (!klp_override) 8792 preempt_dynamic_disable(cond_resched); 8793 preempt_dynamic_disable(might_resched); 8794 preempt_dynamic_enable(preempt_schedule); 8795 preempt_dynamic_enable(preempt_schedule_notrace); 8796 preempt_dynamic_enable(irqentry_exit_cond_resched); 8797 if (mode != preempt_dynamic_mode) 8798 pr_info("Dynamic Preempt: full\n"); 8799 break; 8800 } 8801 8802 preempt_dynamic_mode = mode; 8803 } 8804 8805 void sched_dynamic_update(int mode) 8806 { 8807 mutex_lock(&sched_dynamic_mutex); 8808 __sched_dynamic_update(mode); 8809 mutex_unlock(&sched_dynamic_mutex); 8810 } 8811 8812 #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL 8813 8814 static int klp_cond_resched(void) 8815 { 8816 __klp_sched_try_switch(); 8817 return __cond_resched(); 
8818 } 8819 8820 void sched_dynamic_klp_enable(void) 8821 { 8822 mutex_lock(&sched_dynamic_mutex); 8823 8824 klp_override = true; 8825 static_call_update(cond_resched, klp_cond_resched); 8826 8827 mutex_unlock(&sched_dynamic_mutex); 8828 } 8829 8830 void sched_dynamic_klp_disable(void) 8831 { 8832 mutex_lock(&sched_dynamic_mutex); 8833 8834 klp_override = false; 8835 __sched_dynamic_update(preempt_dynamic_mode); 8836 8837 mutex_unlock(&sched_dynamic_mutex); 8838 } 8839 8840 #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */ 8841 8842 static int __init setup_preempt_mode(char *str) 8843 { 8844 int mode = sched_dynamic_mode(str); 8845 if (mode < 0) { 8846 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str); 8847 return 0; 8848 } 8849 8850 sched_dynamic_update(mode); 8851 return 1; 8852 } 8853 __setup("preempt=", setup_preempt_mode); 8854 8855 static void __init preempt_dynamic_init(void) 8856 { 8857 if (preempt_dynamic_mode == preempt_dynamic_undefined) { 8858 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) { 8859 sched_dynamic_update(preempt_dynamic_none); 8860 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) { 8861 sched_dynamic_update(preempt_dynamic_voluntary); 8862 } else { 8863 /* Default static call setting, nothing to do */ 8864 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)); 8865 preempt_dynamic_mode = preempt_dynamic_full; 8866 pr_info("Dynamic Preempt: full\n"); 8867 } 8868 } 8869 } 8870 8871 #define PREEMPT_MODEL_ACCESSOR(mode) \ 8872 bool preempt_model_##mode(void) \ 8873 { \ 8874 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \ 8875 return preempt_dynamic_mode == preempt_dynamic_##mode; \ 8876 } \ 8877 EXPORT_SYMBOL_GPL(preempt_model_##mode) 8878 8879 PREEMPT_MODEL_ACCESSOR(none); 8880 PREEMPT_MODEL_ACCESSOR(voluntary); 8881 PREEMPT_MODEL_ACCESSOR(full); 8882 8883 #else /* !CONFIG_PREEMPT_DYNAMIC */ 8884 8885 static inline void preempt_dynamic_init(void) { } 8886 8887 #endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */ 8888 8889 /** 8890 * yield - yield the current processor to other threads. 8891 * 8892 * Do not ever use this function, there's a 99% chance you're doing it wrong. 8893 * 8894 * The scheduler is at all times free to pick the calling task as the most 8895 * eligible task to run, if removing the yield() call from your code breaks 8896 * it, it's already broken. 8897 * 8898 * Typical broken usage is: 8899 * 8900 * while (!event) 8901 * yield(); 8902 * 8903 * where one assumes that yield() will let 'the other' process run that will 8904 * make event true. If the current task is a SCHED_FIFO task that will never 8905 * happen. Never use yield() as a progress guarantee!! 8906 * 8907 * If you want to use yield() to wait for something, use wait_event(). 8908 * If you want to use yield() to be 'nice' for others, use cond_resched(). 8909 * If you still want to use yield(), do not! 8910 */ 8911 void __sched yield(void) 8912 { 8913 set_current_state(TASK_RUNNING); 8914 do_sched_yield(); 8915 } 8916 EXPORT_SYMBOL(yield); 8917 8918 /** 8919 * yield_to - yield the current processor to another thread in 8920 * your thread group, or accelerate that thread toward the 8921 * processor it's on. 8922 * @p: target task 8923 * @preempt: whether task preemption is allowed or not 8924 * 8925 * It's the caller's job to ensure that the target task struct 8926 * can't go away on us before we can do any checks. 8927 * 8928 * Return: 8929 * true (>0) if we indeed boosted the target task. 8930 * false (0) if we failed to boost the target. 8931 * -ESRCH if there's no task to yield to. 
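 *
 * Illustrative caller sketch (hypothetical, with made-up helpers):
 * virtualization code that sees the current vCPU spinning on a lock held
 * by a preempted sibling vCPU may boost the holder along the lines of:
 *
 *	struct task_struct *holder = vcpu_lock_holder_task(vcpu);
 *
 *	if (holder)
 *		yield_to(holder, false);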
8932 */ 8933 int __sched yield_to(struct task_struct *p, bool preempt) 8934 { 8935 struct task_struct *curr = current; 8936 struct rq *rq, *p_rq; 8937 unsigned long flags; 8938 int yielded = 0; 8939 8940 local_irq_save(flags); 8941 rq = this_rq(); 8942 8943 again: 8944 p_rq = task_rq(p); 8945 /* 8946 * If we're the only runnable task on the rq and target rq also 8947 * has only one task, there's absolutely no point in yielding. 8948 */ 8949 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 8950 yielded = -ESRCH; 8951 goto out_irq; 8952 } 8953 8954 double_rq_lock(rq, p_rq); 8955 if (task_rq(p) != p_rq) { 8956 double_rq_unlock(rq, p_rq); 8957 goto again; 8958 } 8959 8960 if (!curr->sched_class->yield_to_task) 8961 goto out_unlock; 8962 8963 if (curr->sched_class != p->sched_class) 8964 goto out_unlock; 8965 8966 if (task_on_cpu(p_rq, p) || !task_is_running(p)) 8967 goto out_unlock; 8968 8969 yielded = curr->sched_class->yield_to_task(rq, p); 8970 if (yielded) { 8971 schedstat_inc(rq->yld_count); 8972 /* 8973 * Make p's CPU reschedule; pick_next_entity takes care of 8974 * fairness. 8975 */ 8976 if (preempt && rq != p_rq) 8977 resched_curr(p_rq); 8978 } 8979 8980 out_unlock: 8981 double_rq_unlock(rq, p_rq); 8982 out_irq: 8983 local_irq_restore(flags); 8984 8985 if (yielded > 0) 8986 schedule(); 8987 8988 return yielded; 8989 } 8990 EXPORT_SYMBOL_GPL(yield_to); 8991 8992 int io_schedule_prepare(void) 8993 { 8994 int old_iowait = current->in_iowait; 8995 8996 current->in_iowait = 1; 8997 blk_flush_plug(current->plug, true); 8998 return old_iowait; 8999 } 9000 9001 void io_schedule_finish(int token) 9002 { 9003 current->in_iowait = token; 9004 } 9005 9006 /* 9007 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 9008 * that process accounting knows that this is a task in IO wait state. 9009 */ 9010 long __sched io_schedule_timeout(long timeout) 9011 { 9012 int token; 9013 long ret; 9014 9015 token = io_schedule_prepare(); 9016 ret = schedule_timeout(timeout); 9017 io_schedule_finish(token); 9018 9019 return ret; 9020 } 9021 EXPORT_SYMBOL(io_schedule_timeout); 9022 9023 void __sched io_schedule(void) 9024 { 9025 int token; 9026 9027 token = io_schedule_prepare(); 9028 schedule(); 9029 io_schedule_finish(token); 9030 } 9031 EXPORT_SYMBOL(io_schedule); 9032 9033 /** 9034 * sys_sched_get_priority_max - return maximum RT priority. 9035 * @policy: scheduling class. 9036 * 9037 * Return: On success, this syscall returns the maximum 9038 * rt_priority that can be used by a given scheduling class. 9039 * On failure, a negative error code is returned. 9040 */ 9041 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 9042 { 9043 int ret = -EINVAL; 9044 9045 switch (policy) { 9046 case SCHED_FIFO: 9047 case SCHED_RR: 9048 ret = MAX_RT_PRIO-1; 9049 break; 9050 case SCHED_DEADLINE: 9051 case SCHED_NORMAL: 9052 case SCHED_BATCH: 9053 case SCHED_IDLE: 9054 ret = 0; 9055 break; 9056 } 9057 return ret; 9058 } 9059 9060 /** 9061 * sys_sched_get_priority_min - return minimum RT priority. 9062 * @policy: scheduling class. 9063 * 9064 * Return: On success, this syscall returns the minimum 9065 * rt_priority that can be used by a given scheduling class. 9066 * On failure, a negative error code is returned. 
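 *
 * Illustrative user-space sketch (assuming the glibc wrappers declared in
 * <sched.h>):
 *
 *	int lo = sched_get_priority_min(SCHED_FIFO);	-> 1 on Linux
 *	int hi = sched_get_priority_max(SCHED_FIFO);	-> 99 on Linux
 *	struct sched_param sp = { .sched_priority = (lo + hi) / 2 };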
9067 */ 9068 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 9069 { 9070 int ret = -EINVAL; 9071 9072 switch (policy) { 9073 case SCHED_FIFO: 9074 case SCHED_RR: 9075 ret = 1; 9076 break; 9077 case SCHED_DEADLINE: 9078 case SCHED_NORMAL: 9079 case SCHED_BATCH: 9080 case SCHED_IDLE: 9081 ret = 0; 9082 } 9083 return ret; 9084 } 9085 9086 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 9087 { 9088 struct task_struct *p; 9089 unsigned int time_slice; 9090 struct rq_flags rf; 9091 struct rq *rq; 9092 int retval; 9093 9094 if (pid < 0) 9095 return -EINVAL; 9096 9097 retval = -ESRCH; 9098 rcu_read_lock(); 9099 p = find_process_by_pid(pid); 9100 if (!p) 9101 goto out_unlock; 9102 9103 retval = security_task_getscheduler(p); 9104 if (retval) 9105 goto out_unlock; 9106 9107 rq = task_rq_lock(p, &rf); 9108 time_slice = 0; 9109 if (p->sched_class->get_rr_interval) 9110 time_slice = p->sched_class->get_rr_interval(rq, p); 9111 task_rq_unlock(rq, p, &rf); 9112 9113 rcu_read_unlock(); 9114 jiffies_to_timespec64(time_slice, t); 9115 return 0; 9116 9117 out_unlock: 9118 rcu_read_unlock(); 9119 return retval; 9120 } 9121 9122 /** 9123 * sys_sched_rr_get_interval - return the default timeslice of a process. 9124 * @pid: pid of the process. 9125 * @interval: userspace pointer to the timeslice value. 9126 * 9127 * this syscall writes the default timeslice value of a given process 9128 * into the user-space timespec buffer. A value of '0' means infinity. 9129 * 9130 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 9131 * an error code. 9132 */ 9133 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 9134 struct __kernel_timespec __user *, interval) 9135 { 9136 struct timespec64 t; 9137 int retval = sched_rr_get_interval(pid, &t); 9138 9139 if (retval == 0) 9140 retval = put_timespec64(&t, interval); 9141 9142 return retval; 9143 } 9144 9145 #ifdef CONFIG_COMPAT_32BIT_TIME 9146 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, 9147 struct old_timespec32 __user *, interval) 9148 { 9149 struct timespec64 t; 9150 int retval = sched_rr_get_interval(pid, &t); 9151 9152 if (retval == 0) 9153 retval = put_old_timespec32(&t, interval); 9154 return retval; 9155 } 9156 #endif 9157 9158 void sched_show_task(struct task_struct *p) 9159 { 9160 unsigned long free = 0; 9161 int ppid; 9162 9163 if (!try_get_task_stack(p)) 9164 return; 9165 9166 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); 9167 9168 if (task_is_running(p)) 9169 pr_cont(" running task "); 9170 #ifdef CONFIG_DEBUG_STACK_USAGE 9171 free = stack_not_used(p); 9172 #endif 9173 ppid = 0; 9174 rcu_read_lock(); 9175 if (pid_alive(p)) 9176 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 9177 rcu_read_unlock(); 9178 pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n", 9179 free, task_pid_nr(p), ppid, 9180 read_task_thread_flags(p)); 9181 9182 print_worker_info(KERN_INFO, p); 9183 print_stop_info(KERN_INFO, p); 9184 show_stack(p, NULL, KERN_INFO); 9185 put_task_stack(p); 9186 } 9187 EXPORT_SYMBOL_GPL(sched_show_task); 9188 9189 static inline bool 9190 state_filter_match(unsigned long state_filter, struct task_struct *p) 9191 { 9192 unsigned int state = READ_ONCE(p->__state); 9193 9194 /* no filter, everything matches */ 9195 if (!state_filter) 9196 return true; 9197 9198 /* filter, but doesn't match */ 9199 if (!(state & state_filter)) 9200 return false; 9201 9202 /* 9203 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows 9204 * TASK_KILLABLE). 
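 *
 * (TASK_IDLE is TASK_UNINTERRUPTIBLE | TASK_NOLOAD, which is why the
 *  TASK_NOLOAD bit is what gets tested below.)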
9205 */ 9206 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD)) 9207 return false; 9208 9209 return true; 9210 } 9211 9212 9213 void show_state_filter(unsigned int state_filter) 9214 { 9215 struct task_struct *g, *p; 9216 9217 rcu_read_lock(); 9218 for_each_process_thread(g, p) { 9219 /* 9220 * reset the NMI-timeout, listing all files on a slow 9221 * console might take a lot of time: 9222 * Also, reset softlockup watchdogs on all CPUs, because 9223 * another CPU might be blocked waiting for us to process 9224 * an IPI. 9225 */ 9226 touch_nmi_watchdog(); 9227 touch_all_softlockup_watchdogs(); 9228 if (state_filter_match(state_filter, p)) 9229 sched_show_task(p); 9230 } 9231 9232 #ifdef CONFIG_SCHED_DEBUG 9233 if (!state_filter) 9234 sysrq_sched_debug_show(); 9235 #endif 9236 rcu_read_unlock(); 9237 /* 9238 * Only show locks if all tasks are dumped: 9239 */ 9240 if (!state_filter) 9241 debug_show_all_locks(); 9242 } 9243 9244 /** 9245 * init_idle - set up an idle thread for a given CPU 9246 * @idle: task in question 9247 * @cpu: CPU the idle task belongs to 9248 * 9249 * NOTE: this function does not set the idle thread's NEED_RESCHED 9250 * flag, to make booting more robust. 9251 */ 9252 void __init init_idle(struct task_struct *idle, int cpu) 9253 { 9254 #ifdef CONFIG_SMP 9255 struct affinity_context ac = (struct affinity_context) { 9256 .new_mask = cpumask_of(cpu), 9257 .flags = 0, 9258 }; 9259 #endif 9260 struct rq *rq = cpu_rq(cpu); 9261 unsigned long flags; 9262 9263 __sched_fork(0, idle); 9264 9265 raw_spin_lock_irqsave(&idle->pi_lock, flags); 9266 raw_spin_rq_lock(rq); 9267 9268 idle->__state = TASK_RUNNING; 9269 idle->se.exec_start = sched_clock(); 9270 /* 9271 * PF_KTHREAD should already be set at this point; regardless, make it 9272 * look like a proper per-CPU kthread. 9273 */ 9274 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY; 9275 kthread_set_per_cpu(idle, cpu); 9276 9277 #ifdef CONFIG_SMP 9278 /* 9279 * It's possible that init_idle() gets called multiple times on a task, 9280 * in that case do_set_cpus_allowed() will not do the right thing. 9281 * 9282 * And since this is boot we can forgo the serialization. 9283 */ 9284 set_cpus_allowed_common(idle, &ac); 9285 #endif 9286 /* 9287 * We're having a chicken and egg problem, even though we are 9288 * holding rq->lock, the CPU isn't yet set to this CPU so the 9289 * lockdep check in task_group() will fail. 9290 * 9291 * Similar case to sched_fork(). / Alternatively we could 9292 * use task_rq_lock() here and obtain the other rq->lock. 9293 * 9294 * Silence PROVE_RCU 9295 */ 9296 rcu_read_lock(); 9297 __set_task_cpu(idle, cpu); 9298 rcu_read_unlock(); 9299 9300 rq->idle = idle; 9301 rcu_assign_pointer(rq->curr, idle); 9302 idle->on_rq = TASK_ON_RQ_QUEUED; 9303 #ifdef CONFIG_SMP 9304 idle->on_cpu = 1; 9305 #endif 9306 raw_spin_rq_unlock(rq); 9307 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 9308 9309 /* Set the preempt count _outside_ the spinlocks! 
*/ 9310 init_idle_preempt_count(idle, cpu); 9311 9312 /* 9313 * The idle tasks have their own, simple scheduling class: 9314 */ 9315 idle->sched_class = &idle_sched_class; 9316 ftrace_graph_init_idle_task(idle, cpu); 9317 vtime_init_idle(idle, cpu); 9318 #ifdef CONFIG_SMP 9319 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 9320 #endif 9321 } 9322 9323 #ifdef CONFIG_SMP 9324 9325 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 9326 const struct cpumask *trial) 9327 { 9328 int ret = 1; 9329 9330 if (cpumask_empty(cur)) 9331 return ret; 9332 9333 ret = dl_cpuset_cpumask_can_shrink(cur, trial); 9334 9335 return ret; 9336 } 9337 9338 int task_can_attach(struct task_struct *p) 9339 { 9340 int ret = 0; 9341 9342 /* 9343 * Kthreads which disallow setaffinity shouldn't be moved 9344 * to a new cpuset; we don't want to change their CPU 9345 * affinity and isolating such threads by their set of 9346 * allowed nodes is unnecessary. Thus, cpusets are not 9347 * applicable for such threads. This prevents checking for 9348 * success of set_cpus_allowed_ptr() on all attached tasks 9349 * before cpus_mask may be changed. 9350 */ 9351 if (p->flags & PF_NO_SETAFFINITY) 9352 ret = -EINVAL; 9353 9354 return ret; 9355 } 9356 9357 bool sched_smp_initialized __read_mostly; 9358 9359 #ifdef CONFIG_NUMA_BALANCING 9360 /* Migrate current task p to target_cpu */ 9361 int migrate_task_to(struct task_struct *p, int target_cpu) 9362 { 9363 struct migration_arg arg = { p, target_cpu }; 9364 int curr_cpu = task_cpu(p); 9365 9366 if (curr_cpu == target_cpu) 9367 return 0; 9368 9369 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 9370 return -EINVAL; 9371 9372 /* TODO: This is not properly updating schedstats */ 9373 9374 trace_sched_move_numa(p, curr_cpu, target_cpu); 9375 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 9376 } 9377 9378 /* 9379 * Requeue a task on a given node and accurately track the number of NUMA 9380 * tasks on the runqueues 9381 */ 9382 void sched_setnuma(struct task_struct *p, int nid) 9383 { 9384 bool queued, running; 9385 struct rq_flags rf; 9386 struct rq *rq; 9387 9388 rq = task_rq_lock(p, &rf); 9389 queued = task_on_rq_queued(p); 9390 running = task_current(rq, p); 9391 9392 if (queued) 9393 dequeue_task(rq, p, DEQUEUE_SAVE); 9394 if (running) 9395 put_prev_task(rq, p); 9396 9397 p->numa_preferred_nid = nid; 9398 9399 if (queued) 9400 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 9401 if (running) 9402 set_next_task(rq, p); 9403 task_rq_unlock(rq, p, &rf); 9404 } 9405 #endif /* CONFIG_NUMA_BALANCING */ 9406 9407 #ifdef CONFIG_HOTPLUG_CPU 9408 /* 9409 * Ensure that the idle task is using init_mm right before its CPU goes 9410 * offline. 
9411 */ 9412 void idle_task_exit(void) 9413 { 9414 struct mm_struct *mm = current->active_mm; 9415 9416 BUG_ON(cpu_online(smp_processor_id())); 9417 BUG_ON(current != this_rq()->idle); 9418 9419 if (mm != &init_mm) { 9420 switch_mm(mm, &init_mm, current); 9421 finish_arch_post_lock_switch(); 9422 } 9423 9424 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ 9425 } 9426 9427 static int __balance_push_cpu_stop(void *arg) 9428 { 9429 struct task_struct *p = arg; 9430 struct rq *rq = this_rq(); 9431 struct rq_flags rf; 9432 int cpu; 9433 9434 raw_spin_lock_irq(&p->pi_lock); 9435 rq_lock(rq, &rf); 9436 9437 update_rq_clock(rq); 9438 9439 if (task_rq(p) == rq && task_on_rq_queued(p)) { 9440 cpu = select_fallback_rq(rq->cpu, p); 9441 rq = __migrate_task(rq, &rf, p, cpu); 9442 } 9443 9444 rq_unlock(rq, &rf); 9445 raw_spin_unlock_irq(&p->pi_lock); 9446 9447 put_task_struct(p); 9448 9449 return 0; 9450 } 9451 9452 static DEFINE_PER_CPU(struct cpu_stop_work, push_work); 9453 9454 /* 9455 * Ensure we only run per-cpu kthreads once the CPU goes !active. 9456 * 9457 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only 9458 * effective when the hotplug motion is down. 9459 */ 9460 static void balance_push(struct rq *rq) 9461 { 9462 struct task_struct *push_task = rq->curr; 9463 9464 lockdep_assert_rq_held(rq); 9465 9466 /* 9467 * Ensure the thing is persistent until balance_push_set(.on = false); 9468 */ 9469 rq->balance_callback = &balance_push_callback; 9470 9471 /* 9472 * Only active while going offline and when invoked on the outgoing 9473 * CPU. 9474 */ 9475 if (!cpu_dying(rq->cpu) || rq != this_rq()) 9476 return; 9477 9478 /* 9479 * Both the cpu-hotplug and stop task are in this case and are 9480 * required to complete the hotplug process. 9481 */ 9482 if (kthread_is_per_cpu(push_task) || 9483 is_migration_disabled(push_task)) { 9484 9485 /* 9486 * If this is the idle task on the outgoing CPU try to wake 9487 * up the hotplug control thread which might wait for the 9488 * last task to vanish. The rcuwait_active() check is 9489 * accurate here because the waiter is pinned on this CPU 9490 * and can't obviously be running in parallel. 9491 * 9492 * On RT kernels this also has to check whether there are 9493 * pinned and scheduled out tasks on the runqueue. They 9494 * need to leave the migrate disabled section first. 9495 */ 9496 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && 9497 rcuwait_active(&rq->hotplug_wait)) { 9498 raw_spin_rq_unlock(rq); 9499 rcuwait_wake_up(&rq->hotplug_wait); 9500 raw_spin_rq_lock(rq); 9501 } 9502 return; 9503 } 9504 9505 get_task_struct(push_task); 9506 /* 9507 * Temporarily drop rq->lock such that we can wake-up the stop task. 9508 * Both preemption and IRQs are still disabled. 9509 */ 9510 preempt_disable(); 9511 raw_spin_rq_unlock(rq); 9512 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, 9513 this_cpu_ptr(&push_work)); 9514 preempt_enable(); 9515 /* 9516 * At this point need_resched() is true and we'll take the loop in 9517 * schedule(). The next pick is obviously going to be the stop task 9518 * which kthread_is_per_cpu() and will push this task away. 
9519 */ 9520 raw_spin_rq_lock(rq); 9521 } 9522 9523 static void balance_push_set(int cpu, bool on) 9524 { 9525 struct rq *rq = cpu_rq(cpu); 9526 struct rq_flags rf; 9527 9528 rq_lock_irqsave(rq, &rf); 9529 if (on) { 9530 WARN_ON_ONCE(rq->balance_callback); 9531 rq->balance_callback = &balance_push_callback; 9532 } else if (rq->balance_callback == &balance_push_callback) { 9533 rq->balance_callback = NULL; 9534 } 9535 rq_unlock_irqrestore(rq, &rf); 9536 } 9537 9538 /* 9539 * Invoked from a CPUs hotplug control thread after the CPU has been marked 9540 * inactive. All tasks which are not per CPU kernel threads are either 9541 * pushed off this CPU now via balance_push() or placed on a different CPU 9542 * during wakeup. Wait until the CPU is quiescent. 9543 */ 9544 static void balance_hotplug_wait(void) 9545 { 9546 struct rq *rq = this_rq(); 9547 9548 rcuwait_wait_event(&rq->hotplug_wait, 9549 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), 9550 TASK_UNINTERRUPTIBLE); 9551 } 9552 9553 #else 9554 9555 static inline void balance_push(struct rq *rq) 9556 { 9557 } 9558 9559 static inline void balance_push_set(int cpu, bool on) 9560 { 9561 } 9562 9563 static inline void balance_hotplug_wait(void) 9564 { 9565 } 9566 9567 #endif /* CONFIG_HOTPLUG_CPU */ 9568 9569 void set_rq_online(struct rq *rq) 9570 { 9571 if (!rq->online) { 9572 const struct sched_class *class; 9573 9574 cpumask_set_cpu(rq->cpu, rq->rd->online); 9575 rq->online = 1; 9576 9577 for_each_class(class) { 9578 if (class->rq_online) 9579 class->rq_online(rq); 9580 } 9581 } 9582 } 9583 9584 void set_rq_offline(struct rq *rq) 9585 { 9586 if (rq->online) { 9587 const struct sched_class *class; 9588 9589 update_rq_clock(rq); 9590 for_each_class(class) { 9591 if (class->rq_offline) 9592 class->rq_offline(rq); 9593 } 9594 9595 cpumask_clear_cpu(rq->cpu, rq->rd->online); 9596 rq->online = 0; 9597 } 9598 } 9599 9600 static inline void sched_set_rq_online(struct rq *rq, int cpu) 9601 { 9602 struct rq_flags rf; 9603 9604 rq_lock_irqsave(rq, &rf); 9605 if (rq->rd) { 9606 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 9607 set_rq_online(rq); 9608 } 9609 rq_unlock_irqrestore(rq, &rf); 9610 } 9611 9612 static inline void sched_set_rq_offline(struct rq *rq, int cpu) 9613 { 9614 struct rq_flags rf; 9615 9616 rq_lock_irqsave(rq, &rf); 9617 if (rq->rd) { 9618 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 9619 set_rq_offline(rq); 9620 } 9621 rq_unlock_irqrestore(rq, &rf); 9622 } 9623 9624 /* 9625 * used to mark begin/end of suspend/resume: 9626 */ 9627 static int num_cpus_frozen; 9628 9629 /* 9630 * Update cpusets according to cpu_active mask. If cpusets are 9631 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 9632 * around partition_sched_domains(). 9633 * 9634 * If we come here as part of a suspend/resume, don't touch cpusets because we 9635 * want to restore it back to its original state upon resume anyway. 9636 */ 9637 static void cpuset_cpu_active(void) 9638 { 9639 if (cpuhp_tasks_frozen) { 9640 /* 9641 * num_cpus_frozen tracks how many CPUs are involved in suspend 9642 * resume sequence. As long as this is not the last online 9643 * operation in the resume sequence, just build a single sched 9644 * domain, ignoring cpusets. 9645 */ 9646 partition_sched_domains(1, NULL, NULL); 9647 if (--num_cpus_frozen) 9648 return; 9649 /* 9650 * This is the last CPU online operation. So fall through and 9651 * restore the original sched domains by considering the 9652 * cpuset configurations. 
9653 */ 9654 cpuset_force_rebuild(); 9655 } 9656 cpuset_update_active_cpus(); 9657 } 9658 9659 static int cpuset_cpu_inactive(unsigned int cpu) 9660 { 9661 if (!cpuhp_tasks_frozen) { 9662 int ret = dl_bw_check_overflow(cpu); 9663 9664 if (ret) 9665 return ret; 9666 cpuset_update_active_cpus(); 9667 } else { 9668 num_cpus_frozen++; 9669 partition_sched_domains(1, NULL, NULL); 9670 } 9671 return 0; 9672 } 9673 9674 static inline void sched_smt_present_inc(int cpu) 9675 { 9676 #ifdef CONFIG_SCHED_SMT 9677 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 9678 static_branch_inc_cpuslocked(&sched_smt_present); 9679 #endif 9680 } 9681 9682 static inline void sched_smt_present_dec(int cpu) 9683 { 9684 #ifdef CONFIG_SCHED_SMT 9685 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 9686 static_branch_dec_cpuslocked(&sched_smt_present); 9687 #endif 9688 } 9689 9690 int sched_cpu_activate(unsigned int cpu) 9691 { 9692 struct rq *rq = cpu_rq(cpu); 9693 9694 /* 9695 * Clear the balance_push callback and prepare to schedule 9696 * regular tasks. 9697 */ 9698 balance_push_set(cpu, false); 9699 9700 /* 9701 * When going up, increment the number of cores with SMT present. 9702 */ 9703 sched_smt_present_inc(cpu); 9704 set_cpu_active(cpu, true); 9705 9706 if (sched_smp_initialized) { 9707 sched_update_numa(cpu, true); 9708 sched_domains_numa_masks_set(cpu); 9709 cpuset_cpu_active(); 9710 } 9711 9712 /* 9713 * Put the rq online, if not already. This happens: 9714 * 9715 * 1) In the early boot process, because we build the real domains 9716 * after all CPUs have been brought up. 9717 * 9718 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the 9719 * domains. 9720 */ 9721 sched_set_rq_online(rq, cpu); 9722 9723 return 0; 9724 } 9725 9726 int sched_cpu_deactivate(unsigned int cpu) 9727 { 9728 struct rq *rq = cpu_rq(cpu); 9729 int ret; 9730 9731 /* 9732 * Remove CPU from nohz.idle_cpus_mask to prevent participating in 9733 * load balancing when not active 9734 */ 9735 nohz_balance_exit_idle(rq); 9736 9737 set_cpu_active(cpu, false); 9738 9739 /* 9740 * From this point forward, this CPU will refuse to run any task that 9741 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively 9742 * push those tasks away until this gets cleared, see 9743 * sched_cpu_dying(). 9744 */ 9745 balance_push_set(cpu, true); 9746 9747 /* 9748 * We've cleared cpu_active_mask / set balance_push, wait for all 9749 * preempt-disabled and RCU users of this state to go away such that 9750 * all new such users will observe it. 9751 * 9752 * Specifically, we rely on ttwu to no longer target this CPU, see 9753 * ttwu_queue_cond() and is_cpu_allowed(). 9754 * 9755 * Do sync before park smpboot threads to take care the rcu boost case. 9756 */ 9757 synchronize_rcu(); 9758 9759 sched_set_rq_offline(rq, cpu); 9760 9761 /* 9762 * When going down, decrement the number of cores with SMT present. 
9763 */ 9764 sched_smt_present_dec(cpu); 9765 9766 #ifdef CONFIG_SCHED_SMT 9767 sched_core_cpu_deactivate(cpu); 9768 #endif 9769 9770 if (!sched_smp_initialized) 9771 return 0; 9772 9773 sched_update_numa(cpu, false); 9774 ret = cpuset_cpu_inactive(cpu); 9775 if (ret) { 9776 sched_smt_present_inc(cpu); 9777 sched_set_rq_online(rq, cpu); 9778 balance_push_set(cpu, false); 9779 set_cpu_active(cpu, true); 9780 sched_update_numa(cpu, true); 9781 return ret; 9782 } 9783 sched_domains_numa_masks_clear(cpu); 9784 return 0; 9785 } 9786 9787 static void sched_rq_cpu_starting(unsigned int cpu) 9788 { 9789 struct rq *rq = cpu_rq(cpu); 9790 9791 rq->calc_load_update = calc_load_update; 9792 update_max_interval(); 9793 } 9794 9795 int sched_cpu_starting(unsigned int cpu) 9796 { 9797 sched_core_cpu_starting(cpu); 9798 sched_rq_cpu_starting(cpu); 9799 sched_tick_start(cpu); 9800 return 0; 9801 } 9802 9803 #ifdef CONFIG_HOTPLUG_CPU 9804 9805 /* 9806 * Invoked immediately before the stopper thread is invoked to bring the 9807 * CPU down completely. At this point all per CPU kthreads except the 9808 * hotplug thread (current) and the stopper thread (inactive) have been 9809 * either parked or have been unbound from the outgoing CPU. Ensure that 9810 * any of those which might be on the way out are gone. 9811 * 9812 * If after this point a bound task is being woken on this CPU then the 9813 * responsible hotplug callback has failed to do it's job. 9814 * sched_cpu_dying() will catch it with the appropriate fireworks. 9815 */ 9816 int sched_cpu_wait_empty(unsigned int cpu) 9817 { 9818 balance_hotplug_wait(); 9819 return 0; 9820 } 9821 9822 /* 9823 * Since this CPU is going 'away' for a while, fold any nr_active delta we 9824 * might have. Called from the CPU stopper task after ensuring that the 9825 * stopper is the last running task on the CPU, so nr_active count is 9826 * stable. We need to take the teardown thread which is calling this into 9827 * account, so we hand in adjust = 1 to the load calculation. 9828 * 9829 * Also see the comment "Global load-average calculations". 
9830 */ 9831 static void calc_load_migrate(struct rq *rq) 9832 { 9833 long delta = calc_load_fold_active(rq, 1); 9834 9835 if (delta) 9836 atomic_long_add(delta, &calc_load_tasks); 9837 } 9838 9839 static void dump_rq_tasks(struct rq *rq, const char *loglvl) 9840 { 9841 struct task_struct *g, *p; 9842 int cpu = cpu_of(rq); 9843 9844 lockdep_assert_rq_held(rq); 9845 9846 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); 9847 for_each_process_thread(g, p) { 9848 if (task_cpu(p) != cpu) 9849 continue; 9850 9851 if (!task_on_rq_queued(p)) 9852 continue; 9853 9854 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); 9855 } 9856 } 9857 9858 int sched_cpu_dying(unsigned int cpu) 9859 { 9860 struct rq *rq = cpu_rq(cpu); 9861 struct rq_flags rf; 9862 9863 /* Handle pending wakeups and then migrate everything off */ 9864 sched_tick_stop(cpu); 9865 9866 rq_lock_irqsave(rq, &rf); 9867 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { 9868 WARN(true, "Dying CPU not properly vacated!"); 9869 dump_rq_tasks(rq, KERN_WARNING); 9870 } 9871 rq_unlock_irqrestore(rq, &rf); 9872 9873 calc_load_migrate(rq); 9874 update_max_interval(); 9875 hrtick_clear(rq); 9876 sched_core_cpu_dying(cpu); 9877 return 0; 9878 } 9879 #endif 9880 9881 void __init sched_init_smp(void) 9882 { 9883 sched_init_numa(NUMA_NO_NODE); 9884 9885 /* 9886 * There's no userspace yet to cause hotplug operations; hence all the 9887 * CPU masks are stable and all blatant races in the below code cannot 9888 * happen. 9889 */ 9890 mutex_lock(&sched_domains_mutex); 9891 sched_init_domains(cpu_active_mask); 9892 mutex_unlock(&sched_domains_mutex); 9893 9894 /* Move init over to a non-isolated CPU */ 9895 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0) 9896 BUG(); 9897 current->flags &= ~PF_NO_SETAFFINITY; 9898 sched_init_granularity(); 9899 9900 init_sched_rt_class(); 9901 init_sched_dl_class(); 9902 9903 sched_smp_initialized = true; 9904 } 9905 9906 static int __init migration_init(void) 9907 { 9908 sched_cpu_starting(smp_processor_id()); 9909 return 0; 9910 } 9911 early_initcall(migration_init); 9912 9913 #else 9914 void __init sched_init_smp(void) 9915 { 9916 sched_init_granularity(); 9917 } 9918 #endif /* CONFIG_SMP */ 9919 9920 int in_sched_functions(unsigned long addr) 9921 { 9922 return in_lock_functions(addr) || 9923 (addr >= (unsigned long)__sched_text_start 9924 && addr < (unsigned long)__sched_text_end); 9925 } 9926 9927 #ifdef CONFIG_CGROUP_SCHED 9928 /* 9929 * Default task group. 9930 * Every task in system belongs to this group at bootup. 
9931 */ 9932 struct task_group root_task_group; 9933 LIST_HEAD(task_groups); 9934 9935 /* Cacheline aligned slab cache for task_group */ 9936 static struct kmem_cache *task_group_cache __read_mostly; 9937 #endif 9938 9939 void __init sched_init(void) 9940 { 9941 unsigned long ptr = 0; 9942 int i; 9943 9944 /* Make sure the linker didn't screw up */ 9945 BUG_ON(&idle_sched_class != &fair_sched_class + 1 || 9946 &fair_sched_class != &rt_sched_class + 1 || 9947 &rt_sched_class != &dl_sched_class + 1); 9948 #ifdef CONFIG_SMP 9949 BUG_ON(&dl_sched_class != &stop_sched_class + 1); 9950 #endif 9951 9952 wait_bit_init(); 9953 9954 #ifdef CONFIG_FAIR_GROUP_SCHED 9955 ptr += 2 * nr_cpu_ids * sizeof(void **); 9956 #endif 9957 #ifdef CONFIG_RT_GROUP_SCHED 9958 ptr += 2 * nr_cpu_ids * sizeof(void **); 9959 #endif 9960 if (ptr) { 9961 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); 9962 9963 #ifdef CONFIG_FAIR_GROUP_SCHED 9964 root_task_group.se = (struct sched_entity **)ptr; 9965 ptr += nr_cpu_ids * sizeof(void **); 9966 9967 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 9968 ptr += nr_cpu_ids * sizeof(void **); 9969 9970 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 9971 init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL); 9972 #endif /* CONFIG_FAIR_GROUP_SCHED */ 9973 #ifdef CONFIG_RT_GROUP_SCHED 9974 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 9975 ptr += nr_cpu_ids * sizeof(void **); 9976 9977 root_task_group.rt_rq = (struct rt_rq **)ptr; 9978 ptr += nr_cpu_ids * sizeof(void **); 9979 9980 #endif /* CONFIG_RT_GROUP_SCHED */ 9981 } 9982 9983 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); 9984 9985 #ifdef CONFIG_SMP 9986 init_defrootdomain(); 9987 #endif 9988 9989 #ifdef CONFIG_RT_GROUP_SCHED 9990 init_rt_bandwidth(&root_task_group.rt_bandwidth, 9991 global_rt_period(), global_rt_runtime()); 9992 #endif /* CONFIG_RT_GROUP_SCHED */ 9993 9994 #ifdef CONFIG_CGROUP_SCHED 9995 task_group_cache = KMEM_CACHE(task_group, 0); 9996 9997 list_add(&root_task_group.list, &task_groups); 9998 INIT_LIST_HEAD(&root_task_group.children); 9999 INIT_LIST_HEAD(&root_task_group.siblings); 10000 autogroup_init(&init_task); 10001 #endif /* CONFIG_CGROUP_SCHED */ 10002 10003 for_each_possible_cpu(i) { 10004 struct rq *rq; 10005 10006 rq = cpu_rq(i); 10007 raw_spin_lock_init(&rq->__lock); 10008 rq->nr_running = 0; 10009 rq->calc_load_active = 0; 10010 rq->calc_load_update = jiffies + LOAD_FREQ; 10011 init_cfs_rq(&rq->cfs); 10012 init_rt_rq(&rq->rt); 10013 init_dl_rq(&rq->dl); 10014 #ifdef CONFIG_FAIR_GROUP_SCHED 10015 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 10016 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 10017 /* 10018 * How much CPU bandwidth does root_task_group get? 10019 * 10020 * In case of task-groups formed thr' the cgroup filesystem, it 10021 * gets 100% of the CPU resources in the system. This overall 10022 * system CPU resource is divided among the tasks of 10023 * root_task_group and its child task-groups in a fair manner, 10024 * based on each entity's (task or task-group's) weight 10025 * (se->load.weight). 10026 * 10027 * In other words, if root_task_group has 10 tasks of weight 10028 * 1024) and two child groups A0 and A1 (of weight 1024 each), 10029 * then A0's share of the CPU resource is: 10030 * 10031 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 10032 * 10033 * We achieve this by letting root_task_group's tasks sit 10034 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 
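 *
 * (More generally, an entity's share at a given level is its weight
 *  divided by the total weight at that level, itself included; the
 *  8.33% above is 1024 / 12288.)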
10035 */ 10036 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 10037 #endif /* CONFIG_FAIR_GROUP_SCHED */ 10038 10039 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 10040 #ifdef CONFIG_RT_GROUP_SCHED 10041 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 10042 #endif 10043 #ifdef CONFIG_SMP 10044 rq->sd = NULL; 10045 rq->rd = NULL; 10046 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 10047 rq->balance_callback = &balance_push_callback; 10048 rq->active_balance = 0; 10049 rq->next_balance = jiffies; 10050 rq->push_cpu = 0; 10051 rq->cpu = i; 10052 rq->online = 0; 10053 rq->idle_stamp = 0; 10054 rq->avg_idle = 2*sysctl_sched_migration_cost; 10055 rq->wake_stamp = jiffies; 10056 rq->wake_avg_idle = rq->avg_idle; 10057 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 10058 10059 INIT_LIST_HEAD(&rq->cfs_tasks); 10060 10061 rq_attach_root(rq, &def_root_domain); 10062 #ifdef CONFIG_NO_HZ_COMMON 10063 rq->last_blocked_load_update_tick = jiffies; 10064 atomic_set(&rq->nohz_flags, 0); 10065 10066 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); 10067 #endif 10068 #ifdef CONFIG_HOTPLUG_CPU 10069 rcuwait_init(&rq->hotplug_wait); 10070 #endif 10071 #endif /* CONFIG_SMP */ 10072 hrtick_rq_init(rq); 10073 atomic_set(&rq->nr_iowait, 0); 10074 10075 #ifdef CONFIG_SCHED_CORE 10076 rq->core = rq; 10077 rq->core_pick = NULL; 10078 rq->core_enabled = 0; 10079 rq->core_tree = RB_ROOT; 10080 rq->core_forceidle_count = 0; 10081 rq->core_forceidle_occupation = 0; 10082 rq->core_forceidle_start = 0; 10083 10084 rq->core_cookie = 0UL; 10085 #endif 10086 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i)); 10087 } 10088 10089 set_load_weight(&init_task, false); 10090 10091 /* 10092 * The boot idle thread does lazy MMU switching as well: 10093 */ 10094 mmgrab_lazy_tlb(&init_mm); 10095 enter_lazy_tlb(&init_mm, current); 10096 10097 /* 10098 * The idle task doesn't need the kthread struct to function, but it 10099 * is dressed up as a per-CPU kthread and thus needs to play the part 10100 * if we want to avoid special-casing it in code that deals with per-CPU 10101 * kthreads. 10102 */ 10103 WARN_ON(!set_kthread_struct(current)); 10104 10105 /* 10106 * Make us the idle thread. Technically, schedule() should not be 10107 * called from this thread, however somewhere below it might be, 10108 * but because we are the idle thread, we just pick up running again 10109 * when this runqueue becomes "idle". 10110 */ 10111 init_idle(current, smp_processor_id()); 10112 10113 calc_load_update = jiffies + LOAD_FREQ; 10114 10115 #ifdef CONFIG_SMP 10116 idle_thread_set_boot_cpu(); 10117 balance_push_set(smp_processor_id(), false); 10118 #endif 10119 init_sched_fair_class(); 10120 10121 psi_init(); 10122 10123 init_uclamp(); 10124 10125 preempt_dynamic_init(); 10126 10127 scheduler_running = 1; 10128 } 10129 10130 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 10131 10132 void __might_sleep(const char *file, int line) 10133 { 10134 unsigned int state = get_current_state(); 10135 /* 10136 * Blocking primitives will set (and therefore destroy) current->state, 10137 * since we will exit with TASK_RUNNING make sure we enter with it, 10138 * otherwise we will destroy state. 
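 *
 * A hypothetical offender this check is meant to catch (sketch; the mutex
 * name is made up):
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	mutex_lock(&some_mutex);	// may sleep: leaves state at TASK_RUNNING
 *	schedule();			// the TASK_UNINTERRUPTIBLE above is lost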
10139 */ 10140 WARN_ONCE(state != TASK_RUNNING && current->task_state_change, 10141 "do not call blocking ops when !TASK_RUNNING; " 10142 "state=%x set at [<%p>] %pS\n", state, 10143 (void *)current->task_state_change, 10144 (void *)current->task_state_change); 10145 10146 __might_resched(file, line, 0); 10147 } 10148 EXPORT_SYMBOL(__might_sleep); 10149 10150 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip) 10151 { 10152 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT)) 10153 return; 10154 10155 if (preempt_count() == preempt_offset) 10156 return; 10157 10158 pr_err("Preemption disabled at:"); 10159 print_ip_sym(KERN_ERR, ip); 10160 } 10161 10162 static inline bool resched_offsets_ok(unsigned int offsets) 10163 { 10164 unsigned int nested = preempt_count(); 10165 10166 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT; 10167 10168 return nested == offsets; 10169 } 10170 10171 void __might_resched(const char *file, int line, unsigned int offsets) 10172 { 10173 /* Ratelimiting timestamp: */ 10174 static unsigned long prev_jiffy; 10175 10176 unsigned long preempt_disable_ip; 10177 10178 /* WARN_ON_ONCE() by default, no rate limit required: */ 10179 rcu_sleep_check(); 10180 10181 if ((resched_offsets_ok(offsets) && !irqs_disabled() && 10182 !is_idle_task(current) && !current->non_block_count) || 10183 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 10184 oops_in_progress) 10185 return; 10186 10187 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 10188 return; 10189 prev_jiffy = jiffies; 10190 10191 /* Save this before calling printk(), since that will clobber it: */ 10192 preempt_disable_ip = get_preempt_disable_ip(current); 10193 10194 pr_err("BUG: sleeping function called from invalid context at %s:%d\n", 10195 file, line); 10196 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", 10197 in_atomic(), irqs_disabled(), current->non_block_count, 10198 current->pid, current->comm); 10199 pr_err("preempt_count: %x, expected: %x\n", preempt_count(), 10200 offsets & MIGHT_RESCHED_PREEMPT_MASK); 10201 10202 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) { 10203 pr_err("RCU nest depth: %d, expected: %u\n", 10204 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT); 10205 } 10206 10207 if (task_stack_end_corrupted(current)) 10208 pr_emerg("Thread overran stack, or stack corrupted\n"); 10209 10210 debug_show_held_locks(current); 10211 if (irqs_disabled()) 10212 print_irqtrace_events(current); 10213 10214 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK, 10215 preempt_disable_ip); 10216 10217 dump_stack(); 10218 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 10219 } 10220 EXPORT_SYMBOL(__might_resched); 10221 10222 void __cant_sleep(const char *file, int line, int preempt_offset) 10223 { 10224 static unsigned long prev_jiffy; 10225 10226 if (irqs_disabled()) 10227 return; 10228 10229 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 10230 return; 10231 10232 if (preempt_count() > preempt_offset) 10233 return; 10234 10235 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 10236 return; 10237 prev_jiffy = jiffies; 10238 10239 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 10240 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 10241 in_atomic(), irqs_disabled(), 10242 current->pid, current->comm); 10243 10244 debug_show_held_locks(current); 10245 dump_stack(); 10246 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 10247 } 10248 EXPORT_SYMBOL_GPL(__cant_sleep); 10249 10250 #ifdef CONFIG_SMP 
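/*
 * Rough usage sketch (not part of the original file): code that relies on
 * staying on one CPU can assert it through the cant_migrate() wrapper
 * around the helper below, e.g.:
 *
 *	migrate_disable();
 *	cant_migrate();				// calls __cant_migrate() when debugging is on
 *	__this_cpu_inc(hypothetical_counter);	// made-up per-CPU variable
 *	migrate_enable();
 */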
10251 void __cant_migrate(const char *file, int line) 10252 { 10253 static unsigned long prev_jiffy; 10254 10255 if (irqs_disabled()) 10256 return; 10257 10258 if (is_migration_disabled(current)) 10259 return; 10260 10261 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 10262 return; 10263 10264 if (preempt_count() > 0) 10265 return; 10266 10267 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 10268 return; 10269 prev_jiffy = jiffies; 10270 10271 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line); 10272 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n", 10273 in_atomic(), irqs_disabled(), is_migration_disabled(current), 10274 current->pid, current->comm); 10275 10276 debug_show_held_locks(current); 10277 dump_stack(); 10278 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 10279 } 10280 EXPORT_SYMBOL_GPL(__cant_migrate); 10281 #endif 10282 #endif 10283 10284 #ifdef CONFIG_MAGIC_SYSRQ 10285 void normalize_rt_tasks(void) 10286 { 10287 struct task_struct *g, *p; 10288 struct sched_attr attr = { 10289 .sched_policy = SCHED_NORMAL, 10290 }; 10291 10292 read_lock(&tasklist_lock); 10293 for_each_process_thread(g, p) { 10294 /* 10295 * Only normalize user tasks: 10296 */ 10297 if (p->flags & PF_KTHREAD) 10298 continue; 10299 10300 p->se.exec_start = 0; 10301 schedstat_set(p->stats.wait_start, 0); 10302 schedstat_set(p->stats.sleep_start, 0); 10303 schedstat_set(p->stats.block_start, 0); 10304 10305 if (!dl_task(p) && !rt_task(p)) { 10306 /* 10307 * Renice negative nice level userspace 10308 * tasks back to 0: 10309 */ 10310 if (task_nice(p) < 0) 10311 set_user_nice(p, 0); 10312 continue; 10313 } 10314 10315 __sched_setscheduler(p, &attr, false, false); 10316 } 10317 read_unlock(&tasklist_lock); 10318 } 10319 10320 #endif /* CONFIG_MAGIC_SYSRQ */ 10321 10322 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 10323 /* 10324 * These functions are only useful for the IA64 MCA handling, or kdb. 10325 * 10326 * They can only be called when the whole system has been 10327 * stopped - every CPU needs to be quiescent, and no scheduling 10328 * activity can take place. Using them for anything else would 10329 * be a serious bug, and as a result, they aren't even visible 10330 * under any other configuration. 10331 */ 10332 10333 /** 10334 * curr_task - return the current task for a given CPU. 10335 * @cpu: the processor in question. 10336 * 10337 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 10338 * 10339 * Return: The current task for @cpu. 10340 */ 10341 struct task_struct *curr_task(int cpu) 10342 { 10343 return cpu_curr(cpu); 10344 } 10345 10346 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 10347 10348 #ifdef CONFIG_IA64 10349 /** 10350 * ia64_set_curr_task - set the current task for a given CPU. 10351 * @cpu: the processor in question. 10352 * @p: the task pointer to set. 10353 * 10354 * Description: This function must only be used when non-maskable interrupts 10355 * are serviced on a separate stack. It allows the architecture to switch the 10356 * notion of the current task on a CPU in a non-blocking manner. This function 10357 * must be called with all CPU's synchronized, and interrupts disabled, the 10358 * and caller must save the original value of the current task (see 10359 * curr_task() above) and restore that value before reenabling interrupts and 10360 * re-starting the system. 10361 * 10362 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 
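 *
 * Sketch of the expected save/restore sequence (hypothetical caller and
 * @new_task):
 *
 *	struct task_struct *orig = curr_task(cpu);
 *	ia64_set_curr_task(cpu, new_task);
 *	// ... work done while the whole system is stopped ...
 *	ia64_set_curr_task(cpu, orig);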
10363 */ 10364 void ia64_set_curr_task(int cpu, struct task_struct *p) 10365 { 10366 cpu_curr(cpu) = p; 10367 } 10368 10369 #endif 10370 10371 #ifdef CONFIG_CGROUP_SCHED 10372 /* task_group_lock serializes the addition/removal of task groups */ 10373 static DEFINE_SPINLOCK(task_group_lock); 10374 10375 static inline void alloc_uclamp_sched_group(struct task_group *tg, 10376 struct task_group *parent) 10377 { 10378 #ifdef CONFIG_UCLAMP_TASK_GROUP 10379 enum uclamp_id clamp_id; 10380 10381 for_each_clamp_id(clamp_id) { 10382 uclamp_se_set(&tg->uclamp_req[clamp_id], 10383 uclamp_none(clamp_id), false); 10384 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; 10385 } 10386 #endif 10387 } 10388 10389 static void sched_free_group(struct task_group *tg) 10390 { 10391 free_fair_sched_group(tg); 10392 free_rt_sched_group(tg); 10393 autogroup_free(tg); 10394 kmem_cache_free(task_group_cache, tg); 10395 } 10396 10397 static void sched_free_group_rcu(struct rcu_head *rcu) 10398 { 10399 sched_free_group(container_of(rcu, struct task_group, rcu)); 10400 } 10401 10402 static void sched_unregister_group(struct task_group *tg) 10403 { 10404 unregister_fair_sched_group(tg); 10405 unregister_rt_sched_group(tg); 10406 /* 10407 * We have to wait for yet another RCU grace period to expire, as 10408 * print_cfs_stats() might run concurrently. 10409 */ 10410 call_rcu(&tg->rcu, sched_free_group_rcu); 10411 } 10412 10413 /* allocate runqueue etc for a new task group */ 10414 struct task_group *sched_create_group(struct task_group *parent) 10415 { 10416 struct task_group *tg; 10417 10418 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 10419 if (!tg) 10420 return ERR_PTR(-ENOMEM); 10421 10422 if (!alloc_fair_sched_group(tg, parent)) 10423 goto err; 10424 10425 if (!alloc_rt_sched_group(tg, parent)) 10426 goto err; 10427 10428 alloc_uclamp_sched_group(tg, parent); 10429 10430 return tg; 10431 10432 err: 10433 sched_free_group(tg); 10434 return ERR_PTR(-ENOMEM); 10435 } 10436 10437 void sched_online_group(struct task_group *tg, struct task_group *parent) 10438 { 10439 unsigned long flags; 10440 10441 spin_lock_irqsave(&task_group_lock, flags); 10442 list_add_rcu(&tg->list, &task_groups); 10443 10444 /* Root should already exist: */ 10445 WARN_ON(!parent); 10446 10447 tg->parent = parent; 10448 INIT_LIST_HEAD(&tg->children); 10449 list_add_rcu(&tg->siblings, &parent->children); 10450 spin_unlock_irqrestore(&task_group_lock, flags); 10451 10452 online_fair_sched_group(tg); 10453 } 10454 10455 /* rcu callback to free various structures associated with a task group */ 10456 static void sched_unregister_group_rcu(struct rcu_head *rhp) 10457 { 10458 /* Now it should be safe to free those cfs_rqs: */ 10459 sched_unregister_group(container_of(rhp, struct task_group, rcu)); 10460 } 10461 10462 void sched_destroy_group(struct task_group *tg) 10463 { 10464 /* Wait for possible concurrent references to cfs_rqs complete: */ 10465 call_rcu(&tg->rcu, sched_unregister_group_rcu); 10466 } 10467 10468 void sched_release_group(struct task_group *tg) 10469 { 10470 unsigned long flags; 10471 10472 /* 10473 * Unlink first, to avoid walk_tg_tree_from() from finding us (via 10474 * sched_cfs_period_timer()). 10475 * 10476 * For this to be effective, we have to wait for all pending users of 10477 * this task group to leave their RCU critical section to ensure no new 10478 * user will see our dying task group any more. Specifically ensure 10479 * that tg_unthrottle_up() won't add decayed cfs_rq's to it. 
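 *
 * The resulting teardown sequence is roughly (summary of the callbacks in
 * this file):
 *
 *	cpu_cgroup_css_released() -> sched_release_group()	// unlink tg
 *	    ... RCU grace period ...
 *	cpu_cgroup_css_free()     -> sched_unregister_group()	// unregister
 *	    ... RCU grace period ...
 *	sched_free_group_rcu()    -> sched_free_group()		// free the memory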
10480 * 10481 * We therefore defer calling unregister_fair_sched_group() to 10482 * sched_unregister_group() which is guarantied to get called only after the 10483 * current RCU grace period has expired. 10484 */ 10485 spin_lock_irqsave(&task_group_lock, flags); 10486 list_del_rcu(&tg->list); 10487 list_del_rcu(&tg->siblings); 10488 spin_unlock_irqrestore(&task_group_lock, flags); 10489 } 10490 10491 static struct task_group *sched_get_task_group(struct task_struct *tsk) 10492 { 10493 struct task_group *tg; 10494 10495 /* 10496 * All callers are synchronized by task_rq_lock(); we do not use RCU 10497 * which is pointless here. Thus, we pass "true" to task_css_check() 10498 * to prevent lockdep warnings. 10499 */ 10500 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 10501 struct task_group, css); 10502 tg = autogroup_task_group(tsk, tg); 10503 10504 return tg; 10505 } 10506 10507 static void sched_change_group(struct task_struct *tsk, struct task_group *group) 10508 { 10509 tsk->sched_task_group = group; 10510 10511 #ifdef CONFIG_FAIR_GROUP_SCHED 10512 if (tsk->sched_class->task_change_group) 10513 tsk->sched_class->task_change_group(tsk); 10514 else 10515 #endif 10516 set_task_rq(tsk, task_cpu(tsk)); 10517 } 10518 10519 /* 10520 * Change task's runqueue when it moves between groups. 10521 * 10522 * The caller of this function should have put the task in its new group by 10523 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect 10524 * its new group. 10525 */ 10526 void sched_move_task(struct task_struct *tsk) 10527 { 10528 int queued, running, queue_flags = 10529 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 10530 struct task_group *group; 10531 struct rq_flags rf; 10532 struct rq *rq; 10533 10534 rq = task_rq_lock(tsk, &rf); 10535 /* 10536 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous 10537 * group changes. 10538 */ 10539 group = sched_get_task_group(tsk); 10540 if (group == tsk->sched_task_group) 10541 goto unlock; 10542 10543 update_rq_clock(rq); 10544 10545 running = task_current(rq, tsk); 10546 queued = task_on_rq_queued(tsk); 10547 10548 if (queued) 10549 dequeue_task(rq, tsk, queue_flags); 10550 if (running) 10551 put_prev_task(rq, tsk); 10552 10553 sched_change_group(tsk, group); 10554 10555 if (queued) 10556 enqueue_task(rq, tsk, queue_flags); 10557 if (running) { 10558 set_next_task(rq, tsk); 10559 /* 10560 * After changing group, the running task may have joined a 10561 * throttled one but it's still the running task. Trigger a 10562 * resched to make sure that task can still run. 10563 */ 10564 resched_curr(rq); 10565 } 10566 10567 unlock: 10568 task_rq_unlock(rq, tsk, &rf); 10569 } 10570 10571 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 10572 { 10573 return css ? 
container_of(css, struct task_group, css) : NULL; 10574 } 10575 10576 static struct cgroup_subsys_state * 10577 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 10578 { 10579 struct task_group *parent = css_tg(parent_css); 10580 struct task_group *tg; 10581 10582 if (!parent) { 10583 /* This is early initialization for the top cgroup */ 10584 return &root_task_group.css; 10585 } 10586 10587 tg = sched_create_group(parent); 10588 if (IS_ERR(tg)) 10589 return ERR_PTR(-ENOMEM); 10590 10591 return &tg->css; 10592 } 10593 10594 /* Expose task group only after completing cgroup initialization */ 10595 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 10596 { 10597 struct task_group *tg = css_tg(css); 10598 struct task_group *parent = css_tg(css->parent); 10599 10600 if (parent) 10601 sched_online_group(tg, parent); 10602 10603 #ifdef CONFIG_UCLAMP_TASK_GROUP 10604 /* Propagate the effective uclamp value for the new group */ 10605 mutex_lock(&uclamp_mutex); 10606 rcu_read_lock(); 10607 cpu_util_update_eff(css); 10608 rcu_read_unlock(); 10609 mutex_unlock(&uclamp_mutex); 10610 #endif 10611 10612 return 0; 10613 } 10614 10615 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 10616 { 10617 struct task_group *tg = css_tg(css); 10618 10619 sched_release_group(tg); 10620 } 10621 10622 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 10623 { 10624 struct task_group *tg = css_tg(css); 10625 10626 /* 10627 * Relies on the RCU grace period between css_released() and this. 10628 */ 10629 sched_unregister_group(tg); 10630 } 10631 10632 #ifdef CONFIG_RT_GROUP_SCHED 10633 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 10634 { 10635 struct task_struct *task; 10636 struct cgroup_subsys_state *css; 10637 10638 cgroup_taskset_for_each(task, css, tset) { 10639 if (!sched_rt_can_attach(css_tg(css), task)) 10640 return -EINVAL; 10641 } 10642 return 0; 10643 } 10644 #endif 10645 10646 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 10647 { 10648 struct task_struct *task; 10649 struct cgroup_subsys_state *css; 10650 10651 cgroup_taskset_for_each(task, css, tset) 10652 sched_move_task(task); 10653 } 10654 10655 #ifdef CONFIG_UCLAMP_TASK_GROUP 10656 static void cpu_util_update_eff(struct cgroup_subsys_state *css) 10657 { 10658 struct cgroup_subsys_state *top_css = css; 10659 struct uclamp_se *uc_parent = NULL; 10660 struct uclamp_se *uc_se = NULL; 10661 unsigned int eff[UCLAMP_CNT]; 10662 enum uclamp_id clamp_id; 10663 unsigned int clamps; 10664 10665 lockdep_assert_held(&uclamp_mutex); 10666 SCHED_WARN_ON(!rcu_read_lock_held()); 10667 10668 css_for_each_descendant_pre(css, top_css) { 10669 uc_parent = css_tg(css)->parent 10670 ? 
css_tg(css)->parent->uclamp : NULL; 10671 10672 for_each_clamp_id(clamp_id) { 10673 /* Assume effective clamps matches requested clamps */ 10674 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; 10675 /* Cap effective clamps with parent's effective clamps */ 10676 if (uc_parent && 10677 eff[clamp_id] > uc_parent[clamp_id].value) { 10678 eff[clamp_id] = uc_parent[clamp_id].value; 10679 } 10680 } 10681 /* Ensure protection is always capped by limit */ 10682 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); 10683 10684 /* Propagate most restrictive effective clamps */ 10685 clamps = 0x0; 10686 uc_se = css_tg(css)->uclamp; 10687 for_each_clamp_id(clamp_id) { 10688 if (eff[clamp_id] == uc_se[clamp_id].value) 10689 continue; 10690 uc_se[clamp_id].value = eff[clamp_id]; 10691 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); 10692 clamps |= (0x1 << clamp_id); 10693 } 10694 if (!clamps) { 10695 css = css_rightmost_descendant(css); 10696 continue; 10697 } 10698 10699 /* Immediately update descendants RUNNABLE tasks */ 10700 uclamp_update_active_tasks(css); 10701 } 10702 } 10703 10704 /* 10705 * Integer 10^N with a given N exponent by casting to integer the literal "1eN" 10706 * C expression. Since there is no way to convert a macro argument (N) into a 10707 * character constant, use two levels of macros. 10708 */ 10709 #define _POW10(exp) ((unsigned int)1e##exp) 10710 #define POW10(exp) _POW10(exp) 10711 10712 struct uclamp_request { 10713 #define UCLAMP_PERCENT_SHIFT 2 10714 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) 10715 s64 percent; 10716 u64 util; 10717 int ret; 10718 }; 10719 10720 static inline struct uclamp_request 10721 capacity_from_percent(char *buf) 10722 { 10723 struct uclamp_request req = { 10724 .percent = UCLAMP_PERCENT_SCALE, 10725 .util = SCHED_CAPACITY_SCALE, 10726 .ret = 0, 10727 }; 10728 10729 buf = strim(buf); 10730 if (strcmp(buf, "max")) { 10731 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, 10732 &req.percent); 10733 if (req.ret) 10734 return req; 10735 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { 10736 req.ret = -ERANGE; 10737 return req; 10738 } 10739 10740 req.util = req.percent << SCHED_CAPACITY_SHIFT; 10741 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); 10742 } 10743 10744 return req; 10745 } 10746 10747 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, 10748 size_t nbytes, loff_t off, 10749 enum uclamp_id clamp_id) 10750 { 10751 struct uclamp_request req; 10752 struct task_group *tg; 10753 10754 req = capacity_from_percent(buf); 10755 if (req.ret) 10756 return req.ret; 10757 10758 static_branch_enable(&sched_uclamp_used); 10759 10760 mutex_lock(&uclamp_mutex); 10761 rcu_read_lock(); 10762 10763 tg = css_tg(of_css(of)); 10764 if (tg->uclamp_req[clamp_id].value != req.util) 10765 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); 10766 10767 /* 10768 * Because of not recoverable conversion rounding we keep track of the 10769 * exact requested value 10770 */ 10771 tg->uclamp_pct[clamp_id] = req.percent; 10772 10773 /* Update effective clamps to track the most restrictive value */ 10774 cpu_util_update_eff(of_css(of)); 10775 10776 rcu_read_unlock(); 10777 mutex_unlock(&uclamp_mutex); 10778 10779 return nbytes; 10780 } 10781 10782 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, 10783 char *buf, size_t nbytes, 10784 loff_t off) 10785 { 10786 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); 10787 } 10788 10789 static ssize_t 
cpu_uclamp_max_write(struct kernfs_open_file *of, 10790 char *buf, size_t nbytes, 10791 loff_t off) 10792 { 10793 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); 10794 } 10795 10796 static inline void cpu_uclamp_print(struct seq_file *sf, 10797 enum uclamp_id clamp_id) 10798 { 10799 struct task_group *tg; 10800 u64 util_clamp; 10801 u64 percent; 10802 u32 rem; 10803 10804 rcu_read_lock(); 10805 tg = css_tg(seq_css(sf)); 10806 util_clamp = tg->uclamp_req[clamp_id].value; 10807 rcu_read_unlock(); 10808 10809 if (util_clamp == SCHED_CAPACITY_SCALE) { 10810 seq_puts(sf, "max\n"); 10811 return; 10812 } 10813 10814 percent = tg->uclamp_pct[clamp_id]; 10815 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); 10816 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); 10817 } 10818 10819 static int cpu_uclamp_min_show(struct seq_file *sf, void *v) 10820 { 10821 cpu_uclamp_print(sf, UCLAMP_MIN); 10822 return 0; 10823 } 10824 10825 static int cpu_uclamp_max_show(struct seq_file *sf, void *v) 10826 { 10827 cpu_uclamp_print(sf, UCLAMP_MAX); 10828 return 0; 10829 } 10830 #endif /* CONFIG_UCLAMP_TASK_GROUP */ 10831 10832 #ifdef CONFIG_FAIR_GROUP_SCHED 10833 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 10834 struct cftype *cftype, u64 shareval) 10835 { 10836 if (shareval > scale_load_down(ULONG_MAX)) 10837 shareval = MAX_SHARES; 10838 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 10839 } 10840 10841 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 10842 struct cftype *cft) 10843 { 10844 struct task_group *tg = css_tg(css); 10845 10846 return (u64) scale_load_down(tg->shares); 10847 } 10848 10849 #ifdef CONFIG_CFS_BANDWIDTH 10850 static DEFINE_MUTEX(cfs_constraints_mutex); 10851 10852 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 10853 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 10854 /* More than 203 days if BW_SHIFT equals 20. */ 10855 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; 10856 10857 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 10858 10859 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, 10860 u64 burst) 10861 { 10862 int i, ret = 0, runtime_enabled, runtime_was_enabled; 10863 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 10864 10865 if (tg == &root_task_group) 10866 return -EINVAL; 10867 10868 /* 10869 * Ensure we have at some amount of bandwidth every period. This is 10870 * to prevent reaching a state of large arrears when throttled via 10871 * entity_tick() resulting in prolonged exit starvation. 10872 */ 10873 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 10874 return -EINVAL; 10875 10876 /* 10877 * Likewise, bound things on the other side by preventing insane quota 10878 * periods. This also allows us to normalize in computing quota 10879 * feasibility. 10880 */ 10881 if (period > max_cfs_quota_period) 10882 return -EINVAL; 10883 10884 /* 10885 * Bound quota to defend quota against overflow during bandwidth shift. 10886 */ 10887 if (quota != RUNTIME_INF && quota > max_cfs_runtime) 10888 return -EINVAL; 10889 10890 if (quota != RUNTIME_INF && (burst > quota || 10891 burst + quota > max_cfs_runtime)) 10892 return -EINVAL; 10893 10894 /* 10895 * Prevent race between setting of cfs_rq->runtime_enabled and 10896 * unthrottle_offline_cfs_rqs(). 
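 *
 * The scope-based guards used below behave roughly as if the function did
 * (sketch of their effect):
 *
 *	cpus_read_lock();
 *	mutex_lock(&cfs_constraints_mutex);
 *	...					// update the bandwidth parameters
 *	mutex_unlock(&cfs_constraints_mutex);	// released in reverse order
 *	cpus_read_unlock();			// on every return path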
10897 */ 10898 guard(cpus_read_lock)(); 10899 guard(mutex)(&cfs_constraints_mutex); 10900 10901 ret = __cfs_schedulable(tg, period, quota); 10902 if (ret) 10903 return ret; 10904 10905 runtime_enabled = quota != RUNTIME_INF; 10906 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 10907 /* 10908 * If we need to toggle cfs_bandwidth_used, off->on must occur 10909 * before making related changes, and on->off must occur afterwards 10910 */ 10911 if (runtime_enabled && !runtime_was_enabled) 10912 cfs_bandwidth_usage_inc(); 10913 10914 scoped_guard (raw_spinlock_irq, &cfs_b->lock) { 10915 cfs_b->period = ns_to_ktime(period); 10916 cfs_b->quota = quota; 10917 cfs_b->burst = burst; 10918 10919 __refill_cfs_bandwidth_runtime(cfs_b); 10920 10921 /* 10922 * Restart the period timer (if active) to handle new 10923 * period expiry: 10924 */ 10925 if (runtime_enabled) 10926 start_cfs_bandwidth(cfs_b); 10927 } 10928 10929 for_each_online_cpu(i) { 10930 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 10931 struct rq *rq = cfs_rq->rq; 10932 10933 guard(rq_lock_irq)(rq); 10934 cfs_rq->runtime_enabled = runtime_enabled; 10935 cfs_rq->runtime_remaining = 0; 10936 10937 if (cfs_rq->throttled) 10938 unthrottle_cfs_rq(cfs_rq); 10939 } 10940 10941 if (runtime_was_enabled && !runtime_enabled) 10942 cfs_bandwidth_usage_dec(); 10943 10944 return 0; 10945 } 10946 10947 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 10948 { 10949 u64 quota, period, burst; 10950 10951 period = ktime_to_ns(tg->cfs_bandwidth.period); 10952 burst = tg->cfs_bandwidth.burst; 10953 if (cfs_quota_us < 0) 10954 quota = RUNTIME_INF; 10955 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) 10956 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 10957 else 10958 return -EINVAL; 10959 10960 return tg_set_cfs_bandwidth(tg, period, quota, burst); 10961 } 10962 10963 static long tg_get_cfs_quota(struct task_group *tg) 10964 { 10965 u64 quota_us; 10966 10967 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 10968 return -1; 10969 10970 quota_us = tg->cfs_bandwidth.quota; 10971 do_div(quota_us, NSEC_PER_USEC); 10972 10973 return quota_us; 10974 } 10975 10976 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 10977 { 10978 u64 quota, period, burst; 10979 10980 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 10981 return -EINVAL; 10982 10983 period = (u64)cfs_period_us * NSEC_PER_USEC; 10984 quota = tg->cfs_bandwidth.quota; 10985 burst = tg->cfs_bandwidth.burst; 10986 10987 return tg_set_cfs_bandwidth(tg, period, quota, burst); 10988 } 10989 10990 static long tg_get_cfs_period(struct task_group *tg) 10991 { 10992 u64 cfs_period_us; 10993 10994 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 10995 do_div(cfs_period_us, NSEC_PER_USEC); 10996 10997 return cfs_period_us; 10998 } 10999 11000 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) 11001 { 11002 u64 quota, period, burst; 11003 11004 if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC) 11005 return -EINVAL; 11006 11007 burst = (u64)cfs_burst_us * NSEC_PER_USEC; 11008 period = ktime_to_ns(tg->cfs_bandwidth.period); 11009 quota = tg->cfs_bandwidth.quota; 11010 11011 return tg_set_cfs_bandwidth(tg, period, quota, burst); 11012 } 11013 11014 static long tg_get_cfs_burst(struct task_group *tg) 11015 { 11016 u64 burst_us; 11017 11018 burst_us = tg->cfs_bandwidth.burst; 11019 do_div(burst_us, NSEC_PER_USEC); 11020 11021 return burst_us; 11022 } 11023 11024 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 11025 struct cftype *cft) 11026 { 
11027 return tg_get_cfs_quota(css_tg(css)); 11028 } 11029 11030 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 11031 struct cftype *cftype, s64 cfs_quota_us) 11032 { 11033 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 11034 } 11035 11036 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 11037 struct cftype *cft) 11038 { 11039 return tg_get_cfs_period(css_tg(css)); 11040 } 11041 11042 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 11043 struct cftype *cftype, u64 cfs_period_us) 11044 { 11045 return tg_set_cfs_period(css_tg(css), cfs_period_us); 11046 } 11047 11048 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css, 11049 struct cftype *cft) 11050 { 11051 return tg_get_cfs_burst(css_tg(css)); 11052 } 11053 11054 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css, 11055 struct cftype *cftype, u64 cfs_burst_us) 11056 { 11057 return tg_set_cfs_burst(css_tg(css), cfs_burst_us); 11058 } 11059 11060 struct cfs_schedulable_data { 11061 struct task_group *tg; 11062 u64 period, quota; 11063 }; 11064 11065 /* 11066 * normalize group quota/period to be quota/max_period 11067 * note: units are usecs 11068 */ 11069 static u64 normalize_cfs_quota(struct task_group *tg, 11070 struct cfs_schedulable_data *d) 11071 { 11072 u64 quota, period; 11073 11074 if (tg == d->tg) { 11075 period = d->period; 11076 quota = d->quota; 11077 } else { 11078 period = tg_get_cfs_period(tg); 11079 quota = tg_get_cfs_quota(tg); 11080 } 11081 11082 /* note: these should typically be equivalent */ 11083 if (quota == RUNTIME_INF || quota == -1) 11084 return RUNTIME_INF; 11085 11086 return to_ratio(period, quota); 11087 } 11088 11089 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 11090 { 11091 struct cfs_schedulable_data *d = data; 11092 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 11093 s64 quota = 0, parent_quota = -1; 11094 11095 if (!tg->parent) { 11096 quota = RUNTIME_INF; 11097 } else { 11098 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 11099 11100 quota = normalize_cfs_quota(tg, d); 11101 parent_quota = parent_b->hierarchical_quota; 11102 11103 /* 11104 * Ensure max(child_quota) <= parent_quota. On cgroup2, 11105 * always take the non-RUNTIME_INF min. On cgroup1, only 11106 * inherit when no limit is set. In both cases this is used 11107 * by the scheduler to determine if a given CFS task has a 11108 * bandwidth constraint at some higher level. 
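 *
 * Worked example (assuming equal periods of 100ms): a parent quota of
 * 100ms with a child requesting 200ms gives the child an effective
 * hierarchical_quota of 100ms on cgroup2, while on cgroup1 the write is
 * rejected with -EINVAL because the child may not exceed its parent.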
11109 */ 11110 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 11111 if (quota == RUNTIME_INF) 11112 quota = parent_quota; 11113 else if (parent_quota != RUNTIME_INF) 11114 quota = min(quota, parent_quota); 11115 } else { 11116 if (quota == RUNTIME_INF) 11117 quota = parent_quota; 11118 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 11119 return -EINVAL; 11120 } 11121 } 11122 cfs_b->hierarchical_quota = quota; 11123 11124 return 0; 11125 } 11126 11127 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 11128 { 11129 int ret; 11130 struct cfs_schedulable_data data = { 11131 .tg = tg, 11132 .period = period, 11133 .quota = quota, 11134 }; 11135 11136 if (quota != RUNTIME_INF) { 11137 do_div(data.period, NSEC_PER_USEC); 11138 do_div(data.quota, NSEC_PER_USEC); 11139 } 11140 11141 rcu_read_lock(); 11142 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 11143 rcu_read_unlock(); 11144 11145 return ret; 11146 } 11147 11148 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 11149 { 11150 struct task_group *tg = css_tg(seq_css(sf)); 11151 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 11152 11153 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 11154 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 11155 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 11156 11157 if (schedstat_enabled() && tg != &root_task_group) { 11158 struct sched_statistics *stats; 11159 u64 ws = 0; 11160 int i; 11161 11162 for_each_possible_cpu(i) { 11163 stats = __schedstats_from_se(tg->se[i]); 11164 ws += schedstat_val(stats->wait_sum); 11165 } 11166 11167 seq_printf(sf, "wait_sum %llu\n", ws); 11168 } 11169 11170 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); 11171 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); 11172 11173 return 0; 11174 } 11175 11176 static u64 throttled_time_self(struct task_group *tg) 11177 { 11178 int i; 11179 u64 total = 0; 11180 11181 for_each_possible_cpu(i) { 11182 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time); 11183 } 11184 11185 return total; 11186 } 11187 11188 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v) 11189 { 11190 struct task_group *tg = css_tg(seq_css(sf)); 11191 11192 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg)); 11193 11194 return 0; 11195 } 11196 #endif /* CONFIG_CFS_BANDWIDTH */ 11197 #endif /* CONFIG_FAIR_GROUP_SCHED */ 11198 11199 #ifdef CONFIG_RT_GROUP_SCHED 11200 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 11201 struct cftype *cft, s64 val) 11202 { 11203 return sched_group_set_rt_runtime(css_tg(css), val); 11204 } 11205 11206 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 11207 struct cftype *cft) 11208 { 11209 return sched_group_rt_runtime(css_tg(css)); 11210 } 11211 11212 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 11213 struct cftype *cftype, u64 rt_period_us) 11214 { 11215 return sched_group_set_rt_period(css_tg(css), rt_period_us); 11216 } 11217 11218 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 11219 struct cftype *cft) 11220 { 11221 return sched_group_rt_period(css_tg(css)); 11222 } 11223 #endif /* CONFIG_RT_GROUP_SCHED */ 11224 11225 #ifdef CONFIG_FAIR_GROUP_SCHED 11226 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css, 11227 struct cftype *cft) 11228 { 11229 return css_tg(css)->idle; 11230 } 11231 11232 static int cpu_idle_write_s64(struct cgroup_subsys_state *css, 11233 struct cftype *cft, s64 idle) 11234 { 11235 return 
sched_group_set_idle(css_tg(css), idle); 11236 } 11237 #endif 11238 11239 static struct cftype cpu_legacy_files[] = { 11240 #ifdef CONFIG_FAIR_GROUP_SCHED 11241 { 11242 .name = "shares", 11243 .read_u64 = cpu_shares_read_u64, 11244 .write_u64 = cpu_shares_write_u64, 11245 }, 11246 { 11247 .name = "idle", 11248 .read_s64 = cpu_idle_read_s64, 11249 .write_s64 = cpu_idle_write_s64, 11250 }, 11251 #endif 11252 #ifdef CONFIG_CFS_BANDWIDTH 11253 { 11254 .name = "cfs_quota_us", 11255 .read_s64 = cpu_cfs_quota_read_s64, 11256 .write_s64 = cpu_cfs_quota_write_s64, 11257 }, 11258 { 11259 .name = "cfs_period_us", 11260 .read_u64 = cpu_cfs_period_read_u64, 11261 .write_u64 = cpu_cfs_period_write_u64, 11262 }, 11263 { 11264 .name = "cfs_burst_us", 11265 .read_u64 = cpu_cfs_burst_read_u64, 11266 .write_u64 = cpu_cfs_burst_write_u64, 11267 }, 11268 { 11269 .name = "stat", 11270 .seq_show = cpu_cfs_stat_show, 11271 }, 11272 { 11273 .name = "stat.local", 11274 .seq_show = cpu_cfs_local_stat_show, 11275 }, 11276 #endif 11277 #ifdef CONFIG_RT_GROUP_SCHED 11278 { 11279 .name = "rt_runtime_us", 11280 .read_s64 = cpu_rt_runtime_read, 11281 .write_s64 = cpu_rt_runtime_write, 11282 }, 11283 { 11284 .name = "rt_period_us", 11285 .read_u64 = cpu_rt_period_read_uint, 11286 .write_u64 = cpu_rt_period_write_uint, 11287 }, 11288 #endif 11289 #ifdef CONFIG_UCLAMP_TASK_GROUP 11290 { 11291 .name = "uclamp.min", 11292 .flags = CFTYPE_NOT_ON_ROOT, 11293 .seq_show = cpu_uclamp_min_show, 11294 .write = cpu_uclamp_min_write, 11295 }, 11296 { 11297 .name = "uclamp.max", 11298 .flags = CFTYPE_NOT_ON_ROOT, 11299 .seq_show = cpu_uclamp_max_show, 11300 .write = cpu_uclamp_max_write, 11301 }, 11302 #endif 11303 { } /* Terminate */ 11304 }; 11305 11306 static int cpu_extra_stat_show(struct seq_file *sf, 11307 struct cgroup_subsys_state *css) 11308 { 11309 #ifdef CONFIG_CFS_BANDWIDTH 11310 { 11311 struct task_group *tg = css_tg(css); 11312 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 11313 u64 throttled_usec, burst_usec; 11314 11315 throttled_usec = cfs_b->throttled_time; 11316 do_div(throttled_usec, NSEC_PER_USEC); 11317 burst_usec = cfs_b->burst_time; 11318 do_div(burst_usec, NSEC_PER_USEC); 11319 11320 seq_printf(sf, "nr_periods %d\n" 11321 "nr_throttled %d\n" 11322 "throttled_usec %llu\n" 11323 "nr_bursts %d\n" 11324 "burst_usec %llu\n", 11325 cfs_b->nr_periods, cfs_b->nr_throttled, 11326 throttled_usec, cfs_b->nr_burst, burst_usec); 11327 } 11328 #endif 11329 return 0; 11330 } 11331 11332 static int cpu_local_stat_show(struct seq_file *sf, 11333 struct cgroup_subsys_state *css) 11334 { 11335 #ifdef CONFIG_CFS_BANDWIDTH 11336 { 11337 struct task_group *tg = css_tg(css); 11338 u64 throttled_self_usec; 11339 11340 throttled_self_usec = throttled_time_self(tg); 11341 do_div(throttled_self_usec, NSEC_PER_USEC); 11342 11343 seq_printf(sf, "throttled_usec %llu\n", 11344 throttled_self_usec); 11345 } 11346 #endif 11347 return 0; 11348 } 11349 11350 #ifdef CONFIG_FAIR_GROUP_SCHED 11351 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 11352 struct cftype *cft) 11353 { 11354 struct task_group *tg = css_tg(css); 11355 u64 weight = scale_load_down(tg->shares); 11356 11357 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); 11358 } 11359 11360 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 11361 struct cftype *cft, u64 weight) 11362 { 11363 /* 11364 * cgroup weight knobs should use the common MIN, DFL and MAX 11365 * values which are 1, 100 and 10000 respectively. 
While it loses 11366 * a bit of range on both ends, it maps pretty well onto the shares 11367 * value used by scheduler and the round-trip conversions preserve 11368 * the original value over the entire range. 11369 */ 11370 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) 11371 return -ERANGE; 11372 11373 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); 11374 11375 return sched_group_set_shares(css_tg(css), scale_load(weight)); 11376 } 11377 11378 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 11379 struct cftype *cft) 11380 { 11381 unsigned long weight = scale_load_down(css_tg(css)->shares); 11382 int last_delta = INT_MAX; 11383 int prio, delta; 11384 11385 /* find the closest nice value to the current weight */ 11386 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 11387 delta = abs(sched_prio_to_weight[prio] - weight); 11388 if (delta >= last_delta) 11389 break; 11390 last_delta = delta; 11391 } 11392 11393 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 11394 } 11395 11396 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 11397 struct cftype *cft, s64 nice) 11398 { 11399 unsigned long weight; 11400 int idx; 11401 11402 if (nice < MIN_NICE || nice > MAX_NICE) 11403 return -ERANGE; 11404 11405 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 11406 idx = array_index_nospec(idx, 40); 11407 weight = sched_prio_to_weight[idx]; 11408 11409 return sched_group_set_shares(css_tg(css), scale_load(weight)); 11410 } 11411 #endif 11412 11413 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 11414 long period, long quota) 11415 { 11416 if (quota < 0) 11417 seq_puts(sf, "max"); 11418 else 11419 seq_printf(sf, "%ld", quota); 11420 11421 seq_printf(sf, " %ld\n", period); 11422 } 11423 11424 /* caller should put the current value in *@periodp before calling */ 11425 static int __maybe_unused cpu_period_quota_parse(char *buf, 11426 u64 *periodp, u64 *quotap) 11427 { 11428 char tok[21]; /* U64_MAX */ 11429 11430 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 11431 return -EINVAL; 11432 11433 *periodp *= NSEC_PER_USEC; 11434 11435 if (sscanf(tok, "%llu", quotap)) 11436 *quotap *= NSEC_PER_USEC; 11437 else if (!strcmp(tok, "max")) 11438 *quotap = RUNTIME_INF; 11439 else 11440 return -EINVAL; 11441 11442 return 0; 11443 } 11444 11445 #ifdef CONFIG_CFS_BANDWIDTH 11446 static int cpu_max_show(struct seq_file *sf, void *v) 11447 { 11448 struct task_group *tg = css_tg(seq_css(sf)); 11449 11450 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); 11451 return 0; 11452 } 11453 11454 static ssize_t cpu_max_write(struct kernfs_open_file *of, 11455 char *buf, size_t nbytes, loff_t off) 11456 { 11457 struct task_group *tg = css_tg(of_css(of)); 11458 u64 period = tg_get_cfs_period(tg); 11459 u64 burst = tg->cfs_bandwidth.burst; 11460 u64 quota; 11461 int ret; 11462 11463 ret = cpu_period_quota_parse(buf, &period, "a); 11464 if (!ret) 11465 ret = tg_set_cfs_bandwidth(tg, period, quota, burst); 11466 return ret ?: nbytes; 11467 } 11468 #endif 11469 11470 static struct cftype cpu_files[] = { 11471 #ifdef CONFIG_FAIR_GROUP_SCHED 11472 { 11473 .name = "weight", 11474 .flags = CFTYPE_NOT_ON_ROOT, 11475 .read_u64 = cpu_weight_read_u64, 11476 .write_u64 = cpu_weight_write_u64, 11477 }, 11478 { 11479 .name = "weight.nice", 11480 .flags = CFTYPE_NOT_ON_ROOT, 11481 .read_s64 = cpu_weight_nice_read_s64, 11482 .write_s64 = cpu_weight_nice_write_s64, 11483 }, 11484 { 11485 .name = "idle", 11486 .flags = 
CFTYPE_NOT_ON_ROOT, 11487 .read_s64 = cpu_idle_read_s64, 11488 .write_s64 = cpu_idle_write_s64, 11489 }, 11490 #endif 11491 #ifdef CONFIG_CFS_BANDWIDTH 11492 { 11493 .name = "max", 11494 .flags = CFTYPE_NOT_ON_ROOT, 11495 .seq_show = cpu_max_show, 11496 .write = cpu_max_write, 11497 }, 11498 { 11499 .name = "max.burst", 11500 .flags = CFTYPE_NOT_ON_ROOT, 11501 .read_u64 = cpu_cfs_burst_read_u64, 11502 .write_u64 = cpu_cfs_burst_write_u64, 11503 }, 11504 #endif 11505 #ifdef CONFIG_UCLAMP_TASK_GROUP 11506 { 11507 .name = "uclamp.min", 11508 .flags = CFTYPE_NOT_ON_ROOT, 11509 .seq_show = cpu_uclamp_min_show, 11510 .write = cpu_uclamp_min_write, 11511 }, 11512 { 11513 .name = "uclamp.max", 11514 .flags = CFTYPE_NOT_ON_ROOT, 11515 .seq_show = cpu_uclamp_max_show, 11516 .write = cpu_uclamp_max_write, 11517 }, 11518 #endif 11519 { } /* terminate */ 11520 }; 11521 11522 struct cgroup_subsys cpu_cgrp_subsys = { 11523 .css_alloc = cpu_cgroup_css_alloc, 11524 .css_online = cpu_cgroup_css_online, 11525 .css_released = cpu_cgroup_css_released, 11526 .css_free = cpu_cgroup_css_free, 11527 .css_extra_stat_show = cpu_extra_stat_show, 11528 .css_local_stat_show = cpu_local_stat_show, 11529 #ifdef CONFIG_RT_GROUP_SCHED 11530 .can_attach = cpu_cgroup_can_attach, 11531 #endif 11532 .attach = cpu_cgroup_attach, 11533 .legacy_cftypes = cpu_legacy_files, 11534 .dfl_cftypes = cpu_files, 11535 .early_init = true, 11536 .threaded = true, 11537 }; 11538 11539 #endif /* CONFIG_CGROUP_SCHED */ 11540 11541 void dump_cpu_task(int cpu) 11542 { 11543 if (cpu == smp_processor_id() && in_hardirq()) { 11544 struct pt_regs *regs; 11545 11546 regs = get_irq_regs(); 11547 if (regs) { 11548 show_regs(regs); 11549 return; 11550 } 11551 } 11552 11553 if (trigger_single_cpu_backtrace(cpu)) 11554 return; 11555 11556 pr_info("Task dump for CPU %d:\n", cpu); 11557 sched_show_task(cpu_curr(cpu)); 11558 } 11559 11560 /* 11561 * Nice levels are multiplicative, with a gentle 10% change for every 11562 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 11563 * nice 1, it will get ~10% less CPU time than another CPU-bound task 11564 * that remained on nice 0. 11565 * 11566 * The "10% effect" is relative and cumulative: from _any_ nice level, 11567 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 11568 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 11569 * If a task goes up by ~10% and another task goes down by ~10% then 11570 * the relative distance between them is ~25%.) 11571 */ 11572 const int sched_prio_to_weight[40] = { 11573 /* -20 */ 88761, 71755, 56483, 46273, 36291, 11574 /* -15 */ 29154, 23254, 18705, 14949, 11916, 11575 /* -10 */ 9548, 7620, 6100, 4904, 3906, 11576 /* -5 */ 3121, 2501, 1991, 1586, 1277, 11577 /* 0 */ 1024, 820, 655, 526, 423, 11578 /* 5 */ 335, 272, 215, 172, 137, 11579 /* 10 */ 110, 87, 70, 56, 45, 11580 /* 15 */ 36, 29, 23, 18, 15, 11581 }; 11582 11583 /* 11584 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. 
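 *
 * Worked example (illustration): the nice-0 weight is 1024, so its entry
 * below is 2^32 / 1024 = 4194304, and a division such as delta / 1024 can
 * instead be computed as roughly (delta * 4194304) >> 32.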
11585 * 11586 * In cases where the weight does not change often, we can use the 11587 * precalculated inverse to speed up arithmetic by turning divisions 11588 * into multiplications: 11589 */ 11590 const u32 sched_prio_to_wmult[40] = { 11591 /* -20 */ 48388, 59856, 76040, 92818, 118348, 11592 /* -15 */ 147320, 184698, 229616, 287308, 360437, 11593 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 11594 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 11595 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 11596 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 11597 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 11598 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 11599 }; 11600 11601 void call_trace_sched_update_nr_running(struct rq *rq, int count) 11602 { 11603 trace_sched_update_nr_running_tp(rq, count); 11604 } 11605 11606 #ifdef CONFIG_SCHED_MM_CID 11607 11608 /* 11609 * @cid_lock: Guarantee forward-progress of cid allocation. 11610 * 11611 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock 11612 * is only used when contention is detected by the lock-free allocation so 11613 * forward progress can be guaranteed. 11614 */ 11615 DEFINE_RAW_SPINLOCK(cid_lock); 11616 11617 /* 11618 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock. 11619 * 11620 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is 11621 * detected, it is set to 1 to ensure that all incoming allocations are 11622 * serialized by @cid_lock until the allocation which detected contention 11623 * completes and sets @use_cid_lock back to 0. This guarantees forward progress 11624 * of a cid allocation. 11625 */ 11626 int use_cid_lock; 11627 11628 /* 11629 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid 11630 * concurrently with respect to the execution of the source runqueue context 11631 * switch. 11632 * 11633 * There is one basic property we want to guarantee here: 11634 * 11635 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively 11636 * used by a task. That would lead to concurrent allocation of the cid and 11637 * userspace corruption. 11638 * 11639 * Provide this guarantee by introducing a Dekker memory ordering to guarantee 11640 * that a pair of loads observe at least one of a pair of stores, which can be 11641 * shown as: 11642 * 11643 * X = Y = 0 11644 * 11645 * w[X]=1 w[Y]=1 11646 * MB MB 11647 * r[Y]=y r[X]=x 11648 * 11649 * Which guarantees that x==0 && y==0 is impossible. But rather than using 11650 * values 0 and 1, this algorithm cares about specific state transitions of the 11651 * runqueue current task (as updated by the scheduler context switch), and the 11652 * per-mm/cpu cid value. 11653 * 11654 * Let's introduce task (Y) which has task->mm == mm and task (N) which has 11655 * task->mm != mm for the rest of the discussion. There are two scheduler state 11656 * transitions on context switch we care about: 11657 * 11658 * (TSA) Store to rq->curr with transition from (N) to (Y) 11659 * 11660 * (TSB) Store to rq->curr with transition from (Y) to (N) 11661 * 11662 * On the remote-clear side, there is one transition we care about: 11663 * 11664 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag 11665 * 11666 * There is also a transition to UNSET state which can be performed from all 11667 * sides (scheduler, remote-clear).
It is always performed with a cmpxchg which 11668 * guarantees that only a single thread will succeed: 11669 * 11670 * (TMB) cmpxchg to *pcpu_cid to mark UNSET 11671 * 11672 * Just to be clear, what we do _not_ want to happen is a transition to UNSET 11673 * when a thread is actively using the cid (property (1)). 11674 * 11675 * Let's looks at the relevant combinations of TSA/TSB, and TMA transitions. 11676 * 11677 * Scenario A) (TSA)+(TMA) (from next task perspective) 11678 * 11679 * CPU0 CPU1 11680 * 11681 * Context switch CS-1 Remote-clear 11682 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA) 11683 * (implied barrier after cmpxchg) 11684 * - switch_mm_cid() 11685 * - memory barrier (see switch_mm_cid() 11686 * comment explaining how this barrier 11687 * is combined with other scheduler 11688 * barriers) 11689 * - mm_cid_get (next) 11690 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr) 11691 * 11692 * This Dekker ensures that either task (Y) is observed by the 11693 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are 11694 * observed. 11695 * 11696 * If task (Y) store is observed by rcu_dereference(), it means that there is 11697 * still an active task on the cpu. Remote-clear will therefore not transition 11698 * to UNSET, which fulfills property (1). 11699 * 11700 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(), 11701 * it will move its state to UNSET, which clears the percpu cid perhaps 11702 * uselessly (which is not an issue for correctness). Because task (Y) is not 11703 * observed, CPU1 can move ahead to set the state to UNSET. Because moving 11704 * state to UNSET is done with a cmpxchg expecting that the old state has the 11705 * LAZY flag set, only one thread will successfully UNSET. 11706 * 11707 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0 11708 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and 11709 * CPU1 will observe task (Y) and do nothing more, which is fine. 11710 * 11711 * What we are effectively preventing with this Dekker is a scenario where 11712 * neither LAZY flag nor store (Y) are observed, which would fail property (1) 11713 * because this would UNSET a cid which is actively used. 11714 */ 11715 11716 void sched_mm_cid_migrate_from(struct task_struct *t) 11717 { 11718 t->migrate_from_cpu = task_cpu(t); 11719 } 11720 11721 static 11722 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq, 11723 struct task_struct *t, 11724 struct mm_cid *src_pcpu_cid) 11725 { 11726 struct mm_struct *mm = t->mm; 11727 struct task_struct *src_task; 11728 int src_cid, last_mm_cid; 11729 11730 if (!mm) 11731 return -1; 11732 11733 last_mm_cid = t->last_mm_cid; 11734 /* 11735 * If the migrated task has no last cid, or if the current 11736 * task on src rq uses the cid, it means the source cid does not need 11737 * to be moved to the destination cpu. 11738 */ 11739 if (last_mm_cid == -1) 11740 return -1; 11741 src_cid = READ_ONCE(src_pcpu_cid->cid); 11742 if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid) 11743 return -1; 11744 11745 /* 11746 * If we observe an active task using the mm on this rq, it means we 11747 * are not the last task to be migrated from this cpu for this mm, so 11748 * there is no need to move src_cid to the destination cpu. 
11749 */ 11750 rcu_read_lock(); 11751 src_task = rcu_dereference(src_rq->curr); 11752 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { 11753 rcu_read_unlock(); 11754 t->last_mm_cid = -1; 11755 return -1; 11756 } 11757 rcu_read_unlock(); 11758 11759 return src_cid; 11760 } 11761 11762 static 11763 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq, 11764 struct task_struct *t, 11765 struct mm_cid *src_pcpu_cid, 11766 int src_cid) 11767 { 11768 struct task_struct *src_task; 11769 struct mm_struct *mm = t->mm; 11770 int lazy_cid; 11771 11772 if (src_cid == -1) 11773 return -1; 11774 11775 /* 11776 * Attempt to clear the source cpu cid to move it to the destination 11777 * cpu. 11778 */ 11779 lazy_cid = mm_cid_set_lazy_put(src_cid); 11780 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid)) 11781 return -1; 11782 11783 /* 11784 * The implicit barrier after cmpxchg per-mm/cpu cid before loading 11785 * rq->curr->mm matches the scheduler barrier in context_switch() 11786 * between store to rq->curr and load of prev and next task's 11787 * per-mm/cpu cid. 11788 * 11789 * The implicit barrier after cmpxchg per-mm/cpu cid before loading 11790 * rq->curr->mm_cid_active matches the barrier in 11791 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and 11792 * sched_mm_cid_after_execve() between store to t->mm_cid_active and 11793 * load of per-mm/cpu cid. 11794 */ 11795 11796 /* 11797 * If we observe an active task using the mm on this rq after setting 11798 * the lazy-put flag, this task will be responsible for transitioning 11799 * from lazy-put flag set to MM_CID_UNSET. 11800 */ 11801 rcu_read_lock(); 11802 src_task = rcu_dereference(src_rq->curr); 11803 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { 11804 rcu_read_unlock(); 11805 /* 11806 * We observed an active task for this mm, there is therefore 11807 * no point in moving this cid to the destination cpu. 11808 */ 11809 t->last_mm_cid = -1; 11810 return -1; 11811 } 11812 rcu_read_unlock(); 11813 11814 /* 11815 * The src_cid is unused, so it can be unset. 11816 */ 11817 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) 11818 return -1; 11819 return src_cid; 11820 } 11821 11822 /* 11823 * Migration to dst cpu. Called with dst_rq lock held. 11824 * Interrupts are disabled, which keeps the window of cid ownership without the 11825 * source rq lock held small. 11826 */ 11827 void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) 11828 { 11829 struct mm_cid *src_pcpu_cid, *dst_pcpu_cid; 11830 struct mm_struct *mm = t->mm; 11831 int src_cid, dst_cid, src_cpu; 11832 struct rq *src_rq; 11833 11834 lockdep_assert_rq_held(dst_rq); 11835 11836 if (!mm) 11837 return; 11838 src_cpu = t->migrate_from_cpu; 11839 if (src_cpu == -1) { 11840 t->last_mm_cid = -1; 11841 return; 11842 } 11843 /* 11844 * Move the src cid if the dst cid is unset. This keeps id 11845 * allocation closest to 0 in cases where few threads migrate around 11846 * many cpus. 11847 * 11848 * If destination cid is already set, we may have to just clear 11849 * the src cid to ensure compactness in frequent migrations 11850 * scenarios. 11851 * 11852 * It is not useful to clear the src cid when the number of threads is 11853 * greater or equal to the number of allowed cpus, because user-space 11854 * can expect that the number of allowed cids can reach the number of 11855 * allowed cpus. 
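 *
 * Concrete illustration (hypothetical numbers): a process with 2 threads
 * allowed to run on 8 CPUs should keep using cids {0, 1}; when a thread
 * migrates and the destination cpu already holds a cid for this mm, the
 * stale source cid is put back instead of being kept, so the set of live
 * cids stays compact.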
11856 */ 11857 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq)); 11858 dst_cid = READ_ONCE(dst_pcpu_cid->cid); 11859 if (!mm_cid_is_unset(dst_cid) && 11860 atomic_read(&mm->mm_users) >= t->nr_cpus_allowed) 11861 return; 11862 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu); 11863 src_rq = cpu_rq(src_cpu); 11864 src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid); 11865 if (src_cid == -1) 11866 return; 11867 src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid, 11868 src_cid); 11869 if (src_cid == -1) 11870 return; 11871 if (!mm_cid_is_unset(dst_cid)) { 11872 __mm_cid_put(mm, src_cid); 11873 return; 11874 } 11875 /* Move src_cid to dst cpu. */ 11876 mm_cid_snapshot_time(dst_rq, mm); 11877 WRITE_ONCE(dst_pcpu_cid->cid, src_cid); 11878 } 11879 11880 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid, 11881 int cpu) 11882 { 11883 struct rq *rq = cpu_rq(cpu); 11884 struct task_struct *t; 11885 unsigned long flags; 11886 int cid, lazy_cid; 11887 11888 cid = READ_ONCE(pcpu_cid->cid); 11889 if (!mm_cid_is_valid(cid)) 11890 return; 11891 11892 /* 11893 * Clear the cpu cid if it is set to keep cid allocation compact. If 11894 * there happens to be other tasks left on the source cpu using this 11895 * mm, the next task using this mm will reallocate its cid on context 11896 * switch. 11897 */ 11898 lazy_cid = mm_cid_set_lazy_put(cid); 11899 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid)) 11900 return; 11901 11902 /* 11903 * The implicit barrier after cmpxchg per-mm/cpu cid before loading 11904 * rq->curr->mm matches the scheduler barrier in context_switch() 11905 * between store to rq->curr and load of prev and next task's 11906 * per-mm/cpu cid. 11907 * 11908 * The implicit barrier after cmpxchg per-mm/cpu cid before loading 11909 * rq->curr->mm_cid_active matches the barrier in 11910 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and 11911 * sched_mm_cid_after_execve() between store to t->mm_cid_active and 11912 * load of per-mm/cpu cid. 11913 */ 11914 11915 /* 11916 * If we observe an active task using the mm on this rq after setting 11917 * the lazy-put flag, that task will be responsible for transitioning 11918 * from lazy-put flag set to MM_CID_UNSET. 11919 */ 11920 rcu_read_lock(); 11921 t = rcu_dereference(rq->curr); 11922 if (READ_ONCE(t->mm_cid_active) && t->mm == mm) { 11923 rcu_read_unlock(); 11924 return; 11925 } 11926 rcu_read_unlock(); 11927 11928 /* 11929 * The cid is unused, so it can be unset. 11930 * Disable interrupts to keep the window of cid ownership without rq 11931 * lock small. 11932 */ 11933 local_irq_save(flags); 11934 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) 11935 __mm_cid_put(mm, cid); 11936 local_irq_restore(flags); 11937 } 11938 11939 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu) 11940 { 11941 struct rq *rq = cpu_rq(cpu); 11942 struct mm_cid *pcpu_cid; 11943 struct task_struct *curr; 11944 u64 rq_clock; 11945 11946 /* 11947 * rq->clock load is racy on 32-bit but one spurious clear once in a 11948 * while is irrelevant. 11949 */ 11950 rq_clock = READ_ONCE(rq->clock); 11951 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); 11952 11953 /* 11954 * In order to take care of infrequently scheduled tasks, bump the time 11955 * snapshot associated with this cid if an active task using the mm is 11956 * observed on this rq. 
	 */
	rcu_read_lock();
	curr = rcu_dereference(rq->curr);
	if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
		WRITE_ONCE(pcpu_cid->time, rq_clock);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
		return;
	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
}

static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
					     int weight)
{
	struct mm_cid *pcpu_cid;
	int cid;

	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
	cid = READ_ONCE(pcpu_cid->cid);
	if (!mm_cid_is_valid(cid) || cid < weight)
		return;
	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
}

static void task_mm_cid_work(struct callback_head *work)
{
	unsigned long now = jiffies, old_scan, next_scan;
	struct task_struct *t = current;
	struct cpumask *cidmask;
	struct mm_struct *mm;
	int weight, cpu;

	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));

	work->next = work;	/* Prevent double-add */
	if (t->flags & PF_EXITING)
		return;
	mm = t->mm;
	if (!mm)
		return;
	old_scan = READ_ONCE(mm->mm_cid_next_scan);
	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
	if (!old_scan) {
		unsigned long res;

		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
		if (res != old_scan)
			old_scan = res;
		else
			old_scan = next_scan;
	}
	if (time_before(now, old_scan))
		return;
	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
		return;
	cidmask = mm_cidmask(mm);
	/* Clear cids that were not recently used. */
	for_each_possible_cpu(cpu)
		sched_mm_cid_remote_clear_old(mm, cpu);
	weight = cpumask_weight(cidmask);
	/*
	 * Clear cids that are greater than or equal to the cidmask weight to
	 * recompact it.
	 */
	for_each_possible_cpu(cpu)
		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
}

void init_sched_mm_cid(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	int mm_users = 0;

	if (mm) {
		mm_users = atomic_read(&mm->mm_users);
		if (mm_users == 1)
			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
	}
	t->cid_work.next = &t->cid_work;	/* Protect against double add */
	init_task_work(&t->cid_work, task_mm_cid_work);
}

void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
{
	struct callback_head *work = &curr->cid_work;
	unsigned long now = jiffies;

	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
	    work->next != work)
		return;
	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
		return;
	task_work_add(curr, work, TWA_RESUME);
}

void sched_mm_cid_exit_signals(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq_flags rf;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	rq_lock_irqsave(rq, &rf);
	preempt_enable_no_resched();	/* holding spinlock */
	WRITE_ONCE(t->mm_cid_active, 0);
	/*
	 * Store t->mm_cid_active before loading per-mm/cpu cid.
	 * Matches barrier in sched_mm_cid_remote_clear_old().
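	 *
	 * (Clarifying note, not part of the original comment: this is the
	 * usual store-buffering pairing. Here we store mm_cid_active = 0 and
	 * then load the per-mm/cpu cid; the remote clearing path updates the
	 * per-mm/cpu cid with cmpxchg and then loads mm_cid_active. With a
	 * full barrier on each side, at least one of the two sides observes
	 * the other's store, so the put below and the remote clear cannot
	 * both miss each other.)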
	 */
	smp_mb();
	mm_cid_put(mm);
	t->last_mm_cid = t->mm_cid = -1;
	rq_unlock_irqrestore(rq, &rf);
}

void sched_mm_cid_before_execve(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq_flags rf;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	rq_lock_irqsave(rq, &rf);
	preempt_enable_no_resched();	/* holding spinlock */
	WRITE_ONCE(t->mm_cid_active, 0);
	/*
	 * Store t->mm_cid_active before loading per-mm/cpu cid.
	 * Matches barrier in sched_mm_cid_remote_clear_old().
	 */
	smp_mb();
	mm_cid_put(mm);
	t->last_mm_cid = t->mm_cid = -1;
	rq_unlock_irqrestore(rq, &rf);
}

void sched_mm_cid_after_execve(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq_flags rf;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	rq_lock_irqsave(rq, &rf);
	preempt_enable_no_resched();	/* holding spinlock */
	WRITE_ONCE(t->mm_cid_active, 1);
	/*
	 * Store t->mm_cid_active before loading per-mm/cpu cid.
	 * Matches barrier in sched_mm_cid_remote_clear_old().
	 */
	smp_mb();
	t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
	rq_unlock_irqrestore(rq, &rf);
	rseq_set_notify_resume(t);
}

void sched_mm_cid_fork(struct task_struct *t)
{
	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
	t->mm_cid_active = 1;
}
#endif /* CONFIG_SCHED_MM_CID */
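/*
 * Illustrative summary of the lazy-put protocol used above (a reading aid,
 * not part of the original source): a per-mm/cpu cid goes through
 *
 *	valid cid --cmpxchg(set lazy-put flag)--> lazy-put --cmpxchg--> MM_CID_UNSET
 *
 * The second transition is only performed after checking, under RCU, that no
 * active task of the mm is current on the corresponding runqueue; if such a
 * task is observed, the flag is left set and that task is responsible for
 * completing the transition, as described in the comments above.
 */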