// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel scheduler code and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"

#include <linux/nospec.h>

#include <linux/kcov.h>
#include <linux/scs.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>

#include "../workqueue_internal.h"
#include "../../fs/io-wq.h"
#include "../smpboot.h"

#include "pelt.h"
#include "smp.h"

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT
#endif

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

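/*
 * Illustrative note (not part of the original source): these two knobs are
 * exposed through procfs, commonly as kernel.sched_rt_period_us and
 * kernel.sched_rt_runtime_us. With the defaults above, RT tasks may consume
 * at most 0.95s of CPU time in every 1s window, e.g.:
 *
 *	# sysctl -w kernel.sched_rt_runtime_us=900000	// reserve 10% for non-RT
 *	# sysctl -w kernel.sched_rt_runtime_us=-1	// disable RT throttling
 *
 * The exact sysctl names are an assumption based on common configurations;
 * the authoritative table lives in kernel/sysctl.c.
 */
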
/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeups, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task()/
 *    cpu_cgroup_fork():	p->sched_task_group
 *  - uclamp_update_active()	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *	o move_queued_task()
 *	o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *	o __migrate_swap_task()
 *	o push_rt_task() / pull_rt_task()
 *	o push_dl_task() / pull_dl_task()
 *	o dl_task_offline_migration()
 *
 */

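/*
 * Illustrative sketch (not part of the original file): the lockless p->state
 * protocol described above is what makes the canonical sleep/wakeup pattern
 * safe. A typical waiter, assuming some externally defined @cond, looks like:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * set_current_state() implies a full barrier, pairing with the barriers in
 * try_to_wake_up(), so a waker that sets @cond and then calls
 * wake_up_process() cannot be missed.
 */
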
/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

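/*
 * Illustrative sketch (not part of the original file): external code that
 * needs stable task state typically brackets its update with the
 * task_rq_lock()/task_rq_unlock() pair and refreshes the rq clock before
 * poking at rq state, e.g.:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	update_rq_clock(rq);
 *	... modify p's scheduling state ...
 *	task_rq_unlock(rq, p, &rf);
 *
 * This is the same pattern used further down by, for instance,
 * uclamp_update_active().
 */
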
/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight misattribution of {soft,}irq
	 * time; a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _old, _val = *_ptr;			\
									\
		for (;;) {						\
			_old = cmpxchg(_ptr, _val, _val | _mask);	\
			if (_old == _val)				\
				break;					\
			_val = _old;					\
		}							\
	_old;								\
})

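/*
 * Illustrative sketch (not part of the original file): fetch_or() atomically
 * ORs @mask into *@ptr and returns the *previous* value, i.e. it behaves like
 * an atomic fetch-or. With FLAG_A/FLAG_B as hypothetical bit masks:
 *
 *	unsigned long flags = FLAG_A;
 *	unsigned long old = fetch_or(&flags, FLAG_B);
 *	// old == FLAG_A, flags == (FLAG_A | FLAG_B)
 *
 * set_nr_and_not_polling() below relies on exactly this: the returned old
 * value tells it whether TIF_POLLING_NRFLAG was set at the moment
 * TIF_NEED_RESCHED was ORed in.
 */
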
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold a reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending whether or not the @task is already
 * queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

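/*
 * Illustrative sketch (not part of the original file): the wake_q API is used
 * to collect wakeups while holding a lock and issue them after the lock is
 * dropped; @some_lock and @waiter_task are placeholders:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	spin_lock(&some_lock);
 *	wake_q_add(&wake_q, waiter_task);
 *	spin_unlock(&some_lock);
 *	wake_up_q(&wake_q);
 *
 * Futexes, rwsems and mutexes use this pattern to avoid waking tasks while
 * still holding a hot lock.
 */
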
/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;

	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd),
			housekeeping_cpumask(HK_FLAG_TIMER)) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
	cpu = default_cpu;
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks
	 * left; if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

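/*
 * Illustrative sketch (not part of the original file): a typical caller walks
 * the whole hierarchy from the root, supplying tg_nop() for the direction it
 * does not care about. The RT bandwidth code, for instance, validates new
 * limits against every group roughly like so:
 *
 *	rcu_read_lock();
 *	ret = walk_tg_tree_from(&root_task_group, tg_rt_schedulable, tg_nop,
 *				&data);
 *	rcu_read_unlock();
 *
 * where tg_rt_schedulable() (kernel/sched/rt.c) is the @down visitor; a
 * non-zero return from either visitor aborts the walk and is propagated to
 * the caller.
 */
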
static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight:
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less than or equal to these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimal uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since we have actual users that make use of uclamp
 * functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return clamp_value / UCLAMP_BUCKET_DELTA;
}

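/*
 * Worked example (illustrative, assuming the default configuration of
 * UCLAMP_BUCKETS == 5 and SCHED_CAPACITY_SCALE == 1024): UCLAMP_BUCKET_DELTA
 * evaluates to DIV_ROUND_CLOSEST(1024, 5) == 205, so clamp values map to
 * buckets as:
 *
 *	value    0..204  -> bucket 0
 *	value  205..409  -> bucket 1
 *	value  410..614  -> bucket 2
 *	value  615..819  -> bucket 3
 *	value 820..1024  -> bucket 4
 */
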
static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;
	return SCHED_CAPACITY_SCALE;
}

static inline void uclamp_se_set(struct uclamp_se *uc_se,
				 unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->bucket_id = uclamp_bucket_id(value);
	uc_se->user_defined = user_defined;
}

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * topmost bucket with tasks in it.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
	unsigned int default_util_min;
	struct uclamp_se *uc_se;

	lockdep_assert_held(&p->pi_lock);

	uc_se = &p->uclamp_req[UCLAMP_MIN];

	/* Only sync if user didn't override the default */
	if (uc_se->user_defined)
		return;

	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
	uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	if (!rt_task(p))
		return;

	/* Protect updates to p->uclamp_* */
	rq = task_rq_lock(p, &rf);
	__uclamp_update_util_min_rt_default(p);
	task_rq_unlock(rq, p, &rf);
}

static void uclamp_sync_util_min_rt_default(void)
{
	struct task_struct *g, *p;

	/*
	 * copy_process()			sysctl_uclamp
	 *					  uclamp_min_rt = X;
	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
	 *   // link thread			  smp_mb__after_spinlock()
	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
	 *   sched_post_fork()			  for_each_process_thread()
	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
	 *
	 * Ensures that either sched_post_fork() will observe the new
	 * uclamp_min_rt or for_each_process_thread() will observe the new
	 * task.
	 */
	read_lock(&tasklist_lock);
	smp_mb__after_spinlock();
	read_unlock(&tasklist_lock);

	rcu_read_lock();
	for_each_process_thread(g, p)
		uclamp_update_util_min_rt_default(p);
	rcu_read_unlock();
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	struct uclamp_se uc_max;

	/*
	 * Tasks in autogroups or the root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	uc_max = task_group(p)->uclamp[clamp_id];
	if (uc_req.value > uc_max.value || !uc_req.user_defined)
		return uc_max;
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space; track
 * within each bucket the maximum value for tasks refcounted in it.
 * This "local max aggregation" allows tracking the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_held(&rq->lock);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > READ_ONCE(uc_rq->value))
		WRITE_ONCE(uc_rq->value, uc_se->value);
}

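/*
 * Worked example (illustrative): with two RUNNABLE tasks in the same bucket
 * requesting UCLAMP_MIN of 210 and 300 (both land in bucket 1 with the
 * default UCLAMP_BUCKET_DELTA of 205), the bucket tracks the local max, 300,
 * and rq->uclamp[UCLAMP_MIN].value is at least 300. If the 300 task is
 * dequeued while the 210 task stays RUNNABLE, the bucket keeps value 300
 * ("overboosting" the 210 task) until it empties, at which point
 * uclamp_rq_max_value() recomputes the rq-wide max from the remaining
 * buckets.
 */
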
/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_held(&rq->lock);

	/*
	 * If sched_uclamp_used was enabled after task @p was enqueued,
	 * we could end up with an unbalanced call to uclamp_rq_dec_id().
	 *
	 * In this case the uc_se->active flag should be false since no uclamp
	 * accounting was performed at enqueue time and we can just return
	 * here.
	 *
	 * Need to be careful of the following enqueue/dequeue ordering
	 * problem too
	 *
	 *	enqueue(taskA)
	 *	// sched_uclamp_used gets enabled
	 *	enqueue(taskB)
	 *	dequeue(taskA)
	 *	// Must not decrement bucket->tasks here
	 *	dequeue(taskB)
	 *
	 * where we could end up with stale data in uc_se and
	 * bucket[uc_se->bucket_id].
	 *
	 * The following check here eliminates the possibility of such a race.
	 */
	if (unlikely(!uc_se->active))
		return;

	bucket = &uc_rq->bucket[uc_se->bucket_id];

	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;

	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = READ_ONCE(uc_rq->value);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fix up the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		WRITE_ONCE(uc_rq->value, bkt_clamp);
	}
}

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void
uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	if (p->uclamp[clamp_id].active) {
		uclamp_rq_dec_id(rq, p, clamp_id);
		uclamp_rq_inc_id(rq, p, clamp_id);
	}

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css,
			   unsigned int clamps)
{
	enum uclamp_id clamp_id;
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it))) {
		for_each_clamp_id(clamp_id) {
			if ((0x1 << clamp_id) & clamps)
				uclamp_update_active(p, clamp_id);
		}
	}
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	rcu_read_lock();
	cpu_util_update_eff(&root_task_group.css);
	rcu_read_unlock();
}
#else
static void uclamp_update_root_tg(void) { }
#endif

int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max, old_min_rt;
	int result;

	mutex_lock(&uclamp_mutex);
	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;
	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		goto done;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {

		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_update_root_tg();
	}

	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_sync_util_min_rt_default();
	}

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */

	goto done;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
done:
	mutex_unlock(&uclamp_mutex);

	return result;
}

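/*
 * Illustrative note (not part of the original file): this handler backs the
 * kernel.sched_util_clamp_{min,max,min_rt_default} sysctls (the exact names
 * live in kernel/sysctl.c). E.g., to cap every task's effective utilization
 * at half of SCHED_CAPACITY_SCALE:
 *
 *	# sysctl -w kernel.sched_util_clamp_max=512
 *
 * Writing any of these knobs enables the sched_uclamp_used static key, so the
 * fast-path uclamp_rq_{inc,dec}() cost is only paid once uclamp is actually
 * used.
 */
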
static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

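/*
 * Illustrative sketch (not part of the original file): from user-space the
 * per-task clamps validated above are requested via the sched_setattr()
 * system call, e.g.:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_flags	= SCHED_FLAG_KEEP_ALL |
 *				  SCHED_FLAG_UTIL_CLAMP_MIN,
 *		.sched_util_min	= 256,
 *	};
 *
 *	sched_setattr(pid, &attr, 0);
 *
 * A sched_util_min/max of -1 (with the corresponding UTIL_CLAMP flag set)
 * resets the clamp to its non-user-defined default, as handled by
 * uclamp_reset() above.
 */
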
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT tasks by default have a 100% boost value that could be
		 * modified at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);

	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

static void uclamp_fork(struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
	 * as the task is still at its early fork stages.
	 */
	for_each_clamp_id(clamp_id)
		p->uclamp[clamp_id].active = false;

	if (likely(!p->sched_reset_on_fork))
		return;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&p->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}
}

static void uclamp_post_fork(struct task_struct *p)
{
	uclamp_update_util_min_rt_default(p);
}

static void __init init_uclamp_rq(struct rq *rq)
{
	enum uclamp_id clamp_id;
	struct uclamp_rq *uc_rq = rq->uclamp;

	for_each_clamp_id(clamp_id) {
		uc_rq[clamp_id] = (struct uclamp_rq) {
			.value = uclamp_none(clamp_id)
		};
	}

	rq->uclamp_flags = 0;
}

static void __init init_uclamp(void)
{
	struct uclamp_se uc_max = {};
	enum uclamp_id clamp_id;
	int cpu;

	for_each_possible_cpu(cpu)
		init_uclamp_rq(cpu_rq(cpu));

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&init_task.uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}

	/* System defaults allow max clamp values for both indexes */
	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
	for_each_clamp_id(clamp_id) {
		uclamp_default[clamp_id] = uc_max;
#ifdef CONFIG_UCLAMP_TASK_GROUP
		root_task_group.uclamp_req[clamp_id] = uc_max;
		root_task_group.uclamp[clamp_id] = uc_max;
#endif
	}
}

#else /* CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void uclamp_post_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & ENQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & ENQUEUE_RESTORE)) {
		sched_info_queued(rq, p);
		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
	}

	uclamp_rq_inc(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & DEQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & DEQUEUE_SAVE)) {
		sched_info_dequeued(rq, p);
		psi_dequeue(p, flags & DEQUEUE_SLEEP);
	}

	uclamp_rq_dec(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

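/*
 * Illustrative sketch (not part of the original file): code that changes a
 * queued task's scheduling properties typically takes it off the runqueue
 * with DEQUEUE_SAVE, mutates it, and requeues it with ENQUEUE_RESTORE so that
 * sched_info/psi accounting is not treated as a real sleep/wakeup:
 *
 *	dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
 *	... change p's priority/affinity/class ...
 *	enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 *
 * __do_set_cpus_allowed() further down is one user of exactly this pattern.
 */
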
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	enqueue_task(rq, p, flags);

	p->on_rq = TASK_ON_RQ_QUEUED;
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;

	dequeue_task(rq, p, flags);
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are an RT task or were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

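/*
 * Worked example (illustrative): with MAX_RT_PRIO == 100, the unified
 * p->prio scale produced by normal_prio() looks like:
 *
 *	SCHED_DEADLINE task:		prio = MAX_DL_PRIO - 1	= -1
 *	SCHED_FIFO, rt_priority 99:	prio = 99 - 99		= 0
 *	SCHED_FIFO, rt_priority 1:	prio = 99 - 1		= 98
 *	SCHED_NORMAL, nice 0:		prio = static_prio	= 120
 *	SCHED_NORMAL, nice 19:		prio = static_prio	= 139
 *
 * i.e. lower numbers mean higher priority, and effective_prio() only deviates
 * from this when the task is PI-boosted into the RT range.
 */
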
/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * This means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->sched_class == rq->curr->sched_class)
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	else if (p->sched_class > rq->curr->sched_class)
		resched_curr(rq);

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq);
}

#ifdef CONFIG_SMP

static void
__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);

static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask,
				  u32 flags);

static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
{
	if (likely(!p->migration_disabled))
		return;

	if (p->cpus_ptr != &p->cpus_mask)
		return;

	/*
	 * Violates locking rules! see comment in __do_set_cpus_allowed().
	 */
	__do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE);
}

void migrate_disable(void)
{
	struct task_struct *p = current;

	if (p->migration_disabled) {
		p->migration_disabled++;
		return;
	}

	preempt_disable();
	this_rq()->nr_pinned++;
	p->migration_disabled = 1;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);

void migrate_enable(void)
{
	struct task_struct *p = current;

	if (p->migration_disabled > 1) {
		p->migration_disabled--;
		return;
	}

	/*
	 * Ensure stop_task runs either before or after this, and that
	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
	 */
	preempt_disable();
	if (p->cpus_ptr != &p->cpus_mask)
		__set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
	/*
	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
	 * regular cpus_mask, otherwise things that race (eg.
	 * select_fallback_rq) get confused.
	 */
	barrier();
	p->migration_disabled = 0;
	this_rq()->nr_pinned--;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);

static inline bool rq_has_pinned_tasks(struct rq *rq)
{
	return rq->nr_pinned;
}

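/*
 * Illustrative sketch (not part of the original file): migrate_disable()
 * sections nest and only pin the task to its current CPU; unlike
 * preempt_disable() the task remains preemptible:
 *
 *	migrate_disable();
 *	// this_cpu_ptr() data stays on this CPU, but we may be preempted
 *	...
 *	migrate_enable();
 *
 * Any set_cpus_allowed_ptr() that excludes the pinned CPU blocks until the
 * outermost migrate_enable(), as described in the affinity comment at the
 * end of this section.
 */
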
/*
 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
 * __set_cpus_allowed_ptr() and select_fallback_rq().
 */
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
	/* When not in the task's cpumask, no point in looking further. */
	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
		return false;

	/* migrate_disabled() must be allowed to finish. */
	if (is_migration_disabled(p))
		return cpu_online(cpu);

	/* Non-kernel threads are not allowed during either online or offline. */
	if (!(p->flags & PF_KTHREAD))
		return cpu_active(cpu);

	/* KTHREAD_IS_PER_CPU is always allowed. */
	if (kthread_is_per_cpu(p))
		return cpu_online(cpu);

	/* Regular kernel threads don't get to stay during offline. */
	if (cpu_rq(cpu)->balance_push)
		return false;

	/* But are allowed during online. */
	return cpu_online(cpu);
}

/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

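/*
 * Illustrative sketch (not part of the original file): a caller such as
 * sched_exec() kicks off the above sequence roughly like this:
 *
 *	struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };
 *
 *	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
 *
 * stop_one_cpu() runs migration_cpu_stop() (below) on the victim CPU's
 * stopper thread, which forces @p off that CPU and, if the destination is
 * still allowed, moves it over with __migrate_task().
 */
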
1952 * 1953 * A !PREEMPT kernel has a giant hole here, which makes it far 1954 * more likely. 1955 */ 1956 1957 /* 1958 * The task moved before the stopper got to run. We're holding 1959 * ->pi_lock, so the allowed mask is stable - if it got 1960 * somewhere allowed, we're done. 1961 */ 1962 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { 1963 if (p->migration_pending == pending) 1964 p->migration_pending = NULL; 1965 complete = true; 1966 goto out; 1967 } 1968 1969 /* 1970 * When migrate_enable() hits a rq mis-match we can't reliably 1971 * determine is_migration_disabled() and so have to chase after 1972 * it. 1973 */ 1974 task_rq_unlock(rq, p, &rf); 1975 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, 1976 &pending->arg, &pending->stop_work); 1977 return 0; 1978 } 1979 out: 1980 task_rq_unlock(rq, p, &rf); 1981 1982 if (complete) 1983 complete_all(&pending->done); 1984 1985 /* For pending->{arg,stop_work} */ 1986 if (pending && refcount_dec_and_test(&pending->refs)) 1987 wake_up_var(&pending->refs); 1988 1989 return 0; 1990 } 1991 1992 int push_cpu_stop(void *arg) 1993 { 1994 struct rq *lowest_rq = NULL, *rq = this_rq(); 1995 struct task_struct *p = arg; 1996 1997 raw_spin_lock_irq(&p->pi_lock); 1998 raw_spin_lock(&rq->lock); 1999 2000 if (task_rq(p) != rq) 2001 goto out_unlock; 2002 2003 if (is_migration_disabled(p)) { 2004 p->migration_flags |= MDF_PUSH; 2005 goto out_unlock; 2006 } 2007 2008 p->migration_flags &= ~MDF_PUSH; 2009 2010 if (p->sched_class->find_lock_rq) 2011 lowest_rq = p->sched_class->find_lock_rq(p, rq); 2012 2013 if (!lowest_rq) 2014 goto out_unlock; 2015 2016 // XXX validate p is still the highest prio task 2017 if (task_rq(p) == rq) { 2018 deactivate_task(rq, p, 0); 2019 set_task_cpu(p, lowest_rq->cpu); 2020 activate_task(lowest_rq, p, 0); 2021 resched_curr(lowest_rq); 2022 } 2023 2024 double_unlock_balance(rq, lowest_rq); 2025 2026 out_unlock: 2027 rq->push_busy = false; 2028 raw_spin_unlock(&rq->lock); 2029 raw_spin_unlock_irq(&p->pi_lock); 2030 2031 put_task_struct(p); 2032 return 0; 2033 } 2034 2035 /* 2036 * sched_class::set_cpus_allowed must do the below, but is not required to 2037 * actually call this function. 2038 */ 2039 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags) 2040 { 2041 if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { 2042 p->cpus_ptr = new_mask; 2043 return; 2044 } 2045 2046 cpumask_copy(&p->cpus_mask, new_mask); 2047 p->nr_cpus_allowed = cpumask_weight(new_mask); 2048 } 2049 2050 static void 2051 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags) 2052 { 2053 struct rq *rq = task_rq(p); 2054 bool queued, running; 2055 2056 /* 2057 * This here violates the locking rules for affinity, since we're only 2058 * supposed to change these variables while holding both rq->lock and 2059 * p->pi_lock. 2060 * 2061 * HOWEVER, it magically works, because ttwu() is the only code that 2062 * accesses these variables under p->pi_lock and only does so after 2063 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() 2064 * before finish_task(). 2065 * 2066 * XXX do further audits, this smells like something putrid. 2067 */ 2068 if (flags & SCA_MIGRATE_DISABLE) 2069 SCHED_WARN_ON(!p->on_cpu); 2070 else 2071 lockdep_assert_held(&p->pi_lock); 2072 2073 queued = task_on_rq_queued(p); 2074 running = task_current(rq, p); 2075 2076 if (queued) { 2077 /* 2078 * Because __kthread_bind() calls this on blocked tasks without 2079 * holding rq->lock. 
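		 * A blocked task is !task_on_rq_queued() and never enters
		 * this branch, so it needs no rq->lock; only the queued
		 * case here actually manipulates the runqueue.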
2080 */ 2081 lockdep_assert_held(&rq->lock); 2082 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 2083 } 2084 if (running) 2085 put_prev_task(rq, p); 2086 2087 p->sched_class->set_cpus_allowed(p, new_mask, flags); 2088 2089 if (queued) 2090 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 2091 if (running) 2092 set_next_task(rq, p); 2093 } 2094 2095 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 2096 { 2097 __do_set_cpus_allowed(p, new_mask, 0); 2098 } 2099 2100 /* 2101 * This function is wildly self concurrent; here be dragons. 2102 * 2103 * 2104 * When given a valid mask, __set_cpus_allowed_ptr() must block until the 2105 * designated task is enqueued on an allowed CPU. If that task is currently 2106 * running, we have to kick it out using the CPU stopper. 2107 * 2108 * Migrate-Disable comes along and tramples all over our nice sandcastle. 2109 * Consider: 2110 * 2111 * Initial conditions: P0->cpus_mask = [0, 1] 2112 * 2113 * P0@CPU0 P1 2114 * 2115 * migrate_disable(); 2116 * <preempted> 2117 * set_cpus_allowed_ptr(P0, [1]); 2118 * 2119 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes 2120 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region). 2121 * This means we need the following scheme: 2122 * 2123 * P0@CPU0 P1 2124 * 2125 * migrate_disable(); 2126 * <preempted> 2127 * set_cpus_allowed_ptr(P0, [1]); 2128 * <blocks> 2129 * <resumes> 2130 * migrate_enable(); 2131 * __set_cpus_allowed_ptr(); 2132 * <wakes local stopper> 2133 * `--> <woken on migration completion> 2134 * 2135 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple 2136 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any 2137 * task p are serialized by p->pi_lock, which we can leverage: the one that 2138 * should come into effect at the end of the Migrate-Disable region is the last 2139 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask), 2140 * but we still need to properly signal those waiting tasks at the appropriate 2141 * moment. 2142 * 2143 * This is implemented using struct set_affinity_pending. The first 2144 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will 2145 * setup an instance of that struct and install it on the targeted task_struct. 2146 * Any and all further callers will reuse that instance. Those then wait for 2147 * a completion signaled at the tail of the CPU stopper callback (1), triggered 2148 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()). 2149 * 2150 * 2151 * (1) In the cases covered above. There is one more where the completion is 2152 * signaled within affine_move_task() itself: when a subsequent affinity request 2153 * cancels the need for an active migration. Consider: 2154 * 2155 * Initial conditions: P0->cpus_mask = [0, 1] 2156 * 2157 * P0@CPU0 P1 P2 2158 * 2159 * migrate_disable(); 2160 * <preempted> 2161 * set_cpus_allowed_ptr(P0, [1]); 2162 * <blocks> 2163 * set_cpus_allowed_ptr(P0, [0, 1]); 2164 * <signal completion> 2165 * <awakes> 2166 * 2167 * Note that the above is safe vs a concurrent migrate_enable(), as any 2168 * pending affinity completion is preceded by an uninstallation of 2169 * p->migration_pending done with p->pi_lock held. 
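 *
 * A sketch of the initial waiter's side, as implemented below for the
 * !SCA_MIGRATE_ENABLE case (refcounting of later callers elided):
 *
 *	struct set_affinity_pending my_pending = { };
 *
 *	refcount_set(&my_pending.refs, 1);
 *	init_completion(&my_pending.done);
 *	p->migration_pending = &my_pending;	// under task_rq_lock()
 *	...
 *	wait_for_completion(&my_pending.done);	// signalled by the stopper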
2170 */ 2171 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, 2172 int dest_cpu, unsigned int flags) 2173 { 2174 struct set_affinity_pending my_pending = { }, *pending = NULL; 2175 bool complete = false; 2176 2177 /* Can the task run on the task's current CPU? If so, we're done */ 2178 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { 2179 struct task_struct *push_task = NULL; 2180 2181 if ((flags & SCA_MIGRATE_ENABLE) && 2182 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { 2183 rq->push_busy = true; 2184 push_task = get_task_struct(p); 2185 } 2186 2187 pending = p->migration_pending; 2188 if (pending) { 2189 refcount_inc(&pending->refs); 2190 p->migration_pending = NULL; 2191 complete = true; 2192 } 2193 task_rq_unlock(rq, p, rf); 2194 2195 if (push_task) { 2196 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, 2197 p, &rq->push_work); 2198 } 2199 2200 if (complete) 2201 goto do_complete; 2202 2203 return 0; 2204 } 2205 2206 if (!(flags & SCA_MIGRATE_ENABLE)) { 2207 /* serialized by p->pi_lock */ 2208 if (!p->migration_pending) { 2209 /* Install the request */ 2210 refcount_set(&my_pending.refs, 1); 2211 init_completion(&my_pending.done); 2212 my_pending.arg = (struct migration_arg) { 2213 .task = p, 2214 .dest_cpu = -1, /* any */ 2215 .pending = &my_pending, 2216 }; 2217 2218 p->migration_pending = &my_pending; 2219 } else { 2220 pending = p->migration_pending; 2221 refcount_inc(&pending->refs); 2222 } 2223 } 2224 pending = p->migration_pending; 2225 /* 2226 * - !MIGRATE_ENABLE: 2227 * we'll have installed a pending if there wasn't one already. 2228 * 2229 * - MIGRATE_ENABLE: 2230 * we're here because the current CPU isn't matching anymore, 2231 * the only way that can happen is because of a concurrent 2232 * set_cpus_allowed_ptr() call, which should then still be 2233 * pending completion. 2234 * 2235 * Either way, we really should have a @pending here. 2236 */ 2237 if (WARN_ON_ONCE(!pending)) { 2238 task_rq_unlock(rq, p, rf); 2239 return -EINVAL; 2240 } 2241 2242 if (flags & SCA_MIGRATE_ENABLE) { 2243 2244 refcount_inc(&pending->refs); /* pending->{arg,stop_work} */ 2245 p->migration_flags &= ~MDF_PUSH; 2246 task_rq_unlock(rq, p, rf); 2247 2248 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, 2249 &pending->arg, &pending->stop_work); 2250 2251 return 0; 2252 } 2253 2254 if (task_running(rq, p) || p->state == TASK_WAKING) { 2255 /* 2256 * Lessen races (and headaches) by delegating 2257 * is_migration_disabled(p) checks to the stopper, which will 2258 * run on the same CPU as said p. 2259 */ 2260 refcount_inc(&pending->refs); /* pending->{arg,stop_work} */ 2261 task_rq_unlock(rq, p, rf); 2262 2263 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, 2264 &pending->arg, &pending->stop_work); 2265 2266 } else { 2267 2268 if (!is_migration_disabled(p)) { 2269 if (task_on_rq_queued(p)) 2270 rq = move_queued_task(rq, rf, p, dest_cpu); 2271 2272 p->migration_pending = NULL; 2273 complete = true; 2274 } 2275 task_rq_unlock(rq, p, rf); 2276 2277 do_complete: 2278 if (complete) 2279 complete_all(&pending->done); 2280 } 2281 2282 wait_for_completion(&pending->done); 2283 2284 if (refcount_dec_and_test(&pending->refs)) 2285 wake_up_var(&pending->refs); 2286 2287 /* 2288 * Block the original owner of &pending until all subsequent callers 2289 * have seen the completion and decremented the refcount 2290 */ 2291 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs)); 2292 2293 return 0; 2294 } 2295 2296 /* 2297 * Change a given task's CPU affinity. 
Migrate the thread to a 2298 * proper CPU and schedule it away if the CPU it's executing on 2299 * is removed from the allowed bitmask. 2300 * 2301 * NOTE: the caller must have a valid reference to the task, the 2302 * task must not exit() & deallocate itself prematurely. The 2303 * call is not atomic; no spinlocks may be held. 2304 */ 2305 static int __set_cpus_allowed_ptr(struct task_struct *p, 2306 const struct cpumask *new_mask, 2307 u32 flags) 2308 { 2309 const struct cpumask *cpu_valid_mask = cpu_active_mask; 2310 unsigned int dest_cpu; 2311 struct rq_flags rf; 2312 struct rq *rq; 2313 int ret = 0; 2314 2315 rq = task_rq_lock(p, &rf); 2316 update_rq_clock(rq); 2317 2318 if (p->flags & PF_KTHREAD || is_migration_disabled(p)) { 2319 /* 2320 * Kernel threads are allowed on online && !active CPUs, 2321 * however, during cpu-hot-unplug, even these might get pushed 2322 * away if not KTHREAD_IS_PER_CPU. 2323 * 2324 * Specifically, migration_disabled() tasks must not fail the 2325 * cpumask_any_and_distribute() pick below, esp. so on 2326 * SCA_MIGRATE_ENABLE, otherwise we'll not call 2327 * set_cpus_allowed_common() and actually reset p->cpus_ptr. 2328 */ 2329 cpu_valid_mask = cpu_online_mask; 2330 } 2331 2332 /* 2333 * Must re-check here, to close a race against __kthread_bind(), 2334 * sched_setaffinity() is not guaranteed to observe the flag. 2335 */ 2336 if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { 2337 ret = -EINVAL; 2338 goto out; 2339 } 2340 2341 if (!(flags & SCA_MIGRATE_ENABLE)) { 2342 if (cpumask_equal(&p->cpus_mask, new_mask)) 2343 goto out; 2344 2345 if (WARN_ON_ONCE(p == current && 2346 is_migration_disabled(p) && 2347 !cpumask_test_cpu(task_cpu(p), new_mask))) { 2348 ret = -EBUSY; 2349 goto out; 2350 } 2351 } 2352 2353 /* 2354 * Picking a ~random cpu helps in cases where we are changing affinity 2355 * for groups of tasks (ie. cpuset), so that load balancing is not 2356 * immediately required to distribute the tasks within their new mask. 2357 */ 2358 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask); 2359 if (dest_cpu >= nr_cpu_ids) { 2360 ret = -EINVAL; 2361 goto out; 2362 } 2363 2364 __do_set_cpus_allowed(p, new_mask, flags); 2365 2366 return affine_move_task(rq, p, &rf, dest_cpu, flags); 2367 2368 out: 2369 task_rq_unlock(rq, p, &rf); 2370 2371 return ret; 2372 } 2373 2374 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 2375 { 2376 return __set_cpus_allowed_ptr(p, new_mask, 0); 2377 } 2378 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 2379 2380 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 2381 { 2382 #ifdef CONFIG_SCHED_DEBUG 2383 /* 2384 * We should never call set_task_cpu() on a blocked task, 2385 * ttwu() will sort out the placement. 2386 */ 2387 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && 2388 !p->on_rq); 2389 2390 /* 2391 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, 2392 * because schedstat_wait_{start,end} rebase migrating task's wait_start 2393 * time relying on p->on_rq. 2394 */ 2395 WARN_ON_ONCE(p->state == TASK_RUNNING && 2396 p->sched_class == &fair_sched_class && 2397 (p->on_rq && !task_on_rq_migrating(p))); 2398 2399 #ifdef CONFIG_LOCKDEP 2400 /* 2401 * The caller should hold either p->pi_lock or rq->lock, when changing 2402 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 2403 * 2404 * sched_move_task() holds both and thus holding either pins the cgroup, 2405 * see task_group(). 
2406 * 2407 * Furthermore, all task_rq users should acquire both locks, see 2408 * task_rq_lock(). 2409 */ 2410 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 2411 lockdep_is_held(&task_rq(p)->lock))); 2412 #endif 2413 /* 2414 * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 2415 */ 2416 WARN_ON_ONCE(!cpu_online(new_cpu)); 2417 2418 WARN_ON_ONCE(is_migration_disabled(p)); 2419 #endif 2420 2421 trace_sched_migrate_task(p, new_cpu); 2422 2423 if (task_cpu(p) != new_cpu) { 2424 if (p->sched_class->migrate_task_rq) 2425 p->sched_class->migrate_task_rq(p, new_cpu); 2426 p->se.nr_migrations++; 2427 rseq_migrate(p); 2428 perf_event_task_migrate(p); 2429 } 2430 2431 __set_task_cpu(p, new_cpu); 2432 } 2433 2434 #ifdef CONFIG_NUMA_BALANCING 2435 static void __migrate_swap_task(struct task_struct *p, int cpu) 2436 { 2437 if (task_on_rq_queued(p)) { 2438 struct rq *src_rq, *dst_rq; 2439 struct rq_flags srf, drf; 2440 2441 src_rq = task_rq(p); 2442 dst_rq = cpu_rq(cpu); 2443 2444 rq_pin_lock(src_rq, &srf); 2445 rq_pin_lock(dst_rq, &drf); 2446 2447 deactivate_task(src_rq, p, 0); 2448 set_task_cpu(p, cpu); 2449 activate_task(dst_rq, p, 0); 2450 check_preempt_curr(dst_rq, p, 0); 2451 2452 rq_unpin_lock(dst_rq, &drf); 2453 rq_unpin_lock(src_rq, &srf); 2454 2455 } else { 2456 /* 2457 * Task isn't running anymore; make it appear like we migrated 2458 * it before it went to sleep. This means on wakeup we make the 2459 * previous CPU our target instead of where it really is. 2460 */ 2461 p->wake_cpu = cpu; 2462 } 2463 } 2464 2465 struct migration_swap_arg { 2466 struct task_struct *src_task, *dst_task; 2467 int src_cpu, dst_cpu; 2468 }; 2469 2470 static int migrate_swap_stop(void *data) 2471 { 2472 struct migration_swap_arg *arg = data; 2473 struct rq *src_rq, *dst_rq; 2474 int ret = -EAGAIN; 2475 2476 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) 2477 return -EAGAIN; 2478 2479 src_rq = cpu_rq(arg->src_cpu); 2480 dst_rq = cpu_rq(arg->dst_cpu); 2481 2482 double_raw_lock(&arg->src_task->pi_lock, 2483 &arg->dst_task->pi_lock); 2484 double_rq_lock(src_rq, dst_rq); 2485 2486 if (task_cpu(arg->dst_task) != arg->dst_cpu) 2487 goto unlock; 2488 2489 if (task_cpu(arg->src_task) != arg->src_cpu) 2490 goto unlock; 2491 2492 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) 2493 goto unlock; 2494 2495 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) 2496 goto unlock; 2497 2498 __migrate_swap_task(arg->src_task, arg->dst_cpu); 2499 __migrate_swap_task(arg->dst_task, arg->src_cpu); 2500 2501 ret = 0; 2502 2503 unlock: 2504 double_rq_unlock(src_rq, dst_rq); 2505 raw_spin_unlock(&arg->dst_task->pi_lock); 2506 raw_spin_unlock(&arg->src_task->pi_lock); 2507 2508 return ret; 2509 } 2510 2511 /* 2512 * Cross migrate two tasks 2513 */ 2514 int migrate_swap(struct task_struct *cur, struct task_struct *p, 2515 int target_cpu, int curr_cpu) 2516 { 2517 struct migration_swap_arg arg; 2518 int ret = -EINVAL; 2519 2520 arg = (struct migration_swap_arg){ 2521 .src_task = cur, 2522 .src_cpu = curr_cpu, 2523 .dst_task = p, 2524 .dst_cpu = target_cpu, 2525 }; 2526 2527 if (arg.src_cpu == arg.dst_cpu) 2528 goto out; 2529 2530 /* 2531 * These three tests are all lockless; this is OK since all of them 2532 * will be re-checked with proper locks held further down the line. 
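	 * (The proper locks being both tasks' ->pi_lock and both runqueue
	 * locks, taken via double_raw_lock() + double_rq_lock() in
	 * migrate_swap_stop() above.)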
2533 */ 2534 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 2535 goto out; 2536 2537 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) 2538 goto out; 2539 2540 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) 2541 goto out; 2542 2543 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 2544 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 2545 2546 out: 2547 return ret; 2548 } 2549 #endif /* CONFIG_NUMA_BALANCING */ 2550 2551 /* 2552 * wait_task_inactive - wait for a thread to unschedule. 2553 * 2554 * If @match_state is nonzero, it's the @p->state value just checked and 2555 * not expected to change. If it changes, i.e. @p might have woken up, 2556 * then return zero. When we succeed in waiting for @p to be off its CPU, 2557 * we return a positive number (its total switch count). If a second call 2558 * a short while later returns the same number, the caller can be sure that 2559 * @p has remained unscheduled the whole time. 2560 * 2561 * The caller must ensure that the task *will* unschedule sometime soon, 2562 * else this function might spin for a *long* time. This function can't 2563 * be called with interrupts off, or it may introduce deadlock with 2564 * smp_call_function() if an IPI is sent by the same process we are 2565 * waiting to become inactive. 2566 */ 2567 unsigned long wait_task_inactive(struct task_struct *p, long match_state) 2568 { 2569 int running, queued; 2570 struct rq_flags rf; 2571 unsigned long ncsw; 2572 struct rq *rq; 2573 2574 for (;;) { 2575 /* 2576 * We do the initial early heuristics without holding 2577 * any task-queue locks at all. We'll only try to get 2578 * the runqueue lock when things look like they will 2579 * work out! 2580 */ 2581 rq = task_rq(p); 2582 2583 /* 2584 * If the task is actively running on another CPU 2585 * still, just relax and busy-wait without holding 2586 * any locks. 2587 * 2588 * NOTE! Since we don't hold any locks, it's not 2589 * even sure that "rq" stays as the right runqueue! 2590 * But we don't care, since "task_running()" will 2591 * return false if the runqueue has changed and p 2592 * is actually now running somewhere else! 2593 */ 2594 while (task_running(rq, p)) { 2595 if (match_state && unlikely(p->state != match_state)) 2596 return 0; 2597 cpu_relax(); 2598 } 2599 2600 /* 2601 * Ok, time to look more closely! We need the rq 2602 * lock now, to be *sure*. If we're wrong, we'll 2603 * just go back and repeat. 2604 */ 2605 rq = task_rq_lock(p, &rf); 2606 trace_sched_wait_task(p); 2607 running = task_running(rq, p); 2608 queued = task_on_rq_queued(p); 2609 ncsw = 0; 2610 if (!match_state || p->state == match_state) 2611 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 2612 task_rq_unlock(rq, p, &rf); 2613 2614 /* 2615 * If it changed from the expected state, bail out now. 2616 */ 2617 if (unlikely(!ncsw)) 2618 break; 2619 2620 /* 2621 * Was it really running after all now that we 2622 * checked with the proper locks actually held? 2623 * 2624 * Oops. Go back and try again.. 2625 */ 2626 if (unlikely(running)) { 2627 cpu_relax(); 2628 continue; 2629 } 2630 2631 /* 2632 * It's not enough that it's not actively running, 2633 * it must be off the runqueue _entirely_, and not 2634 * preempted! 2635 * 2636 * So if it was still runnable (but just not actively 2637 * running right now), it's preempted, and we should 2638 * yield - it could be a while. 
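		 *
		 * (The sleep below is roughly one tick: NSEC_PER_SEC / HZ
		 * nanoseconds, about the soonest the preempted task can
		 * plausibly have been scheduled back on and off again.)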
 */
		if (unlikely(queued)) {
			ktime_t to = NSEC_PER_SEC / HZ;

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel mode without any delay (e.g. to get signals handled).
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);

/*
 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
 *
 * A few notes on cpu_active vs cpu_online:
 *
 * - cpu_active must be a subset of cpu_online
 *
 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
 *   see __set_cpus_allowed_ptr(). At this point the newly online
 *   CPU isn't yet part of the sched domains, and balancing will not
 *   see it.
 *
 * - on CPU-down we clear cpu_active() to mask the sched domains and
 *   keep the load balancer from placing new tasks on the to-be-removed
 *   CPU. Existing tasks will remain running there and will be taken
 *   off.
 *
 * This means that fallback selection must not select !active CPUs, and
 * it can assume that any active CPU is online. Conversely,
 * select_task_rq() below may allow selection of !active CPUs in order
 * to satisfy the above rules.
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the CPU is on has been offlined, cpu_to_node()
	 * will return -1. There are then no CPUs on the node, and we should
	 * select a CPU on another node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for an allowed, online CPU on the same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, p->cpus_ptr) {
			if (!is_cpu_allowed(p, dest_cpu))
				continue;

			goto out;
		}

		/* No more Mr. Nice Guy. */
		switch (state) {
		case cpuset:
			if (IS_ENABLED(CONFIG_CPUSETS)) {
				cpuset_cpus_allowed_fallback(p);
				state = possible;
				break;
			}
			fallthrough;
		case possible:
			/*
			 * XXX When called from select_task_rq() we only
			 * hold p->pi_lock and again violate locking order.
			 *
			 * More yuck to audit.
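			 *
			 * (The fallback widens in two steps: first to the
			 * task's cpuset via cpuset_cpus_allowed_fallback()
			 * above, then to cpu_possible_mask below. Reaching
			 * 'fail' means even cpu_possible_mask yielded
			 * nothing, hence the BUG().)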
			 */
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave the kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
{
	lockdep_assert_held(&p->pi_lock);

	if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
		cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
	else
		cpu = cpumask_any(p->cpus_ptr);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
	 * CPU.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task_rq() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!is_cpu_allowed(p, cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	static struct lock_class_key stop_pi_lock;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;

		/*
		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
		 * adjust the effective priority of a task. As a result,
		 * rt_mutex_setprio() can trigger (RT) balancing operations,
		 * which can then trigger wakeups of the stop thread to push
		 * around the current task.
		 *
		 * The stop task itself will never be part of the PI-chain, it
		 * never blocks, therefore that ->pi_lock recursion is safe.
		 * Tell lockdep about this by placing the stop->pi_lock in its
		 * own class.
		 */
		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
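		 * (It was made SCHED_FIFO above, so the RT class is the
		 * one matching its policy while it winds down.)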
2850 */ 2851 old_stop->sched_class = &rt_sched_class; 2852 } 2853 } 2854 2855 #else /* CONFIG_SMP */ 2856 2857 static inline int __set_cpus_allowed_ptr(struct task_struct *p, 2858 const struct cpumask *new_mask, 2859 u32 flags) 2860 { 2861 return set_cpus_allowed_ptr(p, new_mask); 2862 } 2863 2864 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } 2865 2866 static inline bool rq_has_pinned_tasks(struct rq *rq) 2867 { 2868 return false; 2869 } 2870 2871 #endif /* !CONFIG_SMP */ 2872 2873 static void 2874 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 2875 { 2876 struct rq *rq; 2877 2878 if (!schedstat_enabled()) 2879 return; 2880 2881 rq = this_rq(); 2882 2883 #ifdef CONFIG_SMP 2884 if (cpu == rq->cpu) { 2885 __schedstat_inc(rq->ttwu_local); 2886 __schedstat_inc(p->se.statistics.nr_wakeups_local); 2887 } else { 2888 struct sched_domain *sd; 2889 2890 __schedstat_inc(p->se.statistics.nr_wakeups_remote); 2891 rcu_read_lock(); 2892 for_each_domain(rq->cpu, sd) { 2893 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 2894 __schedstat_inc(sd->ttwu_wake_remote); 2895 break; 2896 } 2897 } 2898 rcu_read_unlock(); 2899 } 2900 2901 if (wake_flags & WF_MIGRATED) 2902 __schedstat_inc(p->se.statistics.nr_wakeups_migrate); 2903 #endif /* CONFIG_SMP */ 2904 2905 __schedstat_inc(rq->ttwu_count); 2906 __schedstat_inc(p->se.statistics.nr_wakeups); 2907 2908 if (wake_flags & WF_SYNC) 2909 __schedstat_inc(p->se.statistics.nr_wakeups_sync); 2910 } 2911 2912 /* 2913 * Mark the task runnable and perform wakeup-preemption. 2914 */ 2915 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, 2916 struct rq_flags *rf) 2917 { 2918 check_preempt_curr(rq, p, wake_flags); 2919 p->state = TASK_RUNNING; 2920 trace_sched_wakeup(p); 2921 2922 #ifdef CONFIG_SMP 2923 if (p->sched_class->task_woken) { 2924 /* 2925 * Our task @p is fully woken up and running; so it's safe to 2926 * drop the rq->lock, hereafter rq is only used for statistics. 2927 */ 2928 rq_unpin_lock(rq, rf); 2929 p->sched_class->task_woken(rq, p); 2930 rq_repin_lock(rq, rf); 2931 } 2932 2933 if (rq->idle_stamp) { 2934 u64 delta = rq_clock(rq) - rq->idle_stamp; 2935 u64 max = 2*rq->max_idle_balance_cost; 2936 2937 update_avg(&rq->avg_idle, delta); 2938 2939 if (rq->avg_idle > max) 2940 rq->avg_idle = max; 2941 2942 rq->idle_stamp = 0; 2943 } 2944 #endif 2945 } 2946 2947 static void 2948 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 2949 struct rq_flags *rf) 2950 { 2951 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 2952 2953 lockdep_assert_held(&rq->lock); 2954 2955 if (p->sched_contributes_to_load) 2956 rq->nr_uninterruptible--; 2957 2958 #ifdef CONFIG_SMP 2959 if (wake_flags & WF_MIGRATED) 2960 en_flags |= ENQUEUE_MIGRATED; 2961 else 2962 #endif 2963 if (p->in_iowait) { 2964 delayacct_blkio_end(p); 2965 atomic_dec(&task_rq(p)->nr_iowait); 2966 } 2967 2968 activate_task(rq, p, en_flags); 2969 ttwu_do_wakeup(rq, p, wake_flags, rf); 2970 } 2971 2972 /* 2973 * Consider @p being inside a wait loop: 2974 * 2975 * for (;;) { 2976 * set_current_state(TASK_UNINTERRUPTIBLE); 2977 * 2978 * if (CONDITION) 2979 * break; 2980 * 2981 * schedule(); 2982 * } 2983 * __set_current_state(TASK_RUNNING); 2984 * 2985 * between set_current_state() and schedule(). In this case @p is still 2986 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in 2987 * an atomic manner. 
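 *
 * A sketch of the still-queued case (one possible interleaving):
 *
 *   waker				@p
 *					set_current_state(TASK_UNINTERRUPTIBLE);
 *   CONDITION = 1;
 *   try_to_wake_up()
 *     ttwu_runnable()
 *       p->state = TASK_RUNNING;
 *					if (CONDITION)
 *						break;	// @p never slept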
 *
 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
 * then schedule() must still happen and p->state can be changed to
 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
 * need to do a full wakeup with enqueue.
 *
 * Returns: %true when the wakeup is done,
 *	    %false otherwise.
 */
static int ttwu_runnable(struct task_struct *p, int wake_flags)
{
	struct rq_flags rf;
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p, &rf);
	if (task_on_rq_queued(p)) {
		/* check_preempt_curr() may use rq clock */
		update_rq_clock(rq);
		ttwu_do_wakeup(rq, p, wake_flags, &rf);
		ret = 1;
	}
	__task_rq_unlock(rq, &rf);

	return ret;
}

#ifdef CONFIG_SMP
void sched_ttwu_pending(void *arg)
{
	struct llist_node *llist = arg;
	struct rq *rq = this_rq();
	struct task_struct *p, *t;
	struct rq_flags rf;

	if (!llist)
		return;

	/*
	 * rq::ttwu_pending is a racy indication of outstanding wakeups.
	 * Races such that false-negatives are possible, since they
	 * are shorter lived than false-positives would be.
	 */
	WRITE_ONCE(rq->ttwu_pending, 0);

	rq_lock_irqsave(rq, &rf);
	update_rq_clock(rq);

	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
		if (WARN_ON_ONCE(p->on_cpu))
			smp_cond_load_acquire(&p->on_cpu, !VAL);

		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
			set_task_cpu(p, cpu_of(rq));

		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
	}

	rq_unlock_irqrestore(rq, &rf);
}

void send_call_function_single_ipi(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (!set_nr_if_polling(rq->idle))
		arch_send_call_function_single_ipi(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

/*
 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
 * necessary. The wakee CPU on receipt of the IPI will queue the task
 * via sched_ttwu_pending() for activation so the wakee incurs the cost
 * of the wakeup instead of the waker.
 */
static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
	struct rq *rq = cpu_rq(cpu);

	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);

	WRITE_ONCE(rq->ttwu_pending, 1);
	__smp_call_single_queue(cpu, &p->wake_entry.llist);
}

void wake_up_if_idle(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	rcu_read_lock();

	if (!is_idle_task(rcu_dereference(rq->curr)))
		goto out;

	if (set_nr_if_polling(rq->idle)) {
		trace_sched_wake_idle_without_ipi(cpu);
	} else {
		rq_lock_irqsave(rq, &rf);
		if (is_idle_task(rq->curr))
			smp_send_reschedule(cpu);
		/* Else CPU is not idle, do nothing here: */
		rq_unlock_irqrestore(rq, &rf);
	}

out:
	rcu_read_unlock();
}

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}

static inline bool ttwu_queue_cond(int cpu, int wake_flags)
{
	/*
	 * Do not complicate things with the async wake_list while the CPU is
	 * in hotplug state.
	 */
	if (!cpu_active(cpu))
		return false;

	/*
	 * If the CPU does not share cache, then queue the task on the
	 * remote rq's wakelist to avoid accessing remote data.
	 */
	if (!cpus_share_cache(smp_processor_id(), cpu))
		return true;

	/*
	 * If the task is descheduling and is the only running task on the
	 * CPU, then use the wakelist to offload the task activation to
	 * the soon-to-be-idle CPU, as the current CPU is likely busy.
	 * nr_running is checked to avoid unnecessary task stacking.
	 */
	if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
		return true;

	return false;
}

static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
		if (WARN_ON_ONCE(cpu == smp_processor_id()))
			return false;

		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
		__ttwu_queue_wakelist(p, cpu, wake_flags);
		return true;
	}

	return false;
}

#else /* !CONFIG_SMP */

static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
	return false;
}

#endif /* CONFIG_SMP */

static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	if (ttwu_queue_wakelist(p, cpu, wake_flags))
		return;

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	ttwu_do_activate(rq, p, wake_flags, &rf);
	rq_unlock(rq, &rf);
}

/*
 * Notes on Program-Order guarantees on SMP systems.
 *
 * MIGRATION
 *
 * The basic program-order guarantee on SMP systems is that when a task [t]
 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
 * execution on its new CPU [c1].
 *
 * For migration (of runnable tasks) this is provided by the following means:
 *
 *  A) UNLOCK of the rq(c0)->lock scheduling out task t
 *  B) migration for t is required to synchronize *both* rq(c0)->lock and
 *     rq(c1)->lock (if not at the same time, then in that order).
 *  C) LOCK of the rq(c1)->lock scheduling in task
 *
 * Release/acquire chaining guarantees that B happens after A and C after B.
 * Note: the CPU doing B need not be c0 or c1.
 *
 * Example:
 *
 *   CPU0            CPU1            CPU2
 *
 *   LOCK rq(0)->lock
 *   sched-out X
 *   sched-in Y
 *   UNLOCK rq(0)->lock
 *
 *                                   LOCK rq(0)->lock // orders against CPU0
 *                                   dequeue X
 *                                   UNLOCK rq(0)->lock
 *
 *                                   LOCK rq(1)->lock
 *                                   enqueue X
 *                                   UNLOCK rq(1)->lock
 *
 *                   LOCK rq(1)->lock // orders against CPU2
 *                   sched-out Z
 *                   sched-in X
 *                   UNLOCK rq(1)->lock
 *
 *
 * BLOCKING -- aka. SLEEP + WAKEUP
 *
 * For blocking we (obviously) need to provide the same guarantee as for
 * migration. However the means are completely different as there is no lock
 * chain to provide order. Instead we do:
 *
 *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
 *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
 *
 * Example:
 *
 *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
 *
 *   LOCK rq(0)->lock LOCK X->pi_lock
 *   dequeue X
 *   sched-out X
 *   smp_store_release(X->on_cpu, 0);
 *
 *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
 *                    X->state = WAKING
 *                    set_task_cpu(X,2)
 *
 *                    LOCK rq(2)->lock
 *                    enqueue X
 *                    X->state = RUNNING
 *                    UNLOCK rq(2)->lock
 *
 *                                          LOCK rq(2)->lock // orders against CPU1
 *                                          sched-out Z
 *                                          sched-in X
 *                                          UNLOCK rq(2)->lock
 *
 *                    UNLOCK X->pi_lock
 *   UNLOCK rq(0)->lock
 *
 *
 * However, for wakeups there is a second guarantee we must provide, namely we
 * must ensure that CONDITION=1 done by the caller can not be reordered with
 * accesses to the task state; see try_to_wake_up() and set_current_state().
 */

/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Conceptually does:
 *
 *   If (@state & @p->state) @p->state = TASK_RUNNING.
 *
 * If the task was not queued/runnable, also place it back on a runqueue.
 *
 * This function is atomic against schedule() which would dequeue the task.
 *
 * It issues a full memory barrier before accessing @p->state, see the comment
 * with set_current_state().
 *
 * Uses p->pi_lock to serialize against concurrent wake-ups.
 *
 * Relies on p->pi_lock stabilizing:
 *  - p->sched_class
 *  - p->cpus_ptr
 *  - p->sched_task_group
 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
 *
 * Tries really hard to only take one task_rq(p)->lock for performance.
 * Takes rq->lock in:
 *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
 *  - ttwu_queue()       -- new rq, for enqueue of the task;
 *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
 *
 * As a consequence we race really badly with just about everything. See the
 * many memory barriers and their comments for details.
 *
 * Return: %true if @p->state changes (an actual wakeup was done),
 *	   %false otherwise.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int cpu, success = 0;

	preempt_disable();
	if (p == current) {
		/*
		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
		 * == smp_processor_id()'. Together this means we can special
		 * case the whole 'p->on_rq && ttwu_runnable()' case below
		 * without taking any locks.
		 *
		 * In particular:
		 *  - we rely on Program-Order guarantees for all the ordering,
		 *  - we're serialized against set_special_state() by virtue of
		 *    it disabling IRQs (this allows not taking ->pi_lock).
		 */
		if (!(p->state & state))
			goto out;

		success = 1;
		trace_sched_waking(p);
		p->state = TASK_RUNNING;
		trace_sched_wakeup(p);
		goto out;
	}

	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller can not be
	 * reordered with the p->state check below. This pairs with smp_store_mb()
	 * in set_current_state() that the waiting thread does.
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	smp_mb__after_spinlock();
	if (!(p->state & state))
		goto unlock;

	trace_sched_waking(p);

	/* We're going to change ->state: */
	success = 1;

	/*
	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
	 * in smp_cond_load_acquire() below.
	 *
	 * sched_ttwu_pending()			try_to_wake_up()
	 *   STORE p->on_rq = 1			  LOAD p->state
	 *   UNLOCK rq->lock
	 *
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock			  smp_rmb();
	 *   smp_mb__after_spinlock();
	 *   UNLOCK rq->lock
	 *
	 * [task p]
	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
	 *
	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
	 * __schedule(). See the comment for smp_mb__after_spinlock().
	 *
	 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
	 */
	smp_rmb();
	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
		goto unlock;

#ifdef CONFIG_SMP
	/*
	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
	 * possible to, falsely, observe p->on_cpu == 0.
	 *
	 * One must be running (->on_cpu == 1) in order to remove oneself
	 * from the runqueue.
	 *
	 * __schedule() (switch to task 'p')	try_to_wake_up()
	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
	 *   UNLOCK rq->lock
	 *
	 * __schedule() (put 'p' to sleep)
	 *   LOCK rq->lock			  smp_rmb();
	 *   smp_mb__after_spinlock();
	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
	 *
	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
	 * __schedule(). See the comment for smp_mb__after_spinlock().
	 *
	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
	 * schedule()'s deactivate_task() has 'happened' and p will no longer
	 * care about its own p->state. See the comment in __schedule().
	 */
	smp_acquire__after_ctrl_dep();

	/*
	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
	 * == 0), which means we need to do an enqueue, change p->state to
	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
	 * enqueue, such as ttwu_queue_wakelist().
	 */
	p->state = TASK_WAKING;

	/*
	 * If the owning (remote) CPU is still in the middle of schedule() with
	 * this task as prev, consider queueing p on the remote CPU's wake_list
	 * which potentially sends an IPI instead of spinning on p->on_cpu to
	 * let the waker make forward progress. This is safe because IRQs are
	 * disabled and the IPI will deliver after on_cpu is cleared.
	 *
	 * Ensure we load task_cpu(p) after p->on_cpu:
	 *
	 *   set_task_cpu(p, cpu);
	 *     STORE p->cpu = @cpu
	 *
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock
	 *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
	 *   STORE p->on_cpu = 1		  LOAD p->cpu
	 *
	 * to ensure we observe the correct CPU on which the task is currently
	 * scheduling.
	 */
	if (smp_load_acquire(&p->on_cpu) &&
	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
		goto unlock;

	/*
	 * If the owning (remote) CPU is still in the middle of schedule() with
	 * this task as prev, wait until it's done referencing the task.
3420 * 3421 * Pairs with the smp_store_release() in finish_task(). 3422 * 3423 * This ensures that tasks getting woken will be fully ordered against 3424 * their previous state and preserve Program Order. 3425 */ 3426 smp_cond_load_acquire(&p->on_cpu, !VAL); 3427 3428 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU); 3429 if (task_cpu(p) != cpu) { 3430 if (p->in_iowait) { 3431 delayacct_blkio_end(p); 3432 atomic_dec(&task_rq(p)->nr_iowait); 3433 } 3434 3435 wake_flags |= WF_MIGRATED; 3436 psi_ttwu_dequeue(p); 3437 set_task_cpu(p, cpu); 3438 } 3439 #else 3440 cpu = task_cpu(p); 3441 #endif /* CONFIG_SMP */ 3442 3443 ttwu_queue(p, cpu, wake_flags); 3444 unlock: 3445 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3446 out: 3447 if (success) 3448 ttwu_stat(p, task_cpu(p), wake_flags); 3449 preempt_enable(); 3450 3451 return success; 3452 } 3453 3454 /** 3455 * try_invoke_on_locked_down_task - Invoke a function on task in fixed state 3456 * @p: Process for which the function is to be invoked, can be @current. 3457 * @func: Function to invoke. 3458 * @arg: Argument to function. 3459 * 3460 * If the specified task can be quickly locked into a definite state 3461 * (either sleeping or on a given runqueue), arrange to keep it in that 3462 * state while invoking @func(@arg). This function can use ->on_rq and 3463 * task_curr() to work out what the state is, if required. Given that 3464 * @func can be invoked with a runqueue lock held, it had better be quite 3465 * lightweight. 3466 * 3467 * Returns: 3468 * @false if the task slipped out from under the locks. 3469 * @true if the task was locked onto a runqueue or is sleeping. 3470 * However, @func can override this by returning @false. 3471 */ 3472 bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg) 3473 { 3474 struct rq_flags rf; 3475 bool ret = false; 3476 struct rq *rq; 3477 3478 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 3479 if (p->on_rq) { 3480 rq = __task_rq_lock(p, &rf); 3481 if (task_rq(p) == rq) 3482 ret = func(p, arg); 3483 rq_unlock(rq, &rf); 3484 } else { 3485 switch (p->state) { 3486 case TASK_RUNNING: 3487 case TASK_WAKING: 3488 break; 3489 default: 3490 smp_rmb(); // See smp_rmb() comment in try_to_wake_up(). 3491 if (!p->on_rq) 3492 ret = func(p, arg); 3493 } 3494 } 3495 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); 3496 return ret; 3497 } 3498 3499 /** 3500 * wake_up_process - Wake up a specific process 3501 * @p: The process to be woken up. 3502 * 3503 * Attempt to wake up the nominated process and move it to the set of runnable 3504 * processes. 3505 * 3506 * Return: 1 if the process was woken up, 0 if it was already running. 3507 * 3508 * This function executes a full memory barrier before accessing the task state. 3509 */ 3510 int wake_up_process(struct task_struct *p) 3511 { 3512 return try_to_wake_up(p, TASK_NORMAL, 0); 3513 } 3514 EXPORT_SYMBOL(wake_up_process); 3515 3516 int wake_up_state(struct task_struct *p, unsigned int state) 3517 { 3518 return try_to_wake_up(p, state, 0); 3519 } 3520 3521 /* 3522 * Perform scheduler related setup for a newly forked process p. 3523 * p is forked by current. 
3524 * 3525 * __sched_fork() is basic setup used by init_idle() too: 3526 */ 3527 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 3528 { 3529 p->on_rq = 0; 3530 3531 p->se.on_rq = 0; 3532 p->se.exec_start = 0; 3533 p->se.sum_exec_runtime = 0; 3534 p->se.prev_sum_exec_runtime = 0; 3535 p->se.nr_migrations = 0; 3536 p->se.vruntime = 0; 3537 INIT_LIST_HEAD(&p->se.group_node); 3538 3539 #ifdef CONFIG_FAIR_GROUP_SCHED 3540 p->se.cfs_rq = NULL; 3541 #endif 3542 3543 #ifdef CONFIG_SCHEDSTATS 3544 /* Even if schedstat is disabled, there should not be garbage */ 3545 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 3546 #endif 3547 3548 RB_CLEAR_NODE(&p->dl.rb_node); 3549 init_dl_task_timer(&p->dl); 3550 init_dl_inactive_task_timer(&p->dl); 3551 __dl_clear_params(p); 3552 3553 INIT_LIST_HEAD(&p->rt.run_list); 3554 p->rt.timeout = 0; 3555 p->rt.time_slice = sched_rr_timeslice; 3556 p->rt.on_rq = 0; 3557 p->rt.on_list = 0; 3558 3559 #ifdef CONFIG_PREEMPT_NOTIFIERS 3560 INIT_HLIST_HEAD(&p->preempt_notifiers); 3561 #endif 3562 3563 #ifdef CONFIG_COMPACTION 3564 p->capture_control = NULL; 3565 #endif 3566 init_numa_balancing(clone_flags, p); 3567 #ifdef CONFIG_SMP 3568 p->wake_entry.u_flags = CSD_TYPE_TTWU; 3569 p->migration_pending = NULL; 3570 #endif 3571 } 3572 3573 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 3574 3575 #ifdef CONFIG_NUMA_BALANCING 3576 3577 void set_numabalancing_state(bool enabled) 3578 { 3579 if (enabled) 3580 static_branch_enable(&sched_numa_balancing); 3581 else 3582 static_branch_disable(&sched_numa_balancing); 3583 } 3584 3585 #ifdef CONFIG_PROC_SYSCTL 3586 int sysctl_numa_balancing(struct ctl_table *table, int write, 3587 void *buffer, size_t *lenp, loff_t *ppos) 3588 { 3589 struct ctl_table t; 3590 int err; 3591 int state = static_branch_likely(&sched_numa_balancing); 3592 3593 if (write && !capable(CAP_SYS_ADMIN)) 3594 return -EPERM; 3595 3596 t = *table; 3597 t.data = &state; 3598 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 3599 if (err < 0) 3600 return err; 3601 if (write) 3602 set_numabalancing_state(state); 3603 return err; 3604 } 3605 #endif 3606 #endif 3607 3608 #ifdef CONFIG_SCHEDSTATS 3609 3610 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 3611 static bool __initdata __sched_schedstats = false; 3612 3613 static void set_schedstats(bool enabled) 3614 { 3615 if (enabled) 3616 static_branch_enable(&sched_schedstats); 3617 else 3618 static_branch_disable(&sched_schedstats); 3619 } 3620 3621 void force_schedstat_enabled(void) 3622 { 3623 if (!schedstat_enabled()) { 3624 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 3625 static_branch_enable(&sched_schedstats); 3626 } 3627 } 3628 3629 static int __init setup_schedstats(char *str) 3630 { 3631 int ret = 0; 3632 if (!str) 3633 goto out; 3634 3635 /* 3636 * This code is called before jump labels have been set up, so we can't 3637 * change the static branch directly just yet. Instead set a temporary 3638 * variable so init_schedstats() can do it later. 
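	 *
	 * (Usage: boot with "schedstats=enable" or "schedstats=disable"
	 * on the kernel command line.)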
	 */
	if (!strcmp(str, "enable")) {
		__sched_schedstats = true;
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		__sched_schedstats = false;
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("Unable to parse schedstats=\n");

	return ret;
}
__setup("schedstats=", setup_schedstats);

static void __init init_schedstats(void)
{
	set_schedstats(__sched_schedstats);
}

#ifdef CONFIG_PROC_SYSCTL
int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int err;
	int state = static_branch_likely(&sched_schedstats);

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	t = *table;
	t.data = &state;
	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;
	if (write)
		set_schedstats(state);
	return err;
}
#endif /* CONFIG_PROC_SYSCTL */
#else  /* !CONFIG_SCHEDSTATS */
static inline void init_schedstats(void) {}
#endif /* CONFIG_SCHEDSTATS */

/*
 * fork()/clone()-time setup:
 */
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long flags;

	__sched_fork(clone_flags, p);
	/*
	 * We mark the process as NEW here. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_NEW;

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;

	uclamp_fork(p);

	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (unlikely(p->sched_reset_on_fork)) {
		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
			p->policy = SCHED_NORMAL;
			p->static_prio = NICE_TO_PRIO(0);
			p->rt_priority = 0;
		} else if (PRIO_TO_NICE(p->static_prio) < 0)
			p->static_prio = NICE_TO_PRIO(0);

		p->prio = p->normal_prio = __normal_prio(p);
		set_load_weight(p, false);

		/*
		 * We don't need the reset flag anymore after the fork. It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;
	}

	if (dl_prio(p->prio))
		return -EAGAIN;
	else if (rt_prio(p->prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;

	init_entity_runnable_average(&p->se);

	/*
	 * The child is not yet in the pid-hash so no cgroup attach races,
	 * and the cgroup is pinned to this child because cgroup_fork()
	 * is run before sched_fork().
	 *
	 * Silence PROVE_RCU.
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	rseq_migrate(p);
	/*
	 * We're setting the CPU for the first time, we don't migrate,
	 * so use __set_task_cpu().
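	 *
	 * (__set_task_cpu() only records the CPU; unlike set_task_cpu() it
	 * skips the migration side-effects -- migrate_task_rq(), rseq/perf
	 * migration events -- which make no sense for a task that has never
	 * run anywhere yet.)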
3749 */ 3750 __set_task_cpu(p, smp_processor_id()); 3751 if (p->sched_class->task_fork) 3752 p->sched_class->task_fork(p); 3753 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3754 3755 #ifdef CONFIG_SCHED_INFO 3756 if (likely(sched_info_on())) 3757 memset(&p->sched_info, 0, sizeof(p->sched_info)); 3758 #endif 3759 #if defined(CONFIG_SMP) 3760 p->on_cpu = 0; 3761 #endif 3762 init_task_preempt_count(p); 3763 #ifdef CONFIG_SMP 3764 plist_node_init(&p->pushable_tasks, MAX_PRIO); 3765 RB_CLEAR_NODE(&p->pushable_dl_tasks); 3766 #endif 3767 return 0; 3768 } 3769 3770 void sched_post_fork(struct task_struct *p) 3771 { 3772 uclamp_post_fork(p); 3773 } 3774 3775 unsigned long to_ratio(u64 period, u64 runtime) 3776 { 3777 if (runtime == RUNTIME_INF) 3778 return BW_UNIT; 3779 3780 /* 3781 * Doing this here saves a lot of checks in all 3782 * the calling paths, and returning zero seems 3783 * safe for them anyway. 3784 */ 3785 if (period == 0) 3786 return 0; 3787 3788 return div64_u64(runtime << BW_SHIFT, period); 3789 } 3790 3791 /* 3792 * wake_up_new_task - wake up a newly created task for the first time. 3793 * 3794 * This function will do some initial scheduler statistics housekeeping 3795 * that must be done for every newly created context, then puts the task 3796 * on the runqueue and wakes it. 3797 */ 3798 void wake_up_new_task(struct task_struct *p) 3799 { 3800 struct rq_flags rf; 3801 struct rq *rq; 3802 3803 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 3804 p->state = TASK_RUNNING; 3805 #ifdef CONFIG_SMP 3806 /* 3807 * Fork balancing, do it here and not earlier because: 3808 * - cpus_ptr can change in the fork path 3809 * - any previously selected CPU might disappear through hotplug 3810 * 3811 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 3812 * as we're not fully set-up yet. 3813 */ 3814 p->recent_used_cpu = task_cpu(p); 3815 rseq_migrate(p); 3816 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK)); 3817 #endif 3818 rq = __task_rq_lock(p, &rf); 3819 update_rq_clock(rq); 3820 post_init_entity_util_avg(p); 3821 3822 activate_task(rq, p, ENQUEUE_NOCLOCK); 3823 trace_sched_wakeup_new(p); 3824 check_preempt_curr(rq, p, WF_FORK); 3825 #ifdef CONFIG_SMP 3826 if (p->sched_class->task_woken) { 3827 /* 3828 * Nothing relies on rq->lock after this, so it's fine to 3829 * drop it. 
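		 * (task_woken() is used by, e.g., the RT and deadline
		 * classes to kick push/pull balancing for the newly woken
		 * task.)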
		 */
		rq_unpin_lock(rq, &rf);
		p->sched_class->task_woken(rq, p);
		rq_repin_lock(rq, &rf);
	}
#endif
	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_PREEMPT_NOTIFIERS

static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);

void preempt_notifier_inc(void)
{
	static_branch_inc(&preempt_notifier_key);
}
EXPORT_SYMBOL_GPL(preempt_notifier_inc);

void preempt_notifier_dec(void)
{
	static_branch_dec(&preempt_notifier_key);
}
EXPORT_SYMBOL_GPL(preempt_notifier_dec);

/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
	if (!static_branch_unlikely(&preempt_notifier_key))
		WARN(1, "registering preempt_notifier while notifiers disabled\n");

	hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is *not* safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
	hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);

static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	if (static_branch_unlikely(&preempt_notifier_key))
		__fire_sched_in_preempt_notifiers(curr);
}

static void
__fire_sched_out_preempt_notifiers(struct task_struct *curr,
				   struct task_struct *next)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}

static __always_inline void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	if (static_branch_unlikely(&preempt_notifier_key))
		__fire_sched_out_preempt_notifiers(curr, next);
}

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static inline void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */

static inline void prepare_task(struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * Claim the task as running, we do this before switching to it
	 * such that any running task will have this set.
	 *
	 * See the ttwu() WF_ON_CPU case and its ordering comment.
	 */
	WRITE_ONCE(next->on_cpu, 1);
#endif
}

static inline void finish_task(struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * This must be the very last reference to @prev from this CPU. After
	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
	 * must ensure this doesn't happen until the switch is completely
	 * finished.
3926 static inline void prepare_task(struct task_struct *next)
3927 {
3928 #ifdef CONFIG_SMP
3929 	/*
3930 	 * Claim the task as running, we do this before switching to it
3931 	 * such that any running task will have this set.
3932 	 *
3933 	 * See the ttwu() WF_ON_CPU case and its ordering comment.
3934 	 */
3935 	WRITE_ONCE(next->on_cpu, 1);
3936 #endif
3937 }
3938 
3939 static inline void finish_task(struct task_struct *prev)
3940 {
3941 #ifdef CONFIG_SMP
3942 	/*
3943 	 * This must be the very last reference to @prev from this CPU. After
3944 	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
3945 	 * must ensure this doesn't happen until the switch is completely
3946 	 * finished.
3947 	 *
3948 	 * In particular, the load of prev->state in finish_task_switch() must
3949 	 * happen before this.
3950 	 *
3951 	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
3952 	 */
3953 	smp_store_release(&prev->on_cpu, 0);
3954 #endif
3955 }
3956 
3957 #ifdef CONFIG_SMP
3958 
3959 static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
3960 {
3961 	void (*func)(struct rq *rq);
3962 	struct callback_head *next;
3963 
3964 	lockdep_assert_held(&rq->lock);
3965 
3966 	while (head) {
3967 		func = (void (*)(struct rq *))head->func;
3968 		next = head->next;
3969 		head->next = NULL;
3970 		head = next;
3971 
3972 		func(rq);
3973 	}
3974 }
3975 
3976 static void balance_push(struct rq *rq);
3977 
3978 struct callback_head balance_push_callback = {
3979 	.next = NULL,
3980 	.func = (void (*)(struct callback_head *))balance_push,
3981 };
3982 
3983 static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
3984 {
3985 	struct callback_head *head = rq->balance_callback;
3986 
3987 	lockdep_assert_held(&rq->lock);
3988 	if (head)
3989 		rq->balance_callback = NULL;
3990 
3991 	return head;
3992 }
3993 
3994 static void __balance_callbacks(struct rq *rq)
3995 {
3996 	do_balance_callbacks(rq, splice_balance_callbacks(rq));
3997 }
3998 
3999 static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
4000 {
4001 	unsigned long flags;
4002 
4003 	if (unlikely(head)) {
4004 		raw_spin_lock_irqsave(&rq->lock, flags);
4005 		do_balance_callbacks(rq, head);
4006 		raw_spin_unlock_irqrestore(&rq->lock, flags);
4007 	}
4008 }
4009 
4010 #else
4011 
4012 static inline void __balance_callbacks(struct rq *rq)
4013 {
4014 }
4015 
4016 static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
4017 {
4018 	return NULL;
4019 }
4020 
4021 static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
4022 {
4023 }
4024 
4025 #endif
4026 
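/*
 * Editor's illustrative sketch (not part of the original source, SMP
 * only): how a scheduling class typically arms one of the balance
 * callbacks run by __balance_callbacks()/balance_callbacks() above.
 * queue_balance_callback() is the helper in kernel/sched/sched.h; the
 * "example_*" names are hypothetical.
 */
static DEFINE_PER_CPU(struct callback_head, example_balance_head);

static void example_balance(struct rq *rq)
{
	/* runs with rq->lock held, late in the schedule()/wakeup paths */
}

static void example_arm_balance(struct rq *rq)
{
	/* queued under rq->lock; consumed by the next balance pass */
	queue_balance_callback(rq, &per_cpu(example_balance_head, cpu_of(rq)),
			       example_balance);
}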
4027 static inline void
4028 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
4029 {
4030 	/*
4031 	 * The runqueue lock will be released by the next task (which is
4032 	 * an invalid locking op, but in the case of the scheduler it's an
4033 	 * obvious special-case), so we do an early lockdep release here:
4034 	 */
4035 	rq_unpin_lock(rq, rf);
4037 	spin_release(&rq->lock.dep_map, _THIS_IP_);
4038 #ifdef CONFIG_DEBUG_SPINLOCK
4039 	/* this is a valid case when another task releases the spinlock */
4040 	rq->lock.owner = next;
4041 #endif
4042 }
4043 
4044 static inline void finish_lock_switch(struct rq *rq)
4045 {
4046 	/*
4047 	 * If we are tracking spinlock dependencies then we have to
4048 	 * fix up the runqueue lock - which gets 'carried over' from
4049 	 * prev into current:
4050 	 */
4051 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
4052 	__balance_callbacks(rq);
4053 	raw_spin_unlock_irq(&rq->lock);
4054 }
4055 
4056 /*
4057  * NOP if the arch has not defined these:
4058  */
4059 
4060 #ifndef prepare_arch_switch
4061 # define prepare_arch_switch(next)	do { } while (0)
4062 #endif
4063 
4064 #ifndef finish_arch_post_lock_switch
4065 # define finish_arch_post_lock_switch()	do { } while (0)
4066 #endif
4067 
4068 static inline void kmap_local_sched_out(void)
4069 {
4070 #ifdef CONFIG_KMAP_LOCAL
4071 	if (unlikely(current->kmap_ctrl.idx))
4072 		__kmap_local_sched_out();
4073 #endif
4074 }
4075 
4076 static inline void kmap_local_sched_in(void)
4077 {
4078 #ifdef CONFIG_KMAP_LOCAL
4079 	if (unlikely(current->kmap_ctrl.idx))
4080 		__kmap_local_sched_in();
4081 #endif
4082 }
4083 
4084 /**
4085  * prepare_task_switch - prepare to switch tasks
4086  * @rq: the runqueue preparing to switch
4087  * @prev: the current task that is being switched out
4088  * @next: the task we are going to switch to.
4089  *
4090  * This is called with the rq lock held and interrupts off. It must
4091  * be paired with a subsequent finish_task_switch after the context
4092  * switch.
4093  *
4094  * prepare_task_switch sets up locking and calls architecture specific
4095  * hooks.
4096  */
4097 static inline void
4098 prepare_task_switch(struct rq *rq, struct task_struct *prev,
4099 		    struct task_struct *next)
4100 {
4101 	kcov_prepare_switch(prev);
4102 	sched_info_switch(rq, prev, next);
4103 	perf_event_task_sched_out(prev, next);
4104 	rseq_preempt(prev);
4105 	fire_sched_out_preempt_notifiers(prev, next);
4106 	kmap_local_sched_out();
4107 	prepare_task(next);
4108 	prepare_arch_switch(next);
4109 }
4110 
4111 /**
4112  * finish_task_switch - clean up after a task-switch
4113  * @prev: the thread we just switched away from.
4114  *
4115  * finish_task_switch must be called after the context switch, paired
4116  * with a prepare_task_switch call before the context switch.
4117  * finish_task_switch will reconcile locking set up by prepare_task_switch,
4118  * and do any other architecture-specific cleanup actions.
4119  *
4120  * Note that we may have delayed dropping an mm in context_switch(). If
4121  * so, we finish that here outside of the runqueue lock. (Doing it
4122  * with the lock held can cause deadlocks; see schedule() for
4123  * details.)
4124  *
4125  * The context switch has flipped the stack from under us and restored the
4126  * local variables which were saved when this task called schedule() in the
4127  * past. prev == current is still correct but we need to recalculate this_rq
4128  * because prev may have moved to another CPU.
4129  */
4130 static struct rq *finish_task_switch(struct task_struct *prev)
4131 	__releases(rq->lock)
4132 {
4133 	struct rq *rq = this_rq();
4134 	struct mm_struct *mm = rq->prev_mm;
4135 	long prev_state;
4136 
4137 	/*
4138 	 * The previous task will have left us with a preempt_count of 2
4139 	 * because it left us after:
4140 	 *
4141 	 *	schedule()
4142 	 *	  preempt_disable();			// 1
4143 	 *	  __schedule()
4144 	 *	    raw_spin_lock_irq(&rq->lock)	// 2
4145 	 *
4146 	 * Also, see FORK_PREEMPT_COUNT.
4147 	 */
4148 	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
4149 		      "corrupted preempt_count: %s/%d/0x%x\n",
4150 		      current->comm, current->pid, preempt_count()))
4151 		preempt_count_set(FORK_PREEMPT_COUNT);
4152 
4153 	rq->prev_mm = NULL;
4154 
4155 	/*
4156 	 * A task struct has one reference for the use as "current".
4157 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
4158 	 * schedule one last time. The schedule call will never return, and
4159 	 * the scheduled task must drop that reference.
4160 	 *
4161 	 * We must observe prev->state before clearing prev->on_cpu (in
4162 	 * finish_task), otherwise a concurrent wakeup can get prev
4163 	 * running on another CPU and we could race with its RUNNING -> DEAD
4164 	 * transition, resulting in a double drop.
4165 	 */
4166 	prev_state = prev->state;
4167 	vtime_task_switch(prev);
4168 	perf_event_task_sched_in(prev, current);
4169 	finish_task(prev);
4170 	finish_lock_switch(rq);
4171 	finish_arch_post_lock_switch();
4172 	kcov_finish_switch(current);
4173 	/*
4174 	 * kmap_local_sched_out() is invoked with rq::lock held and
4175 	 * interrupts disabled. There is no requirement for that, but the
4176 	 * sched out code does not have an interrupt enabled section.
4177 	 * Restoring the maps on sched in does not require interrupts being
4178 	 * disabled either.
4179 	 */
4180 	kmap_local_sched_in();
4181 
4182 	fire_sched_in_preempt_notifiers(current);
4183 	/*
4184 	 * When switching through a kernel thread, the loop in
4185 	 * membarrier_{private,global}_expedited() may have observed that
4186 	 * kernel thread and not issued an IPI. It is therefore possible to
4187 	 * schedule between user->kernel->user threads without passing through
4188 	 * switch_mm(). Membarrier requires a barrier after storing to
4189 	 * rq->curr, before returning to userspace, so provide them here:
4190 	 *
4191 	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
4192 	 *   provided by mmdrop(),
4193 	 * - a sync_core for SYNC_CORE.
4194 	 */
4195 	if (mm) {
4196 		membarrier_mm_sync_core_before_usermode(mm);
4197 		mmdrop(mm);
4198 	}
4199 	if (unlikely(prev_state == TASK_DEAD)) {
4200 		if (prev->sched_class->task_dead)
4201 			prev->sched_class->task_dead(prev);
4202 
4203 		/*
4204 		 * Remove function-return probe instances associated with this
4205 		 * task and put them back on the free list.
4206 		 */
4207 		kprobe_flush_task(prev);
4208 
4209 		/* Task is done with its stack. */
4210 		put_task_stack(prev);
4211 
4212 		put_task_struct_rcu_user(prev);
4213 	}
4214 
4215 	tick_nohz_task_switch();
4216 	return rq;
4217 }
4218 
4219 /**
4220  * schedule_tail - first thing a freshly forked thread must call.
4221  * @prev: the thread we just switched away from.
4222  */
4223 asmlinkage __visible void schedule_tail(struct task_struct *prev)
4224 	__releases(rq->lock)
4225 {
4226 	struct rq *rq;
4227 
4228 	/*
4229 	 * New tasks start with FORK_PREEMPT_COUNT, see there and
4230 	 * finish_task_switch() for details.
4231 	 *
4232 	 * finish_task_switch() will drop rq->lock() and lower preempt_count
4233 	 * and the preempt_enable() will end up enabling preemption (on
4234 	 * PREEMPT_COUNT kernels).
4235 	 */
4236 
4237 	rq = finish_task_switch(prev);
4238 	preempt_enable();
4239 
4240 	if (current->set_child_tid)
4241 		put_user(task_pid_vnr(current), current->set_child_tid);
4242 
4243 	calculate_sigpending();
4244 }
4245 
4246 /*
4247  * context_switch - switch to the new MM and the new thread's register state.
4248  */
4249 static __always_inline struct rq *
4250 context_switch(struct rq *rq, struct task_struct *prev,
4251 	       struct task_struct *next, struct rq_flags *rf)
4252 {
4253 	prepare_task_switch(rq, prev, next);
4254 
4255 	/*
4256 	 * For paravirt, this is coupled with an exit in switch_to to
4257 	 * combine the page table reload and the switch backend into
4258 	 * one hypercall.
4259 	 */
4260 	arch_start_context_switch(prev);
4261 
4262 	/*
4263 	 * kernel -> kernel	lazy + transfer active
4264 	 *   user -> kernel	lazy + mmgrab() active
4265 	 *
4266 	 * kernel ->   user	switch + mmdrop() active
4267 	 *   user ->   user	switch
4268 	 */
4269 	if (!next->mm) {                                // to kernel
4270 		enter_lazy_tlb(prev->active_mm, next);
4271 
4272 		next->active_mm = prev->active_mm;
4273 		if (prev->mm)                           // from user
4274 			mmgrab(prev->active_mm);
4275 		else
4276 			prev->active_mm = NULL;
4277 	} else {                                        // to user
4278 		membarrier_switch_mm(rq, prev->active_mm, next->mm);
4279 		/*
4280 		 * sys_membarrier() requires an smp_mb() between setting
4281 		 * rq->curr / membarrier_switch_mm() and returning to userspace.
4282 		 *
4283 		 * The below provides this either through switch_mm(), or in
4284 		 * case 'prev->active_mm == next->mm' through
4285 		 * finish_task_switch()'s mmdrop().
4286 		 */
4287 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
4288 
4289 		if (!prev->mm) {                        // from kernel
4290 			/* will mmdrop() in finish_task_switch(). */
4291 			rq->prev_mm = prev->active_mm;
4292 			prev->active_mm = NULL;
4293 		}
4294 	}
4295 
4296 	rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
4297 
4298 	prepare_lock_switch(rq, next, rf);
4299 
4300 	/* Here we just switch the register state and the stack. */
4301 	switch_to(prev, next, prev);
4302 	barrier();
4303 
4304 	return finish_task_switch(prev);
4305 }
4306 
4307 /*
4308  * nr_running and nr_context_switches:
4309  *
4310  * externally visible scheduler statistics: current number of runnable
4311  * threads, total number of context switches performed since bootup.
4312  */
4313 unsigned long nr_running(void)
4314 {
4315 	unsigned long i, sum = 0;
4316 
4317 	for_each_online_cpu(i)
4318 		sum += cpu_rq(i)->nr_running;
4319 
4320 	return sum;
4321 }
4322 
4323 /*
4324  * Check if only the current task is running on the CPU.
4325  *
4326  * Caution: this function does not check that the caller has disabled
4327  * preemption, thus the result might have a time-of-check-to-time-of-use
4328  * race. The caller is responsible for using it correctly, for example:
4329  *
4330  * - from a non-preemptible section (of course)
4331  *
4332  * - from a thread that is bound to a single CPU
4333  *
4334  * - in a loop with very short iterations (e.g. a polling loop)
4335  */
4336 bool single_task_running(void)
4337 {
4338 	return raw_rq()->nr_running == 1;
4339 }
4340 EXPORT_SYMBOL(single_task_running);
4341 
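/*
 * Editor's illustrative sketch (not part of the original source): the
 * "polling loop" usage mentioned above, similar in spirit to KVM's
 * halt-polling, where the TOCTOU race is benign because the check is
 * repeated on every iteration.  The "example_*" name is hypothetical.
 */
static void example_poll_until(ktime_t deadline)
{
	/* poll only while this CPU has nothing else to run */
	while (single_task_running() && ktime_before(ktime_get(), deadline))
		cpu_relax();
}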
4342 unsigned long long nr_context_switches(void)
4343 {
4344 	int i;
4345 	unsigned long long sum = 0;
4346 
4347 	for_each_possible_cpu(i)
4348 		sum += cpu_rq(i)->nr_switches;
4349 
4350 	return sum;
4351 }
4352 
4353 /*
4354  * Consumers of these two interfaces, like for example the cpuidle menu
4355  * governor, are using nonsensical data: they prefer shallow idle state
4356  * selection for a CPU that has IO-wait accounted, even though that CPU
4357  * might not even end up running the task when it does become runnable.
4358  */
4359 
4360 unsigned long nr_iowait_cpu(int cpu)
4361 {
4362 	return atomic_read(&cpu_rq(cpu)->nr_iowait);
4363 }
4364 
4365 /*
4366  * IO-wait accounting, and how it's mostly bollocks (on SMP).
4367  *
4368  * The idea behind IO-wait accounting is to account the idle time that we
4369  * could have spent running if it were not for IO. That is, if we were to
4370  * improve the storage performance, we'd have a proportional reduction in
4371  * IO-wait time.
4372  *
4373  * This all works nicely on UP, where, when a task blocks on IO, we account
4374  * idle time as IO-wait, because if the storage were faster, it could've been
4375  * running and we'd not be idle.
4376  *
4377  * This has been extended to SMP, by doing the same for each CPU. This however
4378  * is broken.
4379  *
4380  * Imagine for instance the case where two tasks block on one CPU, only that
4381  * one CPU will have IO-wait accounted, while the other has regular idle. Even
4382  * though, if the storage were faster, both could've run at the same time,
4383  * utilising both CPUs.
4384  *
4385  * This means that, when looking globally, the current IO-wait accounting on
4386  * SMP is a lower bound, due to under-accounting.
4387  *
4388  * Worse, since the numbers are provided per CPU, they are sometimes
4389  * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
4390  * associated with any one particular CPU, it can wake up on a different CPU
4391  * than the one it blocked on. This means the per-CPU IO-wait number is
4392  * meaningless.
4393  *
4394  * Task CPU affinities can make all that even more 'interesting'.
4395  */
4396 
4397 unsigned long nr_iowait(void)
4398 {
4399 	unsigned long i, sum = 0;
4400 
4401 	for_each_possible_cpu(i)
4402 		sum += nr_iowait_cpu(i);
4403 
4404 	return sum;
4405 }
4406 
4407 #ifdef CONFIG_SMP
4408 
4409 /*
4410  * sched_exec - execve() is a valuable balancing opportunity, because at
4411  * this point the task has the smallest effective memory and cache footprint.
4412  */
4413 void sched_exec(void)
4414 {
4415 	struct task_struct *p = current;
4416 	unsigned long flags;
4417 	int dest_cpu;
4418 
4419 	raw_spin_lock_irqsave(&p->pi_lock, flags);
4420 	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
4421 	if (dest_cpu == smp_processor_id())
4422 		goto unlock;
4423 
4424 	if (likely(cpu_active(dest_cpu))) {
4425 		struct migration_arg arg = { p, dest_cpu };
4426 
4427 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4428 		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
4429 		return;
4430 	}
4431 unlock:
4432 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4433 }
4434 
4435 #endif
4436 
4437 DEFINE_PER_CPU(struct kernel_stat, kstat);
4438 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
4439 
4440 EXPORT_PER_CPU_SYMBOL(kstat);
4441 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
4442 
4443 /*
4444  * fair_sched_class.update_curr() accesses the cfs_rq's curr entity and its
4445  * field curr->exec_start; when called from task_sched_runtime() we observe
4446  * a high rate of cache misses in practice.
4447  * Prefetching this data results in improved performance.
4448  */
4449 static inline void prefetch_curr_exec_start(struct task_struct *p)
4450 {
4451 #ifdef CONFIG_FAIR_GROUP_SCHED
4452 	struct sched_entity *curr = (&p->se)->cfs_rq->curr;
4453 #else
4454 	struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
4455 #endif
4456 	prefetch(curr);
4457 	prefetch(&curr->exec_start);
4458 }
4459 
4460 /*
4461  * Return accounted runtime for the task.
4462  * In case the task is currently running, return the runtime plus current's
4463  * pending runtime that has not been accounted yet.
4464  */
4465 unsigned long long task_sched_runtime(struct task_struct *p)
4466 {
4467 	struct rq_flags rf;
4468 	struct rq *rq;
4469 	u64 ns;
4470 
4471 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
4472 	/*
4473 	 * 64-bit doesn't need locks to atomically read a 64-bit value.
4474 	 * So we have an optimization chance when the task's delta_exec is 0.
4475 	 * Reading ->on_cpu is racy, but this is OK.
4476 	 *
4477 	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
4478 	 * If we race with it entering CPU, unaccounted time is 0. This is
4479 	 * indistinguishable from the read occurring a few cycles earlier.
4480 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
4481 	 * been accounted, so we're correct here as well.
4482 	 */
4483 	if (!p->on_cpu || !task_on_rq_queued(p))
4484 		return p->se.sum_exec_runtime;
4485 #endif
4486 
4487 	rq = task_rq_lock(p, &rf);
4488 	/*
4489 	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
4490 	 * project cycles that may never be accounted to this
4491 	 * thread, breaking clock_gettime().
4492 	 */
4493 	if (task_current(rq, p) && task_on_rq_queued(p)) {
4494 		prefetch_curr_exec_start(p);
4495 		update_rq_clock(rq);
4496 		p->sched_class->update_curr(rq);
4497 	}
4498 	ns = p->se.sum_exec_runtime;
4499 	task_rq_unlock(rq, p, &rf);
4500 
4501 	return ns;
4502 }
4503 
4504 /*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
4505  */
4506 void scheduler_tick(void)
4507 {
4508 	int cpu = smp_processor_id();
4509 	struct rq *rq = cpu_rq(cpu);
4510 	struct task_struct *curr = rq->curr;
4511 	struct rq_flags rf;
4512 	unsigned long thermal_pressure;
4513 
4514 	arch_scale_freq_tick();
4515 	sched_clock_tick();
4516 
4517 	rq_lock(rq, &rf);
4518 
4519 	update_rq_clock(rq);
4520 	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
4521 	update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
4522 	curr->sched_class->task_tick(rq, curr, 0);
4523 	calc_global_load_tick(rq);
4524 	psi_task_tick(rq);
4525 
4526 	rq_unlock(rq, &rf);
4527 
4528 	perf_event_task_tick();
4529 
4530 #ifdef CONFIG_SMP
4531 	rq->idle_balance = idle_cpu(cpu);
4532 	trigger_load_balance(rq);
4533 #endif
4534 }
4535 
4536 #ifdef CONFIG_NO_HZ_FULL
4537 
4538 struct tick_work {
4539 	int			cpu;
4540 	atomic_t		state;
4541 	struct delayed_work	work;
4542 };
4543 /* Values for ->state, see diagram below. */
4544 #define TICK_SCHED_REMOTE_OFFLINE	0
4545 #define TICK_SCHED_REMOTE_OFFLINING	1
4546 #define TICK_SCHED_REMOTE_RUNNING	2
4547 
4548 /*
4549  * State diagram for ->state:
4550  *
4551  *
4552  *          TICK_SCHED_REMOTE_OFFLINE
4553  *                    |   ^
4554  *                    |   |
4555  *                    |   | sched_tick_remote()
4556  *                    |   |
4557  *                    |   |
4558  *                    +--TICK_SCHED_REMOTE_OFFLINING
4559  *                    |   ^
4560  *                    |   |
4561  * sched_tick_start() |   | sched_tick_stop()
4562  *                    |   |
4563  *                    V   |
4564  *          TICK_SCHED_REMOTE_RUNNING
4565  *
4566  *
4567  * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
4568  * and sched_tick_start() are happy to leave the state in RUNNING.
4569  */
4570 
4571 static struct tick_work __percpu *tick_work_cpu;
4572 
4573 static void sched_tick_remote(struct work_struct *work)
4574 {
4575 	struct delayed_work *dwork = to_delayed_work(work);
4576 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
4577 	int cpu = twork->cpu;
4578 	struct rq *rq = cpu_rq(cpu);
4579 	struct task_struct *curr;
4580 	struct rq_flags rf;
4581 	u64 delta;
4582 	int os;
4583 
4584 	/*
4585 	 * Handle the tick only if it appears the remote CPU is running in full
4586 	 * dynticks mode. The check is racy by nature, but missing a tick or
4587 	 * having one too many is no big deal because the scheduler tick updates
4588 	 * statistics and checks timeslices in a time-independent way, regardless
4589 	 * of when exactly it is running.
4590 	 */
4591 	if (!tick_nohz_tick_stopped_cpu(cpu))
4592 		goto out_requeue;
4593 
4594 	rq_lock_irq(rq, &rf);
4595 	curr = rq->curr;
4596 	if (cpu_is_offline(cpu))
4597 		goto out_unlock;
4598 
4599 	update_rq_clock(rq);
4600 
4601 	if (!is_idle_task(curr)) {
4602 		/*
4603 		 * Make sure the next tick runs within a reasonable
4604 		 * amount of time.
4605 		 */
4606 		delta = rq_clock_task(rq) - curr->se.exec_start;
4607 		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
4608 	}
4609 	curr->sched_class->task_tick(rq, curr, 0);
4610 
4611 	calc_load_nohz_remote(rq);
4612 out_unlock:
4613 	rq_unlock_irq(rq, &rf);
4614 out_requeue:
4615 
4616 	/*
4617 	 * Run the remote tick once per second (1Hz). This arbitrary frequency
4618 	 * is low enough to avoid overload and high enough to keep the
4619 	 * scheduler's internal stats reasonably up to date. But first update
4620 	 * the state to reflect hotplug activity if required.
4621 	 */
4622 	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
4623 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
4624 	if (os == TICK_SCHED_REMOTE_RUNNING)
4625 		queue_delayed_work(system_unbound_wq, dwork, HZ);
4626 }
4627 
4628 static void sched_tick_start(int cpu)
4629 {
4630 	int os;
4631 	struct tick_work *twork;
4632 
4633 	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
4634 		return;
4635 
4636 	WARN_ON_ONCE(!tick_work_cpu);
4637 
4638 	twork = per_cpu_ptr(tick_work_cpu, cpu);
4639 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
4640 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
4641 	if (os == TICK_SCHED_REMOTE_OFFLINE) {
4642 		twork->cpu = cpu;
4643 		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
4644 		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
4645 	}
4646 }
4647 
4648 #ifdef CONFIG_HOTPLUG_CPU
4649 static void sched_tick_stop(int cpu)
4650 {
4651 	struct tick_work *twork;
4652 	int os;
4653 
4654 	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
4655 		return;
4656 
4657 	WARN_ON_ONCE(!tick_work_cpu);
4658 
4659 	twork = per_cpu_ptr(tick_work_cpu, cpu);
4660 	/* There cannot be competing actions, but don't rely on stop-machine. */
4661 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
4662 	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
4663 	/* Don't cancel, as this would mess up the state machine. */
4664 }
4665 #endif /* CONFIG_HOTPLUG_CPU */
4666 
4667 int __init sched_tick_offload_init(void)
4668 {
4669 	tick_work_cpu = alloc_percpu(struct tick_work);
4670 	BUG_ON(!tick_work_cpu);
4671 	return 0;
4672 }
4673 
4674 #else /* !CONFIG_NO_HZ_FULL */
4675 static inline void sched_tick_start(int cpu) { }
4676 static inline void sched_tick_stop(int cpu) { }
4677 #endif
4678 
4679 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
4680 				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
4681 /*
4682  * If the value passed in is equal to the current preempt count
4683  * then we just disabled preemption. Start timing the latency.
4684  */
4685 static inline void preempt_latency_start(int val)
4686 {
4687 	if (preempt_count() == val) {
4688 		unsigned long ip = get_lock_parent_ip();
4689 #ifdef CONFIG_DEBUG_PREEMPT
4690 		current->preempt_disable_ip = ip;
4691 #endif
4692 		trace_preempt_off(CALLER_ADDR0, ip);
4693 	}
4694 }
4695 
4696 void preempt_count_add(int val)
4697 {
4698 #ifdef CONFIG_DEBUG_PREEMPT
4699 	/*
4700 	 * Underflow?
4701 	 */
4702 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
4703 		return;
4704 #endif
4705 	__preempt_count_add(val);
4706 #ifdef CONFIG_DEBUG_PREEMPT
4707 	/*
4708 	 * Spinlock count overflowing soon?
4709 	 */
4710 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
4711 				PREEMPT_MASK - 10);
4712 #endif
4713 	preempt_latency_start(val);
4714 }
4715 EXPORT_SYMBOL(preempt_count_add);
4716 NOKPROBE_SYMBOL(preempt_count_add);
4717 
4718 /*
4719  * If the value passed in is equal to the current preempt count
4720  * then we just enabled preemption. Stop timing the latency.
4721  */
4722 static inline void preempt_latency_stop(int val)
4723 {
4724 	if (preempt_count() == val)
4725 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
4726 }
4727 
4728 void preempt_count_sub(int val)
4729 {
4730 #ifdef CONFIG_DEBUG_PREEMPT
4731 	/*
4732 	 * Underflow?
4733 	 */
4734 	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
4735 		return;
4736 	/*
4737 	 * Is the spinlock portion underflowing?
4738 	 */
4739 	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
4740 			!(preempt_count() & PREEMPT_MASK)))
4741 		return;
4742 #endif
4743 
4744 	preempt_latency_stop(val);
4745 	__preempt_count_sub(val);
4746 }
4747 EXPORT_SYMBOL(preempt_count_sub);
4748 NOKPROBE_SYMBOL(preempt_count_sub);
4749 
4750 #else
4751 static inline void preempt_latency_start(int val) { }
4752 static inline void preempt_latency_stop(int val) { }
4753 #endif
4754 
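/*
 * Editor's worked example (not part of the original source): why the
 * "preempt_count() == val" tests above catch exactly the outermost
 * disable/enable pair.  Starting from preempt_count() == 0:
 *
 *	preempt_count_add(1);	// count 0 -> 1 == val: preempt-off
 *				// latency timing starts
 *	preempt_count_add(1);	// count 1 -> 2 != val: nested, ignored
 *	preempt_count_sub(1);	// count still 2 != val: nested, ignored
 *	preempt_count_sub(1);	// count 1 == val: timing stops,
 *				// trace_preempt_on() fires, then count -> 0
 */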
4755 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
4756 {
4757 #ifdef CONFIG_DEBUG_PREEMPT
4758 	return p->preempt_disable_ip;
4759 #else
4760 	return 0;
4761 #endif
4762 }
4763 
4764 /*
4765  * Print scheduling while atomic bug:
4766  */
4767 static noinline void __schedule_bug(struct task_struct *prev)
4768 {
4769 	/* Save this before calling printk(), since that will clobber it */
4770 	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
4771 
4772 	if (oops_in_progress)
4773 		return;
4774 
4775 	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4776 		prev->comm, prev->pid, preempt_count());
4777 
4778 	debug_show_held_locks(prev);
4779 	print_modules();
4780 	if (irqs_disabled())
4781 		print_irqtrace_events(prev);
4782 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
4783 	    && in_atomic_preempt_off()) {
4784 		pr_err("Preemption disabled at:");
4785 		print_ip_sym(KERN_ERR, preempt_disable_ip);
4786 	}
4787 	if (panic_on_warn)
4788 		panic("scheduling while atomic\n");
4789 
4790 	dump_stack();
4791 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
4792 }
4793 
4794 /*
4795  * Various schedule()-time debugging checks and statistics:
4796  */
4797 static inline void schedule_debug(struct task_struct *prev, bool preempt)
4798 {
4799 #ifdef CONFIG_SCHED_STACK_END_CHECK
4800 	if (task_stack_end_corrupted(prev))
4801 		panic("corrupted stack end detected inside scheduler\n");
4802 
4803 	if (task_scs_end_corrupted(prev))
4804 		panic("corrupted shadow stack detected inside scheduler\n");
4805 #endif
4806 
4807 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
4808 	if (!preempt && prev->state && prev->non_block_count) {
4809 		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
4810 			prev->comm, prev->pid, prev->non_block_count);
4811 		dump_stack();
4812 		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
4813 	}
4814 #endif
4815 
4816 	if (unlikely(in_atomic_preempt_off())) {
4817 		__schedule_bug(prev);
4818 		preempt_count_set(PREEMPT_DISABLED);
4819 	}
4820 	rcu_sleep_check();
4821 	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
4822 
4823 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4824 
4825 	schedstat_inc(this_rq()->sched_count);
4826 }
4827 
4828 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
4829 				  struct rq_flags *rf)
4830 {
4831 #ifdef CONFIG_SMP
4832 	const struct sched_class *class;
4833 	/*
4834 	 * We must do the balancing pass before put_prev_task(), such
4835 	 * that when we release the rq->lock the task is in the same
4836 	 * state as before we took rq->lock.
4837 	 *
4838 	 * We can terminate the balance pass as soon as we know there is
4839 	 * a runnable task of @class priority or higher.
4840 	 */
4841 	for_class_range(class, prev->sched_class, &idle_sched_class) {
4842 		if (class->balance(rq, prev, rf))
4843 			break;
4844 	}
4845 #endif
4846 
4847 	put_prev_task(rq, prev);
4848 }
4849 
4850 /*
4851  * Pick up the highest-prio task:
4852  */
4853 static inline struct task_struct *
4854 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
4855 {
4856 	const struct sched_class *class;
4857 	struct task_struct *p;
4858 
4859 	/*
4860 	 * Optimization: we know that if all tasks are in the fair class we can
4861 	 * call that function directly, but only if the @prev task wasn't of a
4862 	 * higher scheduling class, because otherwise those lose the
4863 	 * opportunity to pull in more work from other CPUs.
4864 	 */
4865 	if (likely(prev->sched_class <= &fair_sched_class &&
4866 		   rq->nr_running == rq->cfs.h_nr_running)) {
4867 
4868 		p = pick_next_task_fair(rq, prev, rf);
4869 		if (unlikely(p == RETRY_TASK))
4870 			goto restart;
4871 
4872 		/* Assumes fair_sched_class->next == idle_sched_class */
4873 		if (!p) {
4874 			put_prev_task(rq, prev);
4875 			p = pick_next_task_idle(rq);
4876 		}
4877 
4878 		return p;
4879 	}
4880 
4881 restart:
4882 	put_prev_task_balance(rq, prev, rf);
4883 
4884 	for_each_class(class) {
4885 		p = class->pick_next_task(rq);
4886 		if (p)
4887 			return p;
4888 	}
4889 
4890 	/* The idle class should always have a runnable task: */
4891 	BUG();
4892 }
4893 
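/*
 * Editor's note (not part of the original source): the address
 * comparison "prev->sched_class <= &fair_sched_class" above works
 * because the sched_class instances are placed in priority order in a
 * dedicated linker section (see the sched_class declarations in
 * kernel/sched/sched.h), so a lower address means a lower-priority
 * class and the test is true only for fair- and idle-class tasks.
 */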
4894 /*
4895  * __schedule() is the main scheduler function.
4896  *
4897  * The main means of driving the scheduler and thus entering this function are:
4898  *
4899  *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
4900  *
4901  *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
4902  *      paths. For example, see arch/x86/entry_64.S.
4903  *
4904  *      To drive preemption between tasks, the scheduler sets the flag in timer
4905  *      interrupt handler scheduler_tick().
4906  *
4907  *   3. Wakeups don't really cause entry into schedule(). They add a
4908  *      task to the run-queue and that's it.
4909  *
4910  *      Now, if the new task added to the run-queue preempts the current
4911  *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
4912  *      called on the nearest possible occasion:
4913  *
4914  *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
4915  *
4916  *         - in syscall or exception context, at the next outermost
4917  *           preempt_enable(). (this might be as soon as the wake_up()'s
4918  *           spin_unlock()!)
4919  *
4920  *         - in IRQ context, return from interrupt-handler to
4921  *           preemptible context
4922  *
4923  *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
4924  *         then at the next:
4925  *
4926  *          - cond_resched() call
4927  *          - explicit schedule() call
4928  *          - return from syscall or exception to user-space
4929  *          - return from interrupt-handler to user-space
4930  *
4931  * WARNING: must be called with preemption disabled!
4932  */
4933 static void __sched notrace __schedule(bool preempt)
4934 {
4935 	struct task_struct *prev, *next;
4936 	unsigned long *switch_count;
4937 	unsigned long prev_state;
4938 	struct rq_flags rf;
4939 	struct rq *rq;
4940 	int cpu;
4941 
4942 	cpu = smp_processor_id();
4943 	rq = cpu_rq(cpu);
4944 	prev = rq->curr;
4945 
4946 	schedule_debug(prev, preempt);
4947 
4948 	if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
4949 		hrtick_clear(rq);
4950 
4951 	local_irq_disable();
4952 	rcu_note_context_switch(preempt);
4953 
4954 	/*
4955 	 * Make sure that signal_pending_state()->signal_pending() below
4956 	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
4957 	 * done by the caller to avoid the race with signal_wake_up():
4958 	 *
4959 	 * __set_current_state(@state)		signal_wake_up()
4960 	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
4961 	 *					  wake_up_state(p, state)
4962 	 *   LOCK rq->lock			    LOCK p->pi_lock
4963 	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
4964 	 *   if (signal_pending_state())	    if (p->state & @state)
4965 	 *
4966 	 * Also, the membarrier system call requires a full memory barrier
4967 	 * after coming from user-space, before storing to rq->curr.
4968 	 */
4969 	rq_lock(rq, &rf);
4970 	smp_mb__after_spinlock();
4971 
4972 	/* Promote REQ to ACT */
4973 	rq->clock_update_flags <<= 1;
4974 	update_rq_clock(rq);
4975 
4976 	switch_count = &prev->nivcsw;
4977 
4978 	/*
4979 	 * We must load prev->state once (task_struct::state is volatile), such
4980 	 * that:
4981 	 *
4982 	 *  - we form a control dependency vs deactivate_task() below.
4983 	 *  - ptrace_{,un}freeze_traced() can change ->state underneath us.
4984 	 */
4985 	prev_state = prev->state;
4986 	if (!preempt && prev_state) {
4987 		if (signal_pending_state(prev_state, prev)) {
4988 			prev->state = TASK_RUNNING;
4989 		} else {
4990 			prev->sched_contributes_to_load =
4991 				(prev_state & TASK_UNINTERRUPTIBLE) &&
4992 				!(prev_state & TASK_NOLOAD) &&
4993 				!(prev->flags & PF_FROZEN);
4994 
4995 			if (prev->sched_contributes_to_load)
4996 				rq->nr_uninterruptible++;
4997 
4998 			/*
4999 			 * __schedule()			ttwu()
5000 			 *   prev_state = prev->state;    if (p->on_rq && ...)
5001 			 *   if (prev_state)		    goto out;
5002 			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
5003 			 *				  p->state = TASK_WAKING
5004 			 *
5005 			 * Where __schedule() and ttwu() have matching control dependencies.
5006 			 *
5007 			 * After this, schedule() must not care about p->state any more.
5008 			 */
5009 			deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
5010 
5011 			if (prev->in_iowait) {
5012 				atomic_inc(&rq->nr_iowait);
5013 				delayacct_blkio_start();
5014 			}
5015 		}
5016 		switch_count = &prev->nvcsw;
5017 	}
5018 
5019 	next = pick_next_task(rq, prev, &rf);
5020 	clear_tsk_need_resched(prev);
5021 	clear_preempt_need_resched();
5022 
5023 	if (likely(prev != next)) {
5024 		rq->nr_switches++;
5025 		/*
5026 		 * RCU users of rcu_dereference(rq->curr) may not see
5027 		 * changes to task_struct made by pick_next_task().
5028 		 */
5029 		RCU_INIT_POINTER(rq->curr, next);
5030 		/*
5031 		 * The membarrier system call requires each architecture
5032 		 * to have a full memory barrier after updating
5033 		 * rq->curr, before returning to user-space.
5034 		 *
5035 		 * Here are the schemes providing that barrier on the
5036 		 * various architectures:
5037 		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
5038 		 *   switch_mm() relies on membarrier_arch_switch_mm() on PowerPC.
5039 		 * - finish_lock_switch() for weakly-ordered
5040 		 *   architectures where spin_unlock is a full barrier,
5041 		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
5042 		 *   is a RELEASE barrier),
5043 		 */
5044 		++*switch_count;
5045 
5046 		migrate_disable_switch(rq, prev);
5047 		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
5048 
5049 		trace_sched_switch(preempt, prev, next);
5050 
5051 		/* Also unlocks the rq: */
5052 		rq = context_switch(rq, prev, next, &rf);
5053 	} else {
5054 		rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
5055 
5056 		rq_unpin_lock(rq, &rf);
5057 		__balance_callbacks(rq);
5058 		raw_spin_unlock_irq(&rq->lock);
5059 	}
5060 }
5061 
5062 void __noreturn do_task_dead(void)
5063 {
5064 	/* Causes final put_task_struct in finish_task_switch(): */
5065 	set_special_state(TASK_DEAD);
5066 
5067 	/* Tell freezer to ignore us: */
5068 	current->flags |= PF_NOFREEZE;
5069 
5070 	__schedule(false);
5071 	BUG();
5072 
5073 	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
5074 	for (;;)
5075 		cpu_relax();
5076 }
5077 
5078 static inline void sched_submit_work(struct task_struct *tsk)
5079 {
5080 	unsigned int task_flags;
5081 
5082 	if (!tsk->state)
5083 		return;
5084 
5085 	task_flags = tsk->flags;
5086 	/*
5087 	 * If a worker went to sleep, notify and ask workqueue whether
5088 	 * it wants to wake up a task to maintain concurrency.
5089 	 * As this function is called inside the schedule() context,
5090 	 * we disable preemption to avoid it calling schedule() again
5091 	 * in the possible wakeup of a kworker and because wq_worker_sleeping()
5092 	 * requires it.
5093 	 */
5094 	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
5095 		preempt_disable();
5096 		if (task_flags & PF_WQ_WORKER)
5097 			wq_worker_sleeping(tsk);
5098 		else
5099 			io_wq_worker_sleeping(tsk);
5100 		preempt_enable_no_resched();
5101 	}
5102 
5103 	if (tsk_is_pi_blocked(tsk))
5104 		return;
5105 
5106 	/*
5107 	 * If we are going to sleep and we have plugged IO queued,
5108 	 * make sure to submit it to avoid deadlocks.
5109 	 */
5110 	if (blk_needs_flush_plug(tsk))
5111 		blk_schedule_flush_plug(tsk);
5112 }
5113 
5114 static void sched_update_worker(struct task_struct *tsk)
5115 {
5116 	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
5117 		if (tsk->flags & PF_WQ_WORKER)
5118 			wq_worker_running(tsk);
5119 		else
5120 			io_wq_worker_running(tsk);
5121 	}
5122 }
5123 
5124 asmlinkage __visible void __sched schedule(void)
5125 {
5126 	struct task_struct *tsk = current;
5127 
5128 	sched_submit_work(tsk);
5129 	do {
5130 		preempt_disable();
5131 		__schedule(false);
5132 		sched_preempt_enable_no_resched();
5133 	} while (need_resched());
5134 	sched_update_worker(tsk);
5135 }
5136 EXPORT_SYMBOL(schedule);
5137 
5138 /*
5139  * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
5140  * state (have scheduled out non-voluntarily) by making sure that all
5141  * tasks have either left the run queue or have gone into user space.
5142  * As idle tasks do not do either, they must not ever be preempted
5143  * (schedule out non-voluntarily).
5144  *
5145  * schedule_idle() is similar to schedule_preempt_disabled() except that it
5146  * never enables preemption because it does not call sched_submit_work().
5147  */
5148 void __sched schedule_idle(void)
5149 {
5150 	/*
5151 	 * As this skips calling sched_submit_work() (fine here, since that
5152 	 * function is a nop when the task is in the TASK_RUNNING state),
5153 	 * make sure this isn't used someplace that the
5154 	 * current task can be in any other state.
Note, idle is always in the 5155 * TASK_RUNNING state. 5156 */ 5157 WARN_ON_ONCE(current->state); 5158 do { 5159 __schedule(false); 5160 } while (need_resched()); 5161 } 5162 5163 #if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) 5164 asmlinkage __visible void __sched schedule_user(void) 5165 { 5166 /* 5167 * If we come here after a random call to set_need_resched(), 5168 * or we have been woken up remotely but the IPI has not yet arrived, 5169 * we haven't yet exited the RCU idle mode. Do it here manually until 5170 * we find a better solution. 5171 * 5172 * NB: There are buggy callers of this function. Ideally we 5173 * should warn if prev_state != CONTEXT_USER, but that will trigger 5174 * too frequently to make sense yet. 5175 */ 5176 enum ctx_state prev_state = exception_enter(); 5177 schedule(); 5178 exception_exit(prev_state); 5179 } 5180 #endif 5181 5182 /** 5183 * schedule_preempt_disabled - called with preemption disabled 5184 * 5185 * Returns with preemption disabled. Note: preempt_count must be 1 5186 */ 5187 void __sched schedule_preempt_disabled(void) 5188 { 5189 sched_preempt_enable_no_resched(); 5190 schedule(); 5191 preempt_disable(); 5192 } 5193 5194 static void __sched notrace preempt_schedule_common(void) 5195 { 5196 do { 5197 /* 5198 * Because the function tracer can trace preempt_count_sub() 5199 * and it also uses preempt_enable/disable_notrace(), if 5200 * NEED_RESCHED is set, the preempt_enable_notrace() called 5201 * by the function tracer will call this function again and 5202 * cause infinite recursion. 5203 * 5204 * Preemption must be disabled here before the function 5205 * tracer can trace. Break up preempt_disable() into two 5206 * calls. One to disable preemption without fear of being 5207 * traced. The other to still record the preemption latency, 5208 * which can also be traced by the function tracer. 5209 */ 5210 preempt_disable_notrace(); 5211 preempt_latency_start(1); 5212 __schedule(true); 5213 preempt_latency_stop(1); 5214 preempt_enable_no_resched_notrace(); 5215 5216 /* 5217 * Check again in case we missed a preemption opportunity 5218 * between schedule and now. 5219 */ 5220 } while (need_resched()); 5221 } 5222 5223 #ifdef CONFIG_PREEMPTION 5224 /* 5225 * This is the entry point to schedule() from in-kernel preemption 5226 * off of preempt_enable. 5227 */ 5228 asmlinkage __visible void __sched notrace preempt_schedule(void) 5229 { 5230 /* 5231 * If there is a non-zero preempt_count or interrupts are disabled, 5232 * we do not want to preempt the current task. Just return.. 5233 */ 5234 if (likely(!preemptible())) 5235 return; 5236 5237 preempt_schedule_common(); 5238 } 5239 NOKPROBE_SYMBOL(preempt_schedule); 5240 EXPORT_SYMBOL(preempt_schedule); 5241 5242 #ifdef CONFIG_PREEMPT_DYNAMIC 5243 DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func); 5244 EXPORT_STATIC_CALL_TRAMP(preempt_schedule); 5245 #endif 5246 5247 5248 /** 5249 * preempt_schedule_notrace - preempt_schedule called by tracing 5250 * 5251 * The tracing infrastructure uses preempt_enable_notrace to prevent 5252 * recursion and tracing preempt enabling caused by the tracing 5253 * infrastructure itself. But as tracing can happen in areas coming 5254 * from userspace or just about to enter userspace, a preempt enable 5255 * can occur before user_exit() is called. This will cause the scheduler 5256 * to be called when the system is still in usermode. 
5257  *
5258  * To prevent this, preempt_enable_notrace() will use this function
5259  * instead of preempt_schedule() to exit user context if needed before
5260  * calling the scheduler.
5261  */
5262 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
5263 {
5264 	enum ctx_state prev_ctx;
5265 
5266 	if (likely(!preemptible()))
5267 		return;
5268 
5269 	do {
5270 		/*
5271 		 * Because the function tracer can trace preempt_count_sub()
5272 		 * and it also uses preempt_enable/disable_notrace(), if
5273 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
5274 		 * by the function tracer will call this function again and
5275 		 * cause infinite recursion.
5276 		 *
5277 		 * Preemption must be disabled here before the function
5278 		 * tracer can trace. Break up preempt_disable() into two
5279 		 * calls. One to disable preemption without fear of being
5280 		 * traced. The other to still record the preemption latency,
5281 		 * which can also be traced by the function tracer.
5282 		 */
5283 		preempt_disable_notrace();
5284 		preempt_latency_start(1);
5285 		/*
5286 		 * Needs preempt disabled in case user_exit() is traced
5287 		 * and the tracer calls preempt_enable_notrace() causing
5288 		 * an infinite recursion.
5289 		 */
5290 		prev_ctx = exception_enter();
5291 		__schedule(true);
5292 		exception_exit(prev_ctx);
5293 
5294 		preempt_latency_stop(1);
5295 		preempt_enable_no_resched_notrace();
5296 	} while (need_resched());
5297 }
5298 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
5299 
5300 #ifdef CONFIG_PREEMPT_DYNAMIC
5301 DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
5302 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
5303 #endif
5304 
5305 #endif /* CONFIG_PREEMPTION */
5306 
5307 #ifdef CONFIG_PREEMPT_DYNAMIC
5308 
5309 #include <linux/entry-common.h>
5310 
5311 /*
5312  * SC:cond_resched
5313  * SC:might_resched
5314  * SC:preempt_schedule
5315  * SC:preempt_schedule_notrace
5316  * SC:irqentry_exit_cond_resched
5317  *
5318  *
5319  * NONE:
5320  *   cond_resched               <- __cond_resched
5321  *   might_resched              <- RET0
5322  *   preempt_schedule           <- NOP
5323  *   preempt_schedule_notrace   <- NOP
5324  *   irqentry_exit_cond_resched <- NOP
5325  *
5326  * VOLUNTARY:
5327  *   cond_resched               <- __cond_resched
5328  *   might_resched              <- __cond_resched
5329  *   preempt_schedule           <- NOP
5330  *   preempt_schedule_notrace   <- NOP
5331  *   irqentry_exit_cond_resched <- NOP
5332  *
5333  * FULL:
5334  *   cond_resched               <- RET0
5335  *   might_resched              <- RET0
5336  *   preempt_schedule           <- preempt_schedule
5337  *   preempt_schedule_notrace   <- preempt_schedule_notrace
5338  *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
5339  */
5340 
5341 enum {
5342 	preempt_dynamic_none = 0,
5343 	preempt_dynamic_voluntary,
5344 	preempt_dynamic_full,
5345 };
5346 
5347 static int preempt_dynamic_mode = preempt_dynamic_full;
5348 
5349 static int sched_dynamic_mode(const char *str)
5350 {
5351 	if (!strcmp(str, "none"))
5352 		return preempt_dynamic_none;
5353 
5354 	if (!strcmp(str, "voluntary"))
5355 		return preempt_dynamic_voluntary;
5356 
5357 	if (!strcmp(str, "full"))
5358 		return preempt_dynamic_full;
5359 
5360 	return -EINVAL;
5361 }
5362 
5363 static void sched_dynamic_update(int mode)
5364 {
5365 	/*
5366 	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
5367 	 * the ZERO state, which is invalid.
5368 */ 5369 static_call_update(cond_resched, __cond_resched); 5370 static_call_update(might_resched, __cond_resched); 5371 static_call_update(preempt_schedule, __preempt_schedule_func); 5372 static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func); 5373 static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched); 5374 5375 switch (mode) { 5376 case preempt_dynamic_none: 5377 static_call_update(cond_resched, __cond_resched); 5378 static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0); 5379 static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL); 5380 static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL); 5381 static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL); 5382 pr_info("Dynamic Preempt: none\n"); 5383 break; 5384 5385 case preempt_dynamic_voluntary: 5386 static_call_update(cond_resched, __cond_resched); 5387 static_call_update(might_resched, __cond_resched); 5388 static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL); 5389 static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL); 5390 static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL); 5391 pr_info("Dynamic Preempt: voluntary\n"); 5392 break; 5393 5394 case preempt_dynamic_full: 5395 static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0); 5396 static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0); 5397 static_call_update(preempt_schedule, __preempt_schedule_func); 5398 static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func); 5399 static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched); 5400 pr_info("Dynamic Preempt: full\n"); 5401 break; 5402 } 5403 5404 preempt_dynamic_mode = mode; 5405 } 5406 5407 static int __init setup_preempt_mode(char *str) 5408 { 5409 int mode = sched_dynamic_mode(str); 5410 if (mode < 0) { 5411 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str); 5412 return 1; 5413 } 5414 5415 sched_dynamic_update(mode); 5416 return 0; 5417 } 5418 __setup("preempt=", setup_preempt_mode); 5419 5420 #ifdef CONFIG_SCHED_DEBUG 5421 5422 static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf, 5423 size_t cnt, loff_t *ppos) 5424 { 5425 char buf[16]; 5426 int mode; 5427 5428 if (cnt > 15) 5429 cnt = 15; 5430 5431 if (copy_from_user(&buf, ubuf, cnt)) 5432 return -EFAULT; 5433 5434 buf[cnt] = 0; 5435 mode = sched_dynamic_mode(strstrip(buf)); 5436 if (mode < 0) 5437 return mode; 5438 5439 sched_dynamic_update(mode); 5440 5441 *ppos += cnt; 5442 5443 return cnt; 5444 } 5445 5446 static int sched_dynamic_show(struct seq_file *m, void *v) 5447 { 5448 static const char * preempt_modes[] = { 5449 "none", "voluntary", "full" 5450 }; 5451 int i; 5452 5453 for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) { 5454 if (preempt_dynamic_mode == i) 5455 seq_puts(m, "("); 5456 seq_puts(m, preempt_modes[i]); 5457 if (preempt_dynamic_mode == i) 5458 seq_puts(m, ")"); 5459 5460 seq_puts(m, " "); 5461 } 5462 5463 seq_puts(m, "\n"); 5464 return 0; 5465 } 5466 5467 static int sched_dynamic_open(struct inode *inode, struct file *filp) 5468 { 5469 return single_open(filp, sched_dynamic_show, NULL); 5470 } 5471 5472 static const struct file_operations sched_dynamic_fops = { 5473 .open = sched_dynamic_open, 5474 .write = sched_dynamic_write, 5475 .read = seq_read, 5476 .llseek = seq_lseek, 5477 .release = 
single_release,
5478 };
5479 
5480 static __init int sched_init_debug_dynamic(void)
5481 {
5482 	debugfs_create_file("sched_preempt", 0644, NULL, NULL, &sched_dynamic_fops);
5483 	return 0;
5484 }
5485 late_initcall(sched_init_debug_dynamic);
5486 
5487 #endif /* CONFIG_SCHED_DEBUG */
5488 #endif /* CONFIG_PREEMPT_DYNAMIC */
5489 
5490 
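/*
 * Editor's usage note (not part of the original source): with
 * CONFIG_PREEMPT_DYNAMIC the preemption model can be chosen at boot
 * via the "preempt={none,voluntary,full}" command-line option handled
 * above, or (with CONFIG_SCHED_DEBUG) at runtime through debugfs:
 *
 *	# cat /sys/kernel/debug/sched_preempt
 *	none voluntary (full)
 *	# echo voluntary > /sys/kernel/debug/sched_preempt
 */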
5491 /*
5492  * This is the entry point to schedule() from kernel preemption
5493  * off of irq context.
5494  * Note that this is called and returns with IRQs disabled. This
5495  * protects us against recursive calls from IRQ context.
5496  */
5497 asmlinkage __visible void __sched preempt_schedule_irq(void)
5498 {
5499 	enum ctx_state prev_state;
5500 
5501 	/* Catch callers which need to be fixed */
5502 	BUG_ON(preempt_count() || !irqs_disabled());
5503 
5504 	prev_state = exception_enter();
5505 
5506 	do {
5507 		preempt_disable();
5508 		local_irq_enable();
5509 		__schedule(true);
5510 		local_irq_disable();
5511 		sched_preempt_enable_no_resched();
5512 	} while (need_resched());
5513 
5514 	exception_exit(prev_state);
5515 }
5516 
5517 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
5518 			  void *key)
5519 {
5520 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
5521 	return try_to_wake_up(curr->private, mode, wake_flags);
5522 }
5523 EXPORT_SYMBOL(default_wake_function);
5524 
5525 #ifdef CONFIG_RT_MUTEXES
5526 
5527 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
5528 {
5529 	if (pi_task)
5530 		prio = min(prio, pi_task->prio);
5531 
5532 	return prio;
5533 }
5534 
5535 static inline int rt_effective_prio(struct task_struct *p, int prio)
5536 {
5537 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
5538 
5539 	return __rt_effective_prio(pi_task, prio);
5540 }
5541 
5542 /*
5543  * rt_mutex_setprio - set the current priority of a task
5544  * @p: task to boost
5545  * @pi_task: donor task
5546  *
5547  * This function changes the 'effective' priority of a task. It does
5548  * not touch ->normal_prio like __setscheduler().
5549  *
5550  * Used by the rt_mutex code to implement priority inheritance
5551  * logic. Call site only calls if the priority of the task changed.
5552  */
5553 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
5554 {
5555 	int prio, oldprio, queued, running, queue_flag =
5556 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
5557 	const struct sched_class *prev_class;
5558 	struct rq_flags rf;
5559 	struct rq *rq;
5560 
5561 	/* XXX used to be waiter->prio, not waiter->task->prio */
5562 	prio = __rt_effective_prio(pi_task, p->normal_prio);
5563 
5564 	/*
5565 	 * If nothing changed; bail early.
5566 	 */
5567 	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
5568 		return;
5569 
5570 	rq = __task_rq_lock(p, &rf);
5571 	update_rq_clock(rq);
5572 	/*
5573 	 * Set under pi_lock && rq->lock, such that the value can be used under
5574 	 * either lock.
5575 	 *
5576 	 * Note that there is loads of trickery involved in making this pointer
5577 	 * cache work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work
5578 	 * together to ensure a task is de-boosted (pi_task is set to NULL)
5579 	 * before the task is allowed to run again (and can exit). This ensures
5580 	 * the pointer points to a blocked task -- which guarantees the task is
5581 	 * present.
5582 	 */
5583 	p->pi_top_task = pi_task;
5584 
5585 	/*
5586 	 * For FIFO/RR we only need to set prio, if that matches we're done.
5587 	 */
5588 	if (prio == p->prio && !dl_prio(prio))
5589 		goto out_unlock;
5590 
5591 	/*
5592 	 * Idle task boosting is a no-no in general. There is one
5593 	 * exception, when PREEMPT_RT and NOHZ is active:
5594 	 *
5595 	 * The idle task calls get_next_timer_interrupt() and holds
5596 	 * the timer wheel base->lock on the CPU and another CPU wants
5597 	 * to access the timer (probably to cancel it). We can safely
5598 	 * ignore the boosting request, as the idle CPU runs this code
5599 	 * with interrupts disabled and will complete the lock
5600 	 * protected section without being interrupted. So there is no
5601 	 * real need to boost.
5602 	 */
5603 	if (unlikely(p == rq->idle)) {
5604 		WARN_ON(p != rq->curr);
5605 		WARN_ON(p->pi_blocked_on);
5606 		goto out_unlock;
5607 	}
5608 
5609 	trace_sched_pi_setprio(p, pi_task);
5610 	oldprio = p->prio;
5611 
5612 	if (oldprio == prio)
5613 		queue_flag &= ~DEQUEUE_MOVE;
5614 
5615 	prev_class = p->sched_class;
5616 	queued = task_on_rq_queued(p);
5617 	running = task_current(rq, p);
5618 	if (queued)
5619 		dequeue_task(rq, p, queue_flag);
5620 	if (running)
5621 		put_prev_task(rq, p);
5622 
5623 	/*
5624 	 * Boosting conditions are:
5625 	 * 1. -rt task is running and holds mutex A
5626 	 *      --> -dl task blocks on mutex A
5627 	 *
5628 	 * 2. -dl task is running and holds mutex A
5629 	 *      --> -dl task blocks on mutex A and could preempt the
5630 	 *          running task
5631 	 */
5632 	if (dl_prio(prio)) {
5633 		if (!dl_prio(p->normal_prio) ||
5634 		    (pi_task && dl_prio(pi_task->prio) &&
5635 		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
5636 			p->dl.pi_se = pi_task->dl.pi_se;
5637 			queue_flag |= ENQUEUE_REPLENISH;
5638 		} else {
5639 			p->dl.pi_se = &p->dl;
5640 		}
5641 		p->sched_class = &dl_sched_class;
5642 	} else if (rt_prio(prio)) {
5643 		if (dl_prio(oldprio))
5644 			p->dl.pi_se = &p->dl;
5645 		if (oldprio < prio)
5646 			queue_flag |= ENQUEUE_HEAD;
5647 		p->sched_class = &rt_sched_class;
5648 	} else {
5649 		if (dl_prio(oldprio))
5650 			p->dl.pi_se = &p->dl;
5651 		if (rt_prio(oldprio))
5652 			p->rt.timeout = 0;
5653 		p->sched_class = &fair_sched_class;
5654 	}
5655 
5656 	p->prio = prio;
5657 
5658 	if (queued)
5659 		enqueue_task(rq, p, queue_flag);
5660 	if (running)
5661 		set_next_task(rq, p);
5662 
5663 	check_class_changed(rq, p, prev_class, oldprio);
5664 out_unlock:
5665 	/* Avoid rq from going away on us: */
5666 	preempt_disable();
5667 
5668 	rq_unpin_lock(rq, &rf);
5669 	__balance_callbacks(rq);
5670 	raw_spin_unlock(&rq->lock);
5671 
5672 	preempt_enable();
5673 }
5674 #else
5675 static inline int rt_effective_prio(struct task_struct *p, int prio)
5676 {
5677 	return prio;
5678 }
5679 #endif
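/*
 * Editor's worked example (not part of the original source): a
 * SCHED_NORMAL task (kernel prio 120) holds a mutex that an RT task
 * with kernel prio 10 blocks on.  rt_mutex_setprio(owner, waiter)
 * computes min(120, 10) == 10 via __rt_effective_prio(), switches the
 * owner to &rt_sched_class and requeues it, so it runs at the waiter's
 * priority until rt_mutex_postunlock() de-boosts it.
 */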
5680 void set_user_nice(struct task_struct *p, long nice)
5681 {
5682 	bool queued, running;
5683 	int old_prio;
5684 	struct rq_flags rf;
5685 	struct rq *rq;
5686 
5687 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
5688 		return;
5689 	/*
5690 	 * We have to be careful, if called from sys_setpriority(),
5691 	 * the task might be in the middle of scheduling on another CPU.
5692 	 */
5693 	rq = task_rq_lock(p, &rf);
5694 	update_rq_clock(rq);
5695 
5696 	/*
5697 	 * The RT priorities are set via sched_setscheduler(), but we still
5698 	 * allow the 'normal' nice value to be set - but as expected
5699 	 * it won't have any effect on scheduling as long as the task's
5700 	 * policy is SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
5701 	 */
5702 	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
5703 		p->static_prio = NICE_TO_PRIO(nice);
5704 		goto out_unlock;
5705 	}
5706 	queued = task_on_rq_queued(p);
5707 	running = task_current(rq, p);
5708 	if (queued)
5709 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
5710 	if (running)
5711 		put_prev_task(rq, p);
5712 
5713 	p->static_prio = NICE_TO_PRIO(nice);
5714 	set_load_weight(p, true);
5715 	old_prio = p->prio;
5716 	p->prio = effective_prio(p);
5717 
5718 	if (queued)
5719 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
5720 	if (running)
5721 		set_next_task(rq, p);
5722 
5723 	/*
5724 	 * If the task increased its priority or is running and
5725 	 * lowered its priority, then reschedule its CPU:
5726 	 */
5727 	p->sched_class->prio_changed(rq, p, old_prio);
5728 
5729 out_unlock:
5730 	task_rq_unlock(rq, p, &rf);
5731 }
5732 EXPORT_SYMBOL(set_user_nice);
5733 
5734 /*
5735  * can_nice - check if a task can reduce its nice value
5736  * @p: task
5737  * @nice: nice value
5738  */
5739 int can_nice(const struct task_struct *p, const int nice)
5740 {
5741 	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
5742 	int nice_rlim = nice_to_rlimit(nice);
5743 
5744 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
5745 		capable(CAP_SYS_NICE));
5746 }
5747 
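/*
 * Editor's worked example (not part of the original source):
 * nice_to_rlimit() maps nice [19..-20] onto [1..40], i.e.
 * nice_to_rlimit(n) == 20 - n.  A task with RLIMIT_NICE == 25 can
 * therefore lower its nice value to 20 - 25 == -5, but no further
 * unless it has CAP_SYS_NICE.
 */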
5748 #ifdef __ARCH_WANT_SYS_NICE
5749 
5750 /*
5751  * sys_nice - change the priority of the current process.
5752  * @increment: priority increment
5753  *
5754  * sys_setpriority is a more generic, but much slower function that
5755  * does similar things.
5756  */
5757 SYSCALL_DEFINE1(nice, int, increment)
5758 {
5759 	long nice, retval;
5760 
5761 	/*
5762 	 * Setpriority might change our priority at the same moment.
5763 	 * We don't have to worry. Conceptually one call occurs first
5764 	 * and we have a single winner.
5765 	 */
5766 	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
5767 	nice = task_nice(current) + increment;
5768 
5769 	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
5770 	if (increment < 0 && !can_nice(current, nice))
5771 		return -EPERM;
5772 
5773 	retval = security_task_setnice(current, nice);
5774 	if (retval)
5775 		return retval;
5776 
5777 	set_user_nice(current, nice);
5778 	return 0;
5779 }
5780 
5781 #endif
5782 
5783 /**
5784  * task_prio - return the priority value of a given task.
5785  * @p: the task in question.
5786  *
5787  * Return: The priority value as seen by users in /proc.
5788  *
5789  * sched policy         return value   kernel prio    user prio/nice
5790  *
5791  * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
5792  * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
5793  * deadline                     -101             -1           0
5794  */
5795 int task_prio(const struct task_struct *p)
5796 {
5797 	return p->prio - MAX_RT_PRIO;
5798 }
5799 
5800 /**
5801  * idle_cpu - is a given CPU idle currently?
5802  * @cpu: the processor in question.
5803  *
5804  * Return: 1 if the CPU is currently idle. 0 otherwise.
5805  */
5806 int idle_cpu(int cpu)
5807 {
5808 	struct rq *rq = cpu_rq(cpu);
5809 
5810 	if (rq->curr != rq->idle)
5811 		return 0;
5812 
5813 	if (rq->nr_running)
5814 		return 0;
5815 
5816 #ifdef CONFIG_SMP
5817 	if (rq->ttwu_pending)
5818 		return 0;
5819 #endif
5820 
5821 	return 1;
5822 }
5823 
5824 /**
5825  * available_idle_cpu - is a given CPU idle for enqueuing work.
5826  * @cpu: the CPU in question.
5827  *
5828  * Return: 1 if the CPU is currently idle. 0 otherwise.
5829  */
5830 int available_idle_cpu(int cpu)
5831 {
5832 	if (!idle_cpu(cpu))
5833 		return 0;
5834 
5835 	if (vcpu_is_preempted(cpu))
5836 		return 0;
5837 
5838 	return 1;
5839 }
5840 
5841 /**
5842  * idle_task - return the idle task for a given CPU.
5843  * @cpu: the processor in question.
5844  *
5845  * Return: The idle task for the CPU @cpu.
5846  */
5847 struct task_struct *idle_task(int cpu)
5848 {
5849 	return cpu_rq(cpu)->idle;
5850 }
5851 
5852 #ifdef CONFIG_SMP
5853 /*
5854  * This function computes an effective utilization for the given CPU, to be
5855  * used for frequency selection given the linear relation: f = u * f_max.
5856  *
5857  * The scheduler tracks the following metrics:
5858  *
5859  *   cpu_util_{cfs,rt,dl,irq}()
5860  *   cpu_bw_dl()
5861  *
5862  * Where the cfs,rt and dl util numbers are tracked with the same metric and
5863  * synchronized windows and are thus directly comparable.
5864  *
5865  * The cfs,rt,dl utilization are the running times measured with rq->clock_task
5866  * which excludes things like IRQ and steal-time. These latter are then accrued
5867  * in the irq utilization.
5868  *
5869  * The DL bandwidth number OTOH is not a measured metric but a value computed
5870  * based on the task model parameters and gives the minimal utilization
5871  * required to meet deadlines.
5872  */
5873 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
5874 				 unsigned long max, enum cpu_util_type type,
5875 				 struct task_struct *p)
5876 {
5877 	unsigned long dl_util, util, irq;
5878 	struct rq *rq = cpu_rq(cpu);
5879 
5880 	if (!uclamp_is_used() &&
5881 	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
5882 		return max;
5883 	}
5884 
5885 	/*
5886 	 * Early check to see if IRQ/steal time saturates the CPU, can be
5887 	 * because of inaccuracies in how we track these -- see
5888 	 * update_irq_load_avg().
5889 	 */
5890 	irq = cpu_util_irq(rq);
5891 	if (unlikely(irq >= max))
5892 		return max;
5893 
5894 	/*
5895 	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
5896 	 * CFS tasks and we use the same metric to track the effective
5897 	 * utilization (PELT windows are synchronized) we can directly add them
5898 	 * to obtain the CPU's actual utilization.
5899 	 *
5900 	 * CFS and RT utilization can be boosted or capped, depending on
5901 	 * utilization clamp constraints requested by currently RUNNABLE
5902 	 * tasks.
5903 	 * When there are no CFS RUNNABLE tasks, clamps are released and
5904 	 * frequency will be gracefully reduced with the utilization decay.
5905 	 */
5906 	util = util_cfs + cpu_util_rt(rq);
5907 	if (type == FREQUENCY_UTIL)
5908 		util = uclamp_rq_util_with(rq, util, p);
5909 
5910 	dl_util = cpu_util_dl(rq);
5911 
5912 	/*
5913 	 * For frequency selection we do not make cpu_util_dl() a permanent part
5914 	 * of this sum because we want to use cpu_bw_dl() later on, but we need
5915 	 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
5916 	 * that we select f_max when there is no idle time.
5917 * 5918 * NOTE: numerical errors or stop class might cause us to not quite hit 5919 * saturation when we should -- something for later. 5920 */ 5921 if (util + dl_util >= max) 5922 return max; 5923 5924 /* 5925 * OTOH, for energy computation we need the estimated running time, so 5926 * include util_dl and ignore dl_bw. 5927 */ 5928 if (type == ENERGY_UTIL) 5929 util += dl_util; 5930 5931 /* 5932 * There is still idle time; further improve the number by using the 5933 * irq metric. Because IRQ/steal time is hidden from the task clock we 5934 * need to scale the task numbers: 5935 * 5936 * max - irq 5937 * U' = irq + --------- * U 5938 * max 5939 */ 5940 util = scale_irq_capacity(util, irq, max); 5941 util += irq; 5942 5943 /* 5944 * Bandwidth required by DEADLINE must always be granted while, for 5945 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism 5946 * to gracefully reduce the frequency when no tasks show up for longer 5947 * periods of time. 5948 * 5949 * Ideally we would like to set bw_dl as min/guaranteed freq and util + 5950 * bw_dl as requested freq. However, cpufreq is not yet ready for such 5951 * an interface. So, we only do the latter for now. 5952 */ 5953 if (type == FREQUENCY_UTIL) 5954 util += cpu_bw_dl(rq); 5955 5956 return min(max, util); 5957 } 5958 5959 unsigned long sched_cpu_util(int cpu, unsigned long max) 5960 { 5961 return effective_cpu_util(cpu, cpu_util_cfs(cpu_rq(cpu)), max, 5962 ENERGY_UTIL, NULL); 5963 } 5964 #endif /* CONFIG_SMP */ 5965 5966 /** 5967 * find_process_by_pid - find a process with a matching PID value. 5968 * @pid: the pid in question. 5969 * 5970 * The task of @pid, if found. %NULL otherwise. 5971 */ 5972 static struct task_struct *find_process_by_pid(pid_t pid) 5973 { 5974 return pid ? find_task_by_vpid(pid) : current; 5975 } 5976 5977 /* 5978 * sched_setparam() passes in -1 for its policy, to let the functions 5979 * it calls know not to change it. 5980 */ 5981 #define SETPARAM_POLICY -1 5982 5983 static void __setscheduler_params(struct task_struct *p, 5984 const struct sched_attr *attr) 5985 { 5986 int policy = attr->sched_policy; 5987 5988 if (policy == SETPARAM_POLICY) 5989 policy = p->policy; 5990 5991 p->policy = policy; 5992 5993 if (dl_policy(policy)) 5994 __setparam_dl(p, attr); 5995 else if (fair_policy(policy)) 5996 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 5997 5998 /* 5999 * __sched_setscheduler() ensures attr->sched_priority == 0 when 6000 * !rt_policy. Always setting this ensures that things like 6001 * getparam()/getattr() don't report silly values for !rt tasks. 6002 */ 6003 p->rt_priority = attr->sched_priority; 6004 p->normal_prio = normal_prio(p); 6005 set_load_weight(p, true); 6006 } 6007 6008 /* Actually do priority change: must hold pi & rq lock. */ 6009 static void __setscheduler(struct rq *rq, struct task_struct *p, 6010 const struct sched_attr *attr, bool keep_boost) 6011 { 6012 /* 6013 * If params can't change scheduling class changes aren't allowed 6014 * either. 6015 */ 6016 if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS) 6017 return; 6018 6019 __setscheduler_params(p, attr); 6020 6021 /* 6022 * Keep a potential priority boosting if called from 6023 * sched_setscheduler(). 
6024 */ 6025 p->prio = normal_prio(p); 6026 if (keep_boost) 6027 p->prio = rt_effective_prio(p, p->prio); 6028 6029 if (dl_prio(p->prio)) 6030 p->sched_class = &dl_sched_class; 6031 else if (rt_prio(p->prio)) 6032 p->sched_class = &rt_sched_class; 6033 else 6034 p->sched_class = &fair_sched_class; 6035 } 6036 6037 /* 6038 * Check the target process has a UID that matches the current process's: 6039 */ 6040 static bool check_same_owner(struct task_struct *p) 6041 { 6042 const struct cred *cred = current_cred(), *pcred; 6043 bool match; 6044 6045 rcu_read_lock(); 6046 pcred = __task_cred(p); 6047 match = (uid_eq(cred->euid, pcred->euid) || 6048 uid_eq(cred->euid, pcred->uid)); 6049 rcu_read_unlock(); 6050 return match; 6051 } 6052 6053 static int __sched_setscheduler(struct task_struct *p, 6054 const struct sched_attr *attr, 6055 bool user, bool pi) 6056 { 6057 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : 6058 MAX_RT_PRIO - 1 - attr->sched_priority; 6059 int retval, oldprio, oldpolicy = -1, queued, running; 6060 int new_effective_prio, policy = attr->sched_policy; 6061 const struct sched_class *prev_class; 6062 struct callback_head *head; 6063 struct rq_flags rf; 6064 int reset_on_fork; 6065 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 6066 struct rq *rq; 6067 6068 /* The pi code expects interrupts enabled */ 6069 BUG_ON(pi && in_interrupt()); 6070 recheck: 6071 /* Double check policy once rq lock held: */ 6072 if (policy < 0) { 6073 reset_on_fork = p->sched_reset_on_fork; 6074 policy = oldpolicy = p->policy; 6075 } else { 6076 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 6077 6078 if (!valid_policy(policy)) 6079 return -EINVAL; 6080 } 6081 6082 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) 6083 return -EINVAL; 6084 6085 /* 6086 * Valid priorities for SCHED_FIFO and SCHED_RR are 6087 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL, 6088 * SCHED_BATCH and SCHED_IDLE is 0. 6089 */ 6090 if (attr->sched_priority > MAX_RT_PRIO-1) 6091 return -EINVAL; 6092 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 6093 (rt_policy(policy) != (attr->sched_priority != 0))) 6094 return -EINVAL; 6095 6096 /* 6097 * Allow unprivileged RT tasks to decrease priority: 6098 */ 6099 if (user && !capable(CAP_SYS_NICE)) { 6100 if (fair_policy(policy)) { 6101 if (attr->sched_nice < task_nice(p) && 6102 !can_nice(p, attr->sched_nice)) 6103 return -EPERM; 6104 } 6105 6106 if (rt_policy(policy)) { 6107 unsigned long rlim_rtprio = 6108 task_rlimit(p, RLIMIT_RTPRIO); 6109 6110 /* Can't set/change the rt policy: */ 6111 if (policy != p->policy && !rlim_rtprio) 6112 return -EPERM; 6113 6114 /* Can't increase priority: */ 6115 if (attr->sched_priority > p->rt_priority && 6116 attr->sched_priority > rlim_rtprio) 6117 return -EPERM; 6118 } 6119 6120 /* 6121 * Can't set/change SCHED_DEADLINE policy at all for now 6122 * (safest behavior); in the future we would like to allow 6123 * unprivileged DL tasks to increase their relative deadline 6124 * or reduce their runtime (both ways reducing utilization) 6125 */ 6126 if (dl_policy(policy)) 6127 return -EPERM; 6128 6129 /* 6130 * Treat SCHED_IDLE as nice 20. Only allow a switch to 6131 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 
6132 */ 6133 if (task_has_idle_policy(p) && !idle_policy(policy)) { 6134 if (!can_nice(p, task_nice(p))) 6135 return -EPERM; 6136 } 6137 6138 /* Can't change other user's priorities: */ 6139 if (!check_same_owner(p)) 6140 return -EPERM; 6141 6142 /* Normal users shall not reset the sched_reset_on_fork flag: */ 6143 if (p->sched_reset_on_fork && !reset_on_fork) 6144 return -EPERM; 6145 } 6146 6147 if (user) { 6148 if (attr->sched_flags & SCHED_FLAG_SUGOV) 6149 return -EINVAL; 6150 6151 retval = security_task_setscheduler(p); 6152 if (retval) 6153 return retval; 6154 } 6155 6156 /* Update task specific "requested" clamps */ 6157 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { 6158 retval = uclamp_validate(p, attr); 6159 if (retval) 6160 return retval; 6161 } 6162 6163 if (pi) 6164 cpuset_read_lock(); 6165 6166 /* 6167 * Make sure no PI-waiters arrive (or leave) while we are 6168 * changing the priority of the task: 6169 * 6170 * To be able to change p->policy safely, the appropriate 6171 * runqueue lock must be held. 6172 */ 6173 rq = task_rq_lock(p, &rf); 6174 update_rq_clock(rq); 6175 6176 /* 6177 * Changing the policy of the stop threads its a very bad idea: 6178 */ 6179 if (p == rq->stop) { 6180 retval = -EINVAL; 6181 goto unlock; 6182 } 6183 6184 /* 6185 * If not changing anything there's no need to proceed further, 6186 * but store a possible modification of reset_on_fork. 6187 */ 6188 if (unlikely(policy == p->policy)) { 6189 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 6190 goto change; 6191 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 6192 goto change; 6193 if (dl_policy(policy) && dl_param_changed(p, attr)) 6194 goto change; 6195 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) 6196 goto change; 6197 6198 p->sched_reset_on_fork = reset_on_fork; 6199 retval = 0; 6200 goto unlock; 6201 } 6202 change: 6203 6204 if (user) { 6205 #ifdef CONFIG_RT_GROUP_SCHED 6206 /* 6207 * Do not allow realtime tasks into groups that have no runtime 6208 * assigned. 6209 */ 6210 if (rt_bandwidth_enabled() && rt_policy(policy) && 6211 task_group(p)->rt_bandwidth.rt_runtime == 0 && 6212 !task_group_is_autogroup(task_group(p))) { 6213 retval = -EPERM; 6214 goto unlock; 6215 } 6216 #endif 6217 #ifdef CONFIG_SMP 6218 if (dl_bandwidth_enabled() && dl_policy(policy) && 6219 !(attr->sched_flags & SCHED_FLAG_SUGOV)) { 6220 cpumask_t *span = rq->rd->span; 6221 6222 /* 6223 * Don't allow tasks with an affinity mask smaller than 6224 * the entire root_domain to become SCHED_DEADLINE. We 6225 * will also fail if there's no bandwidth available. 6226 */ 6227 if (!cpumask_subset(span, p->cpus_ptr) || 6228 rq->rd->dl_bw.bw == 0) { 6229 retval = -EPERM; 6230 goto unlock; 6231 } 6232 } 6233 #endif 6234 } 6235 6236 /* Re-check policy now with rq lock held: */ 6237 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 6238 policy = oldpolicy = -1; 6239 task_rq_unlock(rq, p, &rf); 6240 if (pi) 6241 cpuset_read_unlock(); 6242 goto recheck; 6243 } 6244 6245 /* 6246 * If setscheduling to SCHED_DEADLINE (or changing the parameters 6247 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 6248 * is available. 6249 */ 6250 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { 6251 retval = -EBUSY; 6252 goto unlock; 6253 } 6254 6255 p->sched_reset_on_fork = reset_on_fork; 6256 oldprio = p->prio; 6257 6258 if (pi) { 6259 /* 6260 * Take priority boosted tasks into account. 
If the new
6261 * effective priority is unchanged, we just store the new
6262 * normal parameters and do not touch the scheduler class and
6263 * the runqueue. This will be done when the task deboosts
6264 * itself.
6265 */
6266 new_effective_prio = rt_effective_prio(p, newprio);
6267 if (new_effective_prio == oldprio)
6268 queue_flags &= ~DEQUEUE_MOVE;
6269 }
6270
6271 queued = task_on_rq_queued(p);
6272 running = task_current(rq, p);
6273 if (queued)
6274 dequeue_task(rq, p, queue_flags);
6275 if (running)
6276 put_prev_task(rq, p);
6277
6278 prev_class = p->sched_class;
6279
6280 __setscheduler(rq, p, attr, pi);
6281 __setscheduler_uclamp(p, attr);
6282
6283 if (queued) {
6284 /*
6285 * We enqueue to tail when the priority of a task is
6286 * increased (user space view).
6287 */
6288 if (oldprio < p->prio)
6289 queue_flags |= ENQUEUE_HEAD;
6290
6291 enqueue_task(rq, p, queue_flags);
6292 }
6293 if (running)
6294 set_next_task(rq, p);
6295
6296 check_class_changed(rq, p, prev_class, oldprio);
6297
6298 /* Prevent the rq from going away on us: */
6299 preempt_disable();
6300 head = splice_balance_callbacks(rq);
6301 task_rq_unlock(rq, p, &rf);
6302
6303 if (pi) {
6304 cpuset_read_unlock();
6305 rt_mutex_adjust_pi(p);
6306 }
6307
6308 /* Run balance callbacks after we've adjusted the PI chain: */
6309 balance_callbacks(rq, head);
6310 preempt_enable();
6311
6312 return 0;
6313
6314 unlock:
6315 task_rq_unlock(rq, p, &rf);
6316 if (pi)
6317 cpuset_read_unlock();
6318 return retval;
6319 }
6320
6321 static int _sched_setscheduler(struct task_struct *p, int policy,
6322 const struct sched_param *param, bool check)
6323 {
6324 struct sched_attr attr = {
6325 .sched_policy = policy,
6326 .sched_priority = param->sched_priority,
6327 .sched_nice = PRIO_TO_NICE(p->static_prio),
6328 };
6329
6330 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
6331 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
6332 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
6333 policy &= ~SCHED_RESET_ON_FORK;
6334 attr.sched_policy = policy;
6335 }
6336
6337 return __sched_setscheduler(p, &attr, check, true);
6338 }
6339 /**
6340 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
6341 * @p: the task in question.
6342 * @policy: new policy.
6343 * @param: structure containing the new RT priority.
6344 *
6345 * Use sched_set_fifo(), read its comment.
6346 *
6347 * Return: 0 on success. An error code otherwise.
6348 *
6349 * NOTE that the task may already be dead.
6350 */
6351 int sched_setscheduler(struct task_struct *p, int policy,
6352 const struct sched_param *param)
6353 {
6354 return _sched_setscheduler(p, policy, param, true);
6355 }
6356
6357 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
6358 {
6359 return __sched_setscheduler(p, attr, true, true);
6360 }
6361
6362 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
6363 {
6364 return __sched_setscheduler(p, attr, false, true);
6365 }
6366
6367 /**
6368 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
6369 * @p: the task in question.
6370 * @policy: new policy.
6371 * @param: structure containing the new RT priority.
6372 *
6373 * Just like sched_setscheduler, only don't bother checking if the
6374 * current context has permission. For example, this is needed in
6375 * stop_machine(): we create temporary high priority worker threads,
6376 * but our caller might not have that capability.
6377 * 6378 * Return: 0 on success. An error code otherwise. 6379 */ 6380 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 6381 const struct sched_param *param) 6382 { 6383 return _sched_setscheduler(p, policy, param, false); 6384 } 6385 6386 /* 6387 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally 6388 * incapable of resource management, which is the one thing an OS really should 6389 * be doing. 6390 * 6391 * This is of course the reason it is limited to privileged users only. 6392 * 6393 * Worse still; it is fundamentally impossible to compose static priority 6394 * workloads. You cannot take two correctly working static prio workloads 6395 * and smash them together and still expect them to work. 6396 * 6397 * For this reason 'all' FIFO tasks the kernel creates are basically at: 6398 * 6399 * MAX_RT_PRIO / 2 6400 * 6401 * The administrator _MUST_ configure the system, the kernel simply doesn't 6402 * know enough information to make a sensible choice. 6403 */ 6404 void sched_set_fifo(struct task_struct *p) 6405 { 6406 struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; 6407 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 6408 } 6409 EXPORT_SYMBOL_GPL(sched_set_fifo); 6410 6411 /* 6412 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. 6413 */ 6414 void sched_set_fifo_low(struct task_struct *p) 6415 { 6416 struct sched_param sp = { .sched_priority = 1 }; 6417 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 6418 } 6419 EXPORT_SYMBOL_GPL(sched_set_fifo_low); 6420 6421 void sched_set_normal(struct task_struct *p, int nice) 6422 { 6423 struct sched_attr attr = { 6424 .sched_policy = SCHED_NORMAL, 6425 .sched_nice = nice, 6426 }; 6427 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); 6428 } 6429 EXPORT_SYMBOL_GPL(sched_set_normal); 6430 6431 static int 6432 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 6433 { 6434 struct sched_param lparam; 6435 struct task_struct *p; 6436 int retval; 6437 6438 if (!param || pid < 0) 6439 return -EINVAL; 6440 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 6441 return -EFAULT; 6442 6443 rcu_read_lock(); 6444 retval = -ESRCH; 6445 p = find_process_by_pid(pid); 6446 if (likely(p)) 6447 get_task_struct(p); 6448 rcu_read_unlock(); 6449 6450 if (likely(p)) { 6451 retval = sched_setscheduler(p, policy, &lparam); 6452 put_task_struct(p); 6453 } 6454 6455 return retval; 6456 } 6457 6458 /* 6459 * Mimics kernel/events/core.c perf_copy_attr(). 6460 */ 6461 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) 6462 { 6463 u32 size; 6464 int ret; 6465 6466 /* Zero the full structure, so that a short copy will be nice: */ 6467 memset(attr, 0, sizeof(*attr)); 6468 6469 ret = get_user(size, &uattr->size); 6470 if (ret) 6471 return ret; 6472 6473 /* ABI compatibility quirk: */ 6474 if (!size) 6475 size = SCHED_ATTR_SIZE_VER0; 6476 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) 6477 goto err_size; 6478 6479 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); 6480 if (ret) { 6481 if (ret == -E2BIG) 6482 goto err_size; 6483 return ret; 6484 } 6485 6486 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 6487 size < SCHED_ATTR_SIZE_VER1) 6488 return -EINVAL; 6489 6490 /* 6491 * XXX: Do we want to be lenient like existing syscalls; or do we want 6492 * to be strict and return an error on out-of-bounds values? 
6493 */ 6494 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 6495 6496 return 0; 6497 6498 err_size: 6499 put_user(sizeof(*attr), &uattr->size); 6500 return -E2BIG; 6501 } 6502 6503 /** 6504 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 6505 * @pid: the pid in question. 6506 * @policy: new policy. 6507 * @param: structure containing the new RT priority. 6508 * 6509 * Return: 0 on success. An error code otherwise. 6510 */ 6511 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) 6512 { 6513 if (policy < 0) 6514 return -EINVAL; 6515 6516 return do_sched_setscheduler(pid, policy, param); 6517 } 6518 6519 /** 6520 * sys_sched_setparam - set/change the RT priority of a thread 6521 * @pid: the pid in question. 6522 * @param: structure containing the new RT priority. 6523 * 6524 * Return: 0 on success. An error code otherwise. 6525 */ 6526 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 6527 { 6528 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 6529 } 6530 6531 /** 6532 * sys_sched_setattr - same as above, but with extended sched_attr 6533 * @pid: the pid in question. 6534 * @uattr: structure containing the extended parameters. 6535 * @flags: for future extension. 6536 */ 6537 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 6538 unsigned int, flags) 6539 { 6540 struct sched_attr attr; 6541 struct task_struct *p; 6542 int retval; 6543 6544 if (!uattr || pid < 0 || flags) 6545 return -EINVAL; 6546 6547 retval = sched_copy_attr(uattr, &attr); 6548 if (retval) 6549 return retval; 6550 6551 if ((int)attr.sched_policy < 0) 6552 return -EINVAL; 6553 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) 6554 attr.sched_policy = SETPARAM_POLICY; 6555 6556 rcu_read_lock(); 6557 retval = -ESRCH; 6558 p = find_process_by_pid(pid); 6559 if (likely(p)) 6560 get_task_struct(p); 6561 rcu_read_unlock(); 6562 6563 if (likely(p)) { 6564 retval = sched_setattr(p, &attr); 6565 put_task_struct(p); 6566 } 6567 6568 return retval; 6569 } 6570 6571 /** 6572 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 6573 * @pid: the pid in question. 6574 * 6575 * Return: On success, the policy of the thread. Otherwise, a negative error 6576 * code. 6577 */ 6578 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 6579 { 6580 struct task_struct *p; 6581 int retval; 6582 6583 if (pid < 0) 6584 return -EINVAL; 6585 6586 retval = -ESRCH; 6587 rcu_read_lock(); 6588 p = find_process_by_pid(pid); 6589 if (p) { 6590 retval = security_task_getscheduler(p); 6591 if (!retval) 6592 retval = p->policy 6593 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 6594 } 6595 rcu_read_unlock(); 6596 return retval; 6597 } 6598 6599 /** 6600 * sys_sched_getparam - get the RT priority of a thread 6601 * @pid: the pid in question. 6602 * @param: structure containing the RT priority. 6603 * 6604 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 6605 * code. 
6606 */ 6607 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 6608 { 6609 struct sched_param lp = { .sched_priority = 0 }; 6610 struct task_struct *p; 6611 int retval; 6612 6613 if (!param || pid < 0) 6614 return -EINVAL; 6615 6616 rcu_read_lock(); 6617 p = find_process_by_pid(pid); 6618 retval = -ESRCH; 6619 if (!p) 6620 goto out_unlock; 6621 6622 retval = security_task_getscheduler(p); 6623 if (retval) 6624 goto out_unlock; 6625 6626 if (task_has_rt_policy(p)) 6627 lp.sched_priority = p->rt_priority; 6628 rcu_read_unlock(); 6629 6630 /* 6631 * This one might sleep, we cannot do it with a spinlock held ... 6632 */ 6633 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 6634 6635 return retval; 6636 6637 out_unlock: 6638 rcu_read_unlock(); 6639 return retval; 6640 } 6641 6642 /* 6643 * Copy the kernel size attribute structure (which might be larger 6644 * than what user-space knows about) to user-space. 6645 * 6646 * Note that all cases are valid: user-space buffer can be larger or 6647 * smaller than the kernel-space buffer. The usual case is that both 6648 * have the same size. 6649 */ 6650 static int 6651 sched_attr_copy_to_user(struct sched_attr __user *uattr, 6652 struct sched_attr *kattr, 6653 unsigned int usize) 6654 { 6655 unsigned int ksize = sizeof(*kattr); 6656 6657 if (!access_ok(uattr, usize)) 6658 return -EFAULT; 6659 6660 /* 6661 * sched_getattr() ABI forwards and backwards compatibility: 6662 * 6663 * If usize == ksize then we just copy everything to user-space and all is good. 6664 * 6665 * If usize < ksize then we only copy as much as user-space has space for, 6666 * this keeps ABI compatibility as well. We skip the rest. 6667 * 6668 * If usize > ksize then user-space is using a newer version of the ABI, 6669 * which part the kernel doesn't know about. Just ignore it - tooling can 6670 * detect the kernel's knowledge of attributes from the attr->size value 6671 * which is set to ksize in this case. 6672 */ 6673 kattr->size = min(usize, ksize); 6674 6675 if (copy_to_user(uattr, kattr, kattr->size)) 6676 return -EFAULT; 6677 6678 return 0; 6679 } 6680 6681 /** 6682 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 6683 * @pid: the pid in question. 6684 * @uattr: structure containing the extended parameters. 6685 * @usize: sizeof(attr) for fwd/bwd comp. 6686 * @flags: for future extension. 6687 */ 6688 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 6689 unsigned int, usize, unsigned int, flags) 6690 { 6691 struct sched_attr kattr = { }; 6692 struct task_struct *p; 6693 int retval; 6694 6695 if (!uattr || pid < 0 || usize > PAGE_SIZE || 6696 usize < SCHED_ATTR_SIZE_VER0 || flags) 6697 return -EINVAL; 6698 6699 rcu_read_lock(); 6700 p = find_process_by_pid(pid); 6701 retval = -ESRCH; 6702 if (!p) 6703 goto out_unlock; 6704 6705 retval = security_task_getscheduler(p); 6706 if (retval) 6707 goto out_unlock; 6708 6709 kattr.sched_policy = p->policy; 6710 if (p->sched_reset_on_fork) 6711 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 6712 if (task_has_dl_policy(p)) 6713 __getparam_dl(p, &kattr); 6714 else if (task_has_rt_policy(p)) 6715 kattr.sched_priority = p->rt_priority; 6716 else 6717 kattr.sched_nice = task_nice(p); 6718 6719 #ifdef CONFIG_UCLAMP_TASK 6720 /* 6721 * This could race with another potential updater, but this is fine 6722 * because it'll correctly read the old or the new value. We don't need 6723 * to guarantee who wins the race as long as it doesn't return garbage. 
6724 */ 6725 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 6726 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 6727 #endif 6728 6729 rcu_read_unlock(); 6730 6731 return sched_attr_copy_to_user(uattr, &kattr, usize); 6732 6733 out_unlock: 6734 rcu_read_unlock(); 6735 return retval; 6736 } 6737 6738 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 6739 { 6740 cpumask_var_t cpus_allowed, new_mask; 6741 struct task_struct *p; 6742 int retval; 6743 6744 rcu_read_lock(); 6745 6746 p = find_process_by_pid(pid); 6747 if (!p) { 6748 rcu_read_unlock(); 6749 return -ESRCH; 6750 } 6751 6752 /* Prevent p going away */ 6753 get_task_struct(p); 6754 rcu_read_unlock(); 6755 6756 if (p->flags & PF_NO_SETAFFINITY) { 6757 retval = -EINVAL; 6758 goto out_put_task; 6759 } 6760 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 6761 retval = -ENOMEM; 6762 goto out_put_task; 6763 } 6764 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 6765 retval = -ENOMEM; 6766 goto out_free_cpus_allowed; 6767 } 6768 retval = -EPERM; 6769 if (!check_same_owner(p)) { 6770 rcu_read_lock(); 6771 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 6772 rcu_read_unlock(); 6773 goto out_free_new_mask; 6774 } 6775 rcu_read_unlock(); 6776 } 6777 6778 retval = security_task_setscheduler(p); 6779 if (retval) 6780 goto out_free_new_mask; 6781 6782 6783 cpuset_cpus_allowed(p, cpus_allowed); 6784 cpumask_and(new_mask, in_mask, cpus_allowed); 6785 6786 /* 6787 * Since bandwidth control happens on root_domain basis, 6788 * if admission test is enabled, we only admit -deadline 6789 * tasks allowed to run on all the CPUs in the task's 6790 * root_domain. 6791 */ 6792 #ifdef CONFIG_SMP 6793 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 6794 rcu_read_lock(); 6795 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { 6796 retval = -EBUSY; 6797 rcu_read_unlock(); 6798 goto out_free_new_mask; 6799 } 6800 rcu_read_unlock(); 6801 } 6802 #endif 6803 again: 6804 retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK); 6805 6806 if (!retval) { 6807 cpuset_cpus_allowed(p, cpus_allowed); 6808 if (!cpumask_subset(new_mask, cpus_allowed)) { 6809 /* 6810 * We must have raced with a concurrent cpuset 6811 * update. Just reset the cpus_allowed to the 6812 * cpuset's cpus_allowed 6813 */ 6814 cpumask_copy(new_mask, cpus_allowed); 6815 goto again; 6816 } 6817 } 6818 out_free_new_mask: 6819 free_cpumask_var(new_mask); 6820 out_free_cpus_allowed: 6821 free_cpumask_var(cpus_allowed); 6822 out_put_task: 6823 put_task_struct(p); 6824 return retval; 6825 } 6826 6827 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 6828 struct cpumask *new_mask) 6829 { 6830 if (len < cpumask_size()) 6831 cpumask_clear(new_mask); 6832 else if (len > cpumask_size()) 6833 len = cpumask_size(); 6834 6835 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 6836 } 6837 6838 /** 6839 * sys_sched_setaffinity - set the CPU affinity of a process 6840 * @pid: pid of the process 6841 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 6842 * @user_mask_ptr: user-space pointer to the new CPU mask 6843 * 6844 * Return: 0 on success. An error code otherwise. 
6845 */ 6846 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 6847 unsigned long __user *, user_mask_ptr) 6848 { 6849 cpumask_var_t new_mask; 6850 int retval; 6851 6852 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 6853 return -ENOMEM; 6854 6855 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 6856 if (retval == 0) 6857 retval = sched_setaffinity(pid, new_mask); 6858 free_cpumask_var(new_mask); 6859 return retval; 6860 } 6861 6862 long sched_getaffinity(pid_t pid, struct cpumask *mask) 6863 { 6864 struct task_struct *p; 6865 unsigned long flags; 6866 int retval; 6867 6868 rcu_read_lock(); 6869 6870 retval = -ESRCH; 6871 p = find_process_by_pid(pid); 6872 if (!p) 6873 goto out_unlock; 6874 6875 retval = security_task_getscheduler(p); 6876 if (retval) 6877 goto out_unlock; 6878 6879 raw_spin_lock_irqsave(&p->pi_lock, flags); 6880 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); 6881 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 6882 6883 out_unlock: 6884 rcu_read_unlock(); 6885 6886 return retval; 6887 } 6888 6889 /** 6890 * sys_sched_getaffinity - get the CPU affinity of a process 6891 * @pid: pid of the process 6892 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 6893 * @user_mask_ptr: user-space pointer to hold the current CPU mask 6894 * 6895 * Return: size of CPU mask copied to user_mask_ptr on success. An 6896 * error code otherwise. 6897 */ 6898 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 6899 unsigned long __user *, user_mask_ptr) 6900 { 6901 int ret; 6902 cpumask_var_t mask; 6903 6904 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 6905 return -EINVAL; 6906 if (len & (sizeof(unsigned long)-1)) 6907 return -EINVAL; 6908 6909 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 6910 return -ENOMEM; 6911 6912 ret = sched_getaffinity(pid, mask); 6913 if (ret == 0) { 6914 unsigned int retlen = min(len, cpumask_size()); 6915 6916 if (copy_to_user(user_mask_ptr, mask, retlen)) 6917 ret = -EFAULT; 6918 else 6919 ret = retlen; 6920 } 6921 free_cpumask_var(mask); 6922 6923 return ret; 6924 } 6925 6926 static void do_sched_yield(void) 6927 { 6928 struct rq_flags rf; 6929 struct rq *rq; 6930 6931 rq = this_rq_lock_irq(&rf); 6932 6933 schedstat_inc(rq->yld_count); 6934 current->sched_class->yield_task(rq); 6935 6936 preempt_disable(); 6937 rq_unlock_irq(rq, &rf); 6938 sched_preempt_enable_no_resched(); 6939 6940 schedule(); 6941 } 6942 6943 /** 6944 * sys_sched_yield - yield the current processor to other threads. 6945 * 6946 * This function yields the current CPU to other tasks. If there are no 6947 * other threads running on this CPU then this function will return. 6948 * 6949 * Return: 0. 6950 */ 6951 SYSCALL_DEFINE0(sched_yield) 6952 { 6953 do_sched_yield(); 6954 return 0; 6955 } 6956 6957 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) 6958 int __sched __cond_resched(void) 6959 { 6960 if (should_resched(0)) { 6961 preempt_schedule_common(); 6962 return 1; 6963 } 6964 #ifndef CONFIG_PREEMPT_RCU 6965 rcu_all_qs(); 6966 #endif 6967 return 0; 6968 } 6969 EXPORT_SYMBOL(__cond_resched); 6970 #endif 6971 6972 #ifdef CONFIG_PREEMPT_DYNAMIC 6973 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched); 6974 EXPORT_STATIC_CALL_TRAMP(cond_resched); 6975 6976 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched); 6977 EXPORT_STATIC_CALL_TRAMP(might_resched); 6978 #endif 6979 6980 /* 6981 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 6982 * call schedule, and on return reacquire the lock. 
6983 * 6984 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level 6985 * operations here to prevent schedule() from being called twice (once via 6986 * spin_unlock(), once by hand). 6987 */ 6988 int __cond_resched_lock(spinlock_t *lock) 6989 { 6990 int resched = should_resched(PREEMPT_LOCK_OFFSET); 6991 int ret = 0; 6992 6993 lockdep_assert_held(lock); 6994 6995 if (spin_needbreak(lock) || resched) { 6996 spin_unlock(lock); 6997 if (resched) 6998 preempt_schedule_common(); 6999 else 7000 cpu_relax(); 7001 ret = 1; 7002 spin_lock(lock); 7003 } 7004 return ret; 7005 } 7006 EXPORT_SYMBOL(__cond_resched_lock); 7007 7008 int __cond_resched_rwlock_read(rwlock_t *lock) 7009 { 7010 int resched = should_resched(PREEMPT_LOCK_OFFSET); 7011 int ret = 0; 7012 7013 lockdep_assert_held_read(lock); 7014 7015 if (rwlock_needbreak(lock) || resched) { 7016 read_unlock(lock); 7017 if (resched) 7018 preempt_schedule_common(); 7019 else 7020 cpu_relax(); 7021 ret = 1; 7022 read_lock(lock); 7023 } 7024 return ret; 7025 } 7026 EXPORT_SYMBOL(__cond_resched_rwlock_read); 7027 7028 int __cond_resched_rwlock_write(rwlock_t *lock) 7029 { 7030 int resched = should_resched(PREEMPT_LOCK_OFFSET); 7031 int ret = 0; 7032 7033 lockdep_assert_held_write(lock); 7034 7035 if (rwlock_needbreak(lock) || resched) { 7036 write_unlock(lock); 7037 if (resched) 7038 preempt_schedule_common(); 7039 else 7040 cpu_relax(); 7041 ret = 1; 7042 write_lock(lock); 7043 } 7044 return ret; 7045 } 7046 EXPORT_SYMBOL(__cond_resched_rwlock_write); 7047 7048 /** 7049 * yield - yield the current processor to other threads. 7050 * 7051 * Do not ever use this function, there's a 99% chance you're doing it wrong. 7052 * 7053 * The scheduler is at all times free to pick the calling task as the most 7054 * eligible task to run, if removing the yield() call from your code breaks 7055 * it, it's already broken. 7056 * 7057 * Typical broken usage is: 7058 * 7059 * while (!event) 7060 * yield(); 7061 * 7062 * where one assumes that yield() will let 'the other' process run that will 7063 * make event true. If the current task is a SCHED_FIFO task that will never 7064 * happen. Never use yield() as a progress guarantee!! 7065 * 7066 * If you want to use yield() to wait for something, use wait_event(). 7067 * If you want to use yield() to be 'nice' for others, use cond_resched(). 7068 * If you still want to use yield(), do not! 7069 */ 7070 void __sched yield(void) 7071 { 7072 set_current_state(TASK_RUNNING); 7073 do_sched_yield(); 7074 } 7075 EXPORT_SYMBOL(yield); 7076 7077 /** 7078 * yield_to - yield the current processor to another thread in 7079 * your thread group, or accelerate that thread toward the 7080 * processor it's on. 7081 * @p: target task 7082 * @preempt: whether task preemption is allowed or not 7083 * 7084 * It's the caller's job to ensure that the target task struct 7085 * can't go away on us before we can do any checks. 7086 * 7087 * Return: 7088 * true (>0) if we indeed boosted the target task. 7089 * false (0) if we failed to boost the target. 7090 * -ESRCH if there's no task to yield to. 7091 */ 7092 int __sched yield_to(struct task_struct *p, bool preempt) 7093 { 7094 struct task_struct *curr = current; 7095 struct rq *rq, *p_rq; 7096 unsigned long flags; 7097 int yielded = 0; 7098 7099 local_irq_save(flags); 7100 rq = this_rq(); 7101 7102 again: 7103 p_rq = task_rq(p); 7104 /* 7105 * If we're the only runnable task on the rq and target rq also 7106 * has only one task, there's absolutely no point in yielding. 
7107 */ 7108 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 7109 yielded = -ESRCH; 7110 goto out_irq; 7111 } 7112 7113 double_rq_lock(rq, p_rq); 7114 if (task_rq(p) != p_rq) { 7115 double_rq_unlock(rq, p_rq); 7116 goto again; 7117 } 7118 7119 if (!curr->sched_class->yield_to_task) 7120 goto out_unlock; 7121 7122 if (curr->sched_class != p->sched_class) 7123 goto out_unlock; 7124 7125 if (task_running(p_rq, p) || p->state) 7126 goto out_unlock; 7127 7128 yielded = curr->sched_class->yield_to_task(rq, p); 7129 if (yielded) { 7130 schedstat_inc(rq->yld_count); 7131 /* 7132 * Make p's CPU reschedule; pick_next_entity takes care of 7133 * fairness. 7134 */ 7135 if (preempt && rq != p_rq) 7136 resched_curr(p_rq); 7137 } 7138 7139 out_unlock: 7140 double_rq_unlock(rq, p_rq); 7141 out_irq: 7142 local_irq_restore(flags); 7143 7144 if (yielded > 0) 7145 schedule(); 7146 7147 return yielded; 7148 } 7149 EXPORT_SYMBOL_GPL(yield_to); 7150 7151 int io_schedule_prepare(void) 7152 { 7153 int old_iowait = current->in_iowait; 7154 7155 current->in_iowait = 1; 7156 blk_schedule_flush_plug(current); 7157 7158 return old_iowait; 7159 } 7160 7161 void io_schedule_finish(int token) 7162 { 7163 current->in_iowait = token; 7164 } 7165 7166 /* 7167 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 7168 * that process accounting knows that this is a task in IO wait state. 7169 */ 7170 long __sched io_schedule_timeout(long timeout) 7171 { 7172 int token; 7173 long ret; 7174 7175 token = io_schedule_prepare(); 7176 ret = schedule_timeout(timeout); 7177 io_schedule_finish(token); 7178 7179 return ret; 7180 } 7181 EXPORT_SYMBOL(io_schedule_timeout); 7182 7183 void __sched io_schedule(void) 7184 { 7185 int token; 7186 7187 token = io_schedule_prepare(); 7188 schedule(); 7189 io_schedule_finish(token); 7190 } 7191 EXPORT_SYMBOL(io_schedule); 7192 7193 /** 7194 * sys_sched_get_priority_max - return maximum RT priority. 7195 * @policy: scheduling class. 7196 * 7197 * Return: On success, this syscall returns the maximum 7198 * rt_priority that can be used by a given scheduling class. 7199 * On failure, a negative error code is returned. 7200 */ 7201 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 7202 { 7203 int ret = -EINVAL; 7204 7205 switch (policy) { 7206 case SCHED_FIFO: 7207 case SCHED_RR: 7208 ret = MAX_RT_PRIO-1; 7209 break; 7210 case SCHED_DEADLINE: 7211 case SCHED_NORMAL: 7212 case SCHED_BATCH: 7213 case SCHED_IDLE: 7214 ret = 0; 7215 break; 7216 } 7217 return ret; 7218 } 7219 7220 /** 7221 * sys_sched_get_priority_min - return minimum RT priority. 7222 * @policy: scheduling class. 7223 * 7224 * Return: On success, this syscall returns the minimum 7225 * rt_priority that can be used by a given scheduling class. 7226 * On failure, a negative error code is returned. 
7227 */ 7228 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 7229 { 7230 int ret = -EINVAL; 7231 7232 switch (policy) { 7233 case SCHED_FIFO: 7234 case SCHED_RR: 7235 ret = 1; 7236 break; 7237 case SCHED_DEADLINE: 7238 case SCHED_NORMAL: 7239 case SCHED_BATCH: 7240 case SCHED_IDLE: 7241 ret = 0; 7242 } 7243 return ret; 7244 } 7245 7246 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 7247 { 7248 struct task_struct *p; 7249 unsigned int time_slice; 7250 struct rq_flags rf; 7251 struct rq *rq; 7252 int retval; 7253 7254 if (pid < 0) 7255 return -EINVAL; 7256 7257 retval = -ESRCH; 7258 rcu_read_lock(); 7259 p = find_process_by_pid(pid); 7260 if (!p) 7261 goto out_unlock; 7262 7263 retval = security_task_getscheduler(p); 7264 if (retval) 7265 goto out_unlock; 7266 7267 rq = task_rq_lock(p, &rf); 7268 time_slice = 0; 7269 if (p->sched_class->get_rr_interval) 7270 time_slice = p->sched_class->get_rr_interval(rq, p); 7271 task_rq_unlock(rq, p, &rf); 7272 7273 rcu_read_unlock(); 7274 jiffies_to_timespec64(time_slice, t); 7275 return 0; 7276 7277 out_unlock: 7278 rcu_read_unlock(); 7279 return retval; 7280 } 7281 7282 /** 7283 * sys_sched_rr_get_interval - return the default timeslice of a process. 7284 * @pid: pid of the process. 7285 * @interval: userspace pointer to the timeslice value. 7286 * 7287 * this syscall writes the default timeslice value of a given process 7288 * into the user-space timespec buffer. A value of '0' means infinity. 7289 * 7290 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 7291 * an error code. 7292 */ 7293 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 7294 struct __kernel_timespec __user *, interval) 7295 { 7296 struct timespec64 t; 7297 int retval = sched_rr_get_interval(pid, &t); 7298 7299 if (retval == 0) 7300 retval = put_timespec64(&t, interval); 7301 7302 return retval; 7303 } 7304 7305 #ifdef CONFIG_COMPAT_32BIT_TIME 7306 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, 7307 struct old_timespec32 __user *, interval) 7308 { 7309 struct timespec64 t; 7310 int retval = sched_rr_get_interval(pid, &t); 7311 7312 if (retval == 0) 7313 retval = put_old_timespec32(&t, interval); 7314 return retval; 7315 } 7316 #endif 7317 7318 void sched_show_task(struct task_struct *p) 7319 { 7320 unsigned long free = 0; 7321 int ppid; 7322 7323 if (!try_get_task_stack(p)) 7324 return; 7325 7326 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); 7327 7328 if (p->state == TASK_RUNNING) 7329 pr_cont(" running task "); 7330 #ifdef CONFIG_DEBUG_STACK_USAGE 7331 free = stack_not_used(p); 7332 #endif 7333 ppid = 0; 7334 rcu_read_lock(); 7335 if (pid_alive(p)) 7336 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 7337 rcu_read_unlock(); 7338 pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n", 7339 free, task_pid_nr(p), ppid, 7340 (unsigned long)task_thread_info(p)->flags); 7341 7342 print_worker_info(KERN_INFO, p); 7343 print_stop_info(KERN_INFO, p); 7344 show_stack(p, NULL, KERN_INFO); 7345 put_task_stack(p); 7346 } 7347 EXPORT_SYMBOL_GPL(sched_show_task); 7348 7349 static inline bool 7350 state_filter_match(unsigned long state_filter, struct task_struct *p) 7351 { 7352 /* no filter, everything matches */ 7353 if (!state_filter) 7354 return true; 7355 7356 /* filter, but doesn't match */ 7357 if (!(p->state & state_filter)) 7358 return false; 7359 7360 /* 7361 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows 7362 * TASK_KILLABLE). 
7363 */ 7364 if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) 7365 return false; 7366 7367 return true; 7368 } 7369 7370 7371 void show_state_filter(unsigned long state_filter) 7372 { 7373 struct task_struct *g, *p; 7374 7375 rcu_read_lock(); 7376 for_each_process_thread(g, p) { 7377 /* 7378 * reset the NMI-timeout, listing all files on a slow 7379 * console might take a lot of time: 7380 * Also, reset softlockup watchdogs on all CPUs, because 7381 * another CPU might be blocked waiting for us to process 7382 * an IPI. 7383 */ 7384 touch_nmi_watchdog(); 7385 touch_all_softlockup_watchdogs(); 7386 if (state_filter_match(state_filter, p)) 7387 sched_show_task(p); 7388 } 7389 7390 #ifdef CONFIG_SCHED_DEBUG 7391 if (!state_filter) 7392 sysrq_sched_debug_show(); 7393 #endif 7394 rcu_read_unlock(); 7395 /* 7396 * Only show locks if all tasks are dumped: 7397 */ 7398 if (!state_filter) 7399 debug_show_all_locks(); 7400 } 7401 7402 /** 7403 * init_idle - set up an idle thread for a given CPU 7404 * @idle: task in question 7405 * @cpu: CPU the idle task belongs to 7406 * 7407 * NOTE: this function does not set the idle thread's NEED_RESCHED 7408 * flag, to make booting more robust. 7409 */ 7410 void init_idle(struct task_struct *idle, int cpu) 7411 { 7412 struct rq *rq = cpu_rq(cpu); 7413 unsigned long flags; 7414 7415 __sched_fork(0, idle); 7416 7417 raw_spin_lock_irqsave(&idle->pi_lock, flags); 7418 raw_spin_lock(&rq->lock); 7419 7420 idle->state = TASK_RUNNING; 7421 idle->se.exec_start = sched_clock(); 7422 idle->flags |= PF_IDLE; 7423 7424 scs_task_reset(idle); 7425 kasan_unpoison_task_stack(idle); 7426 7427 #ifdef CONFIG_SMP 7428 /* 7429 * It's possible that init_idle() gets called multiple times on a task, 7430 * in that case do_set_cpus_allowed() will not do the right thing. 7431 * 7432 * And since this is boot we can forgo the serialization. 7433 */ 7434 set_cpus_allowed_common(idle, cpumask_of(cpu), 0); 7435 #endif 7436 /* 7437 * We're having a chicken and egg problem, even though we are 7438 * holding rq->lock, the CPU isn't yet set to this CPU so the 7439 * lockdep check in task_group() will fail. 7440 * 7441 * Similar case to sched_fork(). / Alternatively we could 7442 * use task_rq_lock() here and obtain the other rq->lock. 7443 * 7444 * Silence PROVE_RCU 7445 */ 7446 rcu_read_lock(); 7447 __set_task_cpu(idle, cpu); 7448 rcu_read_unlock(); 7449 7450 rq->idle = idle; 7451 rcu_assign_pointer(rq->curr, idle); 7452 idle->on_rq = TASK_ON_RQ_QUEUED; 7453 #ifdef CONFIG_SMP 7454 idle->on_cpu = 1; 7455 #endif 7456 raw_spin_unlock(&rq->lock); 7457 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 7458 7459 /* Set the preempt count _outside_ the spinlocks! 
*/ 7460 init_idle_preempt_count(idle, cpu); 7461 7462 /* 7463 * The idle tasks have their own, simple scheduling class: 7464 */ 7465 idle->sched_class = &idle_sched_class; 7466 ftrace_graph_init_idle_task(idle, cpu); 7467 vtime_init_idle(idle, cpu); 7468 #ifdef CONFIG_SMP 7469 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 7470 #endif 7471 } 7472 7473 #ifdef CONFIG_SMP 7474 7475 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 7476 const struct cpumask *trial) 7477 { 7478 int ret = 1; 7479 7480 if (!cpumask_weight(cur)) 7481 return ret; 7482 7483 ret = dl_cpuset_cpumask_can_shrink(cur, trial); 7484 7485 return ret; 7486 } 7487 7488 int task_can_attach(struct task_struct *p, 7489 const struct cpumask *cs_cpus_allowed) 7490 { 7491 int ret = 0; 7492 7493 /* 7494 * Kthreads which disallow setaffinity shouldn't be moved 7495 * to a new cpuset; we don't want to change their CPU 7496 * affinity and isolating such threads by their set of 7497 * allowed nodes is unnecessary. Thus, cpusets are not 7498 * applicable for such threads. This prevents checking for 7499 * success of set_cpus_allowed_ptr() on all attached tasks 7500 * before cpus_mask may be changed. 7501 */ 7502 if (p->flags & PF_NO_SETAFFINITY) { 7503 ret = -EINVAL; 7504 goto out; 7505 } 7506 7507 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 7508 cs_cpus_allowed)) 7509 ret = dl_task_can_attach(p, cs_cpus_allowed); 7510 7511 out: 7512 return ret; 7513 } 7514 7515 bool sched_smp_initialized __read_mostly; 7516 7517 #ifdef CONFIG_NUMA_BALANCING 7518 /* Migrate current task p to target_cpu */ 7519 int migrate_task_to(struct task_struct *p, int target_cpu) 7520 { 7521 struct migration_arg arg = { p, target_cpu }; 7522 int curr_cpu = task_cpu(p); 7523 7524 if (curr_cpu == target_cpu) 7525 return 0; 7526 7527 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 7528 return -EINVAL; 7529 7530 /* TODO: This is not properly updating schedstats */ 7531 7532 trace_sched_move_numa(p, curr_cpu, target_cpu); 7533 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 7534 } 7535 7536 /* 7537 * Requeue a task on a given node and accurately track the number of NUMA 7538 * tasks on the runqueues 7539 */ 7540 void sched_setnuma(struct task_struct *p, int nid) 7541 { 7542 bool queued, running; 7543 struct rq_flags rf; 7544 struct rq *rq; 7545 7546 rq = task_rq_lock(p, &rf); 7547 queued = task_on_rq_queued(p); 7548 running = task_current(rq, p); 7549 7550 if (queued) 7551 dequeue_task(rq, p, DEQUEUE_SAVE); 7552 if (running) 7553 put_prev_task(rq, p); 7554 7555 p->numa_preferred_nid = nid; 7556 7557 if (queued) 7558 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 7559 if (running) 7560 set_next_task(rq, p); 7561 task_rq_unlock(rq, p, &rf); 7562 } 7563 #endif /* CONFIG_NUMA_BALANCING */ 7564 7565 #ifdef CONFIG_HOTPLUG_CPU 7566 /* 7567 * Ensure that the idle task is using init_mm right before its CPU goes 7568 * offline. 
7569 */ 7570 void idle_task_exit(void) 7571 { 7572 struct mm_struct *mm = current->active_mm; 7573 7574 BUG_ON(cpu_online(smp_processor_id())); 7575 BUG_ON(current != this_rq()->idle); 7576 7577 if (mm != &init_mm) { 7578 switch_mm(mm, &init_mm, current); 7579 finish_arch_post_lock_switch(); 7580 } 7581 7582 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ 7583 } 7584 7585 static int __balance_push_cpu_stop(void *arg) 7586 { 7587 struct task_struct *p = arg; 7588 struct rq *rq = this_rq(); 7589 struct rq_flags rf; 7590 int cpu; 7591 7592 raw_spin_lock_irq(&p->pi_lock); 7593 rq_lock(rq, &rf); 7594 7595 update_rq_clock(rq); 7596 7597 if (task_rq(p) == rq && task_on_rq_queued(p)) { 7598 cpu = select_fallback_rq(rq->cpu, p); 7599 rq = __migrate_task(rq, &rf, p, cpu); 7600 } 7601 7602 rq_unlock(rq, &rf); 7603 raw_spin_unlock_irq(&p->pi_lock); 7604 7605 put_task_struct(p); 7606 7607 return 0; 7608 } 7609 7610 static DEFINE_PER_CPU(struct cpu_stop_work, push_work); 7611 7612 /* 7613 * Ensure we only run per-cpu kthreads once the CPU goes !active. 7614 */ 7615 static void balance_push(struct rq *rq) 7616 { 7617 struct task_struct *push_task = rq->curr; 7618 7619 lockdep_assert_held(&rq->lock); 7620 SCHED_WARN_ON(rq->cpu != smp_processor_id()); 7621 /* 7622 * Ensure the thing is persistent until balance_push_set(.on = false); 7623 */ 7624 rq->balance_callback = &balance_push_callback; 7625 7626 /* 7627 * Both the cpu-hotplug and stop task are in this case and are 7628 * required to complete the hotplug process. 7629 * 7630 * XXX: the idle task does not match kthread_is_per_cpu() due to 7631 * histerical raisins. 7632 */ 7633 if (rq->idle == push_task || 7634 ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) || 7635 is_migration_disabled(push_task)) { 7636 7637 /* 7638 * If this is the idle task on the outgoing CPU try to wake 7639 * up the hotplug control thread which might wait for the 7640 * last task to vanish. The rcuwait_active() check is 7641 * accurate here because the waiter is pinned on this CPU 7642 * and can't obviously be running in parallel. 7643 * 7644 * On RT kernels this also has to check whether there are 7645 * pinned and scheduled out tasks on the runqueue. They 7646 * need to leave the migrate disabled section first. 7647 */ 7648 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && 7649 rcuwait_active(&rq->hotplug_wait)) { 7650 raw_spin_unlock(&rq->lock); 7651 rcuwait_wake_up(&rq->hotplug_wait); 7652 raw_spin_lock(&rq->lock); 7653 } 7654 return; 7655 } 7656 7657 get_task_struct(push_task); 7658 /* 7659 * Temporarily drop rq->lock such that we can wake-up the stop task. 7660 * Both preemption and IRQs are still disabled. 7661 */ 7662 raw_spin_unlock(&rq->lock); 7663 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, 7664 this_cpu_ptr(&push_work)); 7665 /* 7666 * At this point need_resched() is true and we'll take the loop in 7667 * schedule(). The next pick is obviously going to be the stop task 7668 * which kthread_is_per_cpu() and will push this task away. 
7669 */ 7670 raw_spin_lock(&rq->lock); 7671 } 7672 7673 static void balance_push_set(int cpu, bool on) 7674 { 7675 struct rq *rq = cpu_rq(cpu); 7676 struct rq_flags rf; 7677 7678 rq_lock_irqsave(rq, &rf); 7679 rq->balance_push = on; 7680 if (on) { 7681 WARN_ON_ONCE(rq->balance_callback); 7682 rq->balance_callback = &balance_push_callback; 7683 } else if (rq->balance_callback == &balance_push_callback) { 7684 rq->balance_callback = NULL; 7685 } 7686 rq_unlock_irqrestore(rq, &rf); 7687 } 7688 7689 /* 7690 * Invoked from a CPUs hotplug control thread after the CPU has been marked 7691 * inactive. All tasks which are not per CPU kernel threads are either 7692 * pushed off this CPU now via balance_push() or placed on a different CPU 7693 * during wakeup. Wait until the CPU is quiescent. 7694 */ 7695 static void balance_hotplug_wait(void) 7696 { 7697 struct rq *rq = this_rq(); 7698 7699 rcuwait_wait_event(&rq->hotplug_wait, 7700 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), 7701 TASK_UNINTERRUPTIBLE); 7702 } 7703 7704 #else 7705 7706 static inline void balance_push(struct rq *rq) 7707 { 7708 } 7709 7710 static inline void balance_push_set(int cpu, bool on) 7711 { 7712 } 7713 7714 static inline void balance_hotplug_wait(void) 7715 { 7716 } 7717 7718 #endif /* CONFIG_HOTPLUG_CPU */ 7719 7720 void set_rq_online(struct rq *rq) 7721 { 7722 if (!rq->online) { 7723 const struct sched_class *class; 7724 7725 cpumask_set_cpu(rq->cpu, rq->rd->online); 7726 rq->online = 1; 7727 7728 for_each_class(class) { 7729 if (class->rq_online) 7730 class->rq_online(rq); 7731 } 7732 } 7733 } 7734 7735 void set_rq_offline(struct rq *rq) 7736 { 7737 if (rq->online) { 7738 const struct sched_class *class; 7739 7740 for_each_class(class) { 7741 if (class->rq_offline) 7742 class->rq_offline(rq); 7743 } 7744 7745 cpumask_clear_cpu(rq->cpu, rq->rd->online); 7746 rq->online = 0; 7747 } 7748 } 7749 7750 /* 7751 * used to mark begin/end of suspend/resume: 7752 */ 7753 static int num_cpus_frozen; 7754 7755 /* 7756 * Update cpusets according to cpu_active mask. If cpusets are 7757 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 7758 * around partition_sched_domains(). 7759 * 7760 * If we come here as part of a suspend/resume, don't touch cpusets because we 7761 * want to restore it back to its original state upon resume anyway. 7762 */ 7763 static void cpuset_cpu_active(void) 7764 { 7765 if (cpuhp_tasks_frozen) { 7766 /* 7767 * num_cpus_frozen tracks how many CPUs are involved in suspend 7768 * resume sequence. As long as this is not the last online 7769 * operation in the resume sequence, just build a single sched 7770 * domain, ignoring cpusets. 7771 */ 7772 partition_sched_domains(1, NULL, NULL); 7773 if (--num_cpus_frozen) 7774 return; 7775 /* 7776 * This is the last CPU online operation. So fall through and 7777 * restore the original sched domains by considering the 7778 * cpuset configurations. 7779 */ 7780 cpuset_force_rebuild(); 7781 } 7782 cpuset_update_active_cpus(); 7783 } 7784 7785 static int cpuset_cpu_inactive(unsigned int cpu) 7786 { 7787 if (!cpuhp_tasks_frozen) { 7788 if (dl_cpu_busy(cpu)) 7789 return -EBUSY; 7790 cpuset_update_active_cpus(); 7791 } else { 7792 num_cpus_frozen++; 7793 partition_sched_domains(1, NULL, NULL); 7794 } 7795 return 0; 7796 } 7797 7798 int sched_cpu_activate(unsigned int cpu) 7799 { 7800 struct rq *rq = cpu_rq(cpu); 7801 struct rq_flags rf; 7802 7803 /* 7804 * Make sure that when the hotplug state machine does a roll-back 7805 * we clear balance_push. 
Ideally that would happen earlier...
7806 */
7807 balance_push_set(cpu, false);
7808
7809 #ifdef CONFIG_SCHED_SMT
7810 /*
7811 * When going up, increment the number of cores with SMT present.
7812 */
7813 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
7814 static_branch_inc_cpuslocked(&sched_smt_present);
7815 #endif
7816 set_cpu_active(cpu, true);
7817
7818 if (sched_smp_initialized) {
7819 sched_domains_numa_masks_set(cpu);
7820 cpuset_cpu_active();
7821 }
7822
7823 /*
7824 * Put the rq online, if not already. This happens:
7825 *
7826 * 1) In the early boot process, because we build the real domains
7827 * after all CPUs have been brought up.
7828 *
7829 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
7830 * domains.
7831 */
7832 rq_lock_irqsave(rq, &rf);
7833 if (rq->rd) {
7834 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7835 set_rq_online(rq);
7836 }
7837 rq_unlock_irqrestore(rq, &rf);
7838
7839 return 0;
7840 }
7841
7842 int sched_cpu_deactivate(unsigned int cpu)
7843 {
7844 struct rq *rq = cpu_rq(cpu);
7845 struct rq_flags rf;
7846 int ret;
7847
7848 /*
7849 * Remove the CPU from nohz.idle_cpus_mask to prevent it from
7850 * participating in load balancing while not active.
7851 */
7852 nohz_balance_exit_idle(rq);
7853
7854 set_cpu_active(cpu, false);
7855
7856 /*
7857 * From this point forward, this CPU will refuse to run any task that
7858 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
7859 * push those tasks away until this gets cleared, see
7860 * sched_cpu_dying().
7861 */
7862 balance_push_set(cpu, true);
7863
7864 /*
7865 * We've cleared cpu_active_mask / set balance_push, wait for all
7866 * preempt-disabled and RCU users of this state to go away such that
7867 * all new such users will observe it.
7868 *
7869 * Specifically, we rely on ttwu to no longer target this CPU, see
7870 * ttwu_queue_cond() and is_cpu_allowed().
7871 *
7872 * Do the sync before parking the smpboot threads to take care of the
RCU boost case.
7873 */
7874 synchronize_rcu();
7875
7876 rq_lock_irqsave(rq, &rf);
7877 if (rq->rd) {
7878 update_rq_clock(rq);
7879 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7880 set_rq_offline(rq);
7881 }
7882 rq_unlock_irqrestore(rq, &rf);
7883
7884 #ifdef CONFIG_SCHED_SMT
7885 /*
7886 * When going down, decrement the number of cores with SMT present.
7887 */
7888 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
7889 static_branch_dec_cpuslocked(&sched_smt_present);
7890 #endif
7891
7892 if (!sched_smp_initialized)
7893 return 0;
7894
7895 ret = cpuset_cpu_inactive(cpu);
7896 if (ret) {
7897 balance_push_set(cpu, false);
7898 set_cpu_active(cpu, true);
7899 return ret;
7900 }
7901 sched_domains_numa_masks_clear(cpu);
7902 return 0;
7903 }
7904
7905 static void sched_rq_cpu_starting(unsigned int cpu)
7906 {
7907 struct rq *rq = cpu_rq(cpu);
7908
7909 rq->calc_load_update = calc_load_update;
7910 update_max_interval();
7911 }
7912
7913 int sched_cpu_starting(unsigned int cpu)
7914 {
7915 sched_rq_cpu_starting(cpu);
7916 sched_tick_start(cpu);
7917 return 0;
7918 }
7919
7920 #ifdef CONFIG_HOTPLUG_CPU
7921
7922 /*
7923 * Called immediately before the stopper thread is invoked to bring the
7924 * CPU down completely. At this point all per CPU kthreads except the
7925 * hotplug thread (current) and the stopper thread (inactive) have been
7926 * either parked or have been unbound from the outgoing CPU. Ensure that
7927 * any of those which might be on the way out are gone.
7928 * 7929 * If after this point a bound task is being woken on this CPU then the 7930 * responsible hotplug callback has failed to do it's job. 7931 * sched_cpu_dying() will catch it with the appropriate fireworks. 7932 */ 7933 int sched_cpu_wait_empty(unsigned int cpu) 7934 { 7935 balance_hotplug_wait(); 7936 return 0; 7937 } 7938 7939 /* 7940 * Since this CPU is going 'away' for a while, fold any nr_active delta we 7941 * might have. Called from the CPU stopper task after ensuring that the 7942 * stopper is the last running task on the CPU, so nr_active count is 7943 * stable. We need to take the teardown thread which is calling this into 7944 * account, so we hand in adjust = 1 to the load calculation. 7945 * 7946 * Also see the comment "Global load-average calculations". 7947 */ 7948 static void calc_load_migrate(struct rq *rq) 7949 { 7950 long delta = calc_load_fold_active(rq, 1); 7951 7952 if (delta) 7953 atomic_long_add(delta, &calc_load_tasks); 7954 } 7955 7956 static void dump_rq_tasks(struct rq *rq, const char *loglvl) 7957 { 7958 struct task_struct *g, *p; 7959 int cpu = cpu_of(rq); 7960 7961 lockdep_assert_held(&rq->lock); 7962 7963 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); 7964 for_each_process_thread(g, p) { 7965 if (task_cpu(p) != cpu) 7966 continue; 7967 7968 if (!task_on_rq_queued(p)) 7969 continue; 7970 7971 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); 7972 } 7973 } 7974 7975 int sched_cpu_dying(unsigned int cpu) 7976 { 7977 struct rq *rq = cpu_rq(cpu); 7978 struct rq_flags rf; 7979 7980 /* Handle pending wakeups and then migrate everything off */ 7981 sched_tick_stop(cpu); 7982 7983 rq_lock_irqsave(rq, &rf); 7984 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { 7985 WARN(true, "Dying CPU not properly vacated!"); 7986 dump_rq_tasks(rq, KERN_WARNING); 7987 } 7988 rq_unlock_irqrestore(rq, &rf); 7989 7990 /* 7991 * Now that the CPU is offline, make sure we're welcome 7992 * to new tasks once we come back up. 7993 */ 7994 balance_push_set(cpu, false); 7995 7996 calc_load_migrate(rq); 7997 update_max_interval(); 7998 hrtick_clear(rq); 7999 return 0; 8000 } 8001 #endif 8002 8003 void __init sched_init_smp(void) 8004 { 8005 sched_init_numa(); 8006 8007 /* 8008 * There's no userspace yet to cause hotplug operations; hence all the 8009 * CPU masks are stable and all blatant races in the below code cannot 8010 * happen. 8011 */ 8012 mutex_lock(&sched_domains_mutex); 8013 sched_init_domains(cpu_active_mask); 8014 mutex_unlock(&sched_domains_mutex); 8015 8016 /* Move init over to a non-isolated CPU */ 8017 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) 8018 BUG(); 8019 sched_init_granularity(); 8020 8021 init_sched_rt_class(); 8022 init_sched_dl_class(); 8023 8024 sched_smp_initialized = true; 8025 } 8026 8027 static int __init migration_init(void) 8028 { 8029 sched_cpu_starting(smp_processor_id()); 8030 return 0; 8031 } 8032 early_initcall(migration_init); 8033 8034 #else 8035 void __init sched_init_smp(void) 8036 { 8037 sched_init_granularity(); 8038 } 8039 #endif /* CONFIG_SMP */ 8040 8041 int in_sched_functions(unsigned long addr) 8042 { 8043 return in_lock_functions(addr) || 8044 (addr >= (unsigned long)__sched_text_start 8045 && addr < (unsigned long)__sched_text_end); 8046 } 8047 8048 #ifdef CONFIG_CGROUP_SCHED 8049 /* 8050 * Default task group. 8051 * Every task in system belongs to this group at bootup. 
8052 */ 8053 struct task_group root_task_group; 8054 LIST_HEAD(task_groups); 8055 8056 /* Cacheline aligned slab cache for task_group */ 8057 static struct kmem_cache *task_group_cache __read_mostly; 8058 #endif 8059 8060 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 8061 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask); 8062 8063 void __init sched_init(void) 8064 { 8065 unsigned long ptr = 0; 8066 int i; 8067 8068 /* Make sure the linker didn't screw up */ 8069 BUG_ON(&idle_sched_class + 1 != &fair_sched_class || 8070 &fair_sched_class + 1 != &rt_sched_class || 8071 &rt_sched_class + 1 != &dl_sched_class); 8072 #ifdef CONFIG_SMP 8073 BUG_ON(&dl_sched_class + 1 != &stop_sched_class); 8074 #endif 8075 8076 wait_bit_init(); 8077 8078 #ifdef CONFIG_FAIR_GROUP_SCHED 8079 ptr += 2 * nr_cpu_ids * sizeof(void **); 8080 #endif 8081 #ifdef CONFIG_RT_GROUP_SCHED 8082 ptr += 2 * nr_cpu_ids * sizeof(void **); 8083 #endif 8084 if (ptr) { 8085 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); 8086 8087 #ifdef CONFIG_FAIR_GROUP_SCHED 8088 root_task_group.se = (struct sched_entity **)ptr; 8089 ptr += nr_cpu_ids * sizeof(void **); 8090 8091 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 8092 ptr += nr_cpu_ids * sizeof(void **); 8093 8094 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 8095 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 8096 #endif /* CONFIG_FAIR_GROUP_SCHED */ 8097 #ifdef CONFIG_RT_GROUP_SCHED 8098 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 8099 ptr += nr_cpu_ids * sizeof(void **); 8100 8101 root_task_group.rt_rq = (struct rt_rq **)ptr; 8102 ptr += nr_cpu_ids * sizeof(void **); 8103 8104 #endif /* CONFIG_RT_GROUP_SCHED */ 8105 } 8106 #ifdef CONFIG_CPUMASK_OFFSTACK 8107 for_each_possible_cpu(i) { 8108 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 8109 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 8110 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node( 8111 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 8112 } 8113 #endif /* CONFIG_CPUMASK_OFFSTACK */ 8114 8115 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); 8116 init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime()); 8117 8118 #ifdef CONFIG_SMP 8119 init_defrootdomain(); 8120 #endif 8121 8122 #ifdef CONFIG_RT_GROUP_SCHED 8123 init_rt_bandwidth(&root_task_group.rt_bandwidth, 8124 global_rt_period(), global_rt_runtime()); 8125 #endif /* CONFIG_RT_GROUP_SCHED */ 8126 8127 #ifdef CONFIG_CGROUP_SCHED 8128 task_group_cache = KMEM_CACHE(task_group, 0); 8129 8130 list_add(&root_task_group.list, &task_groups); 8131 INIT_LIST_HEAD(&root_task_group.children); 8132 INIT_LIST_HEAD(&root_task_group.siblings); 8133 autogroup_init(&init_task); 8134 #endif /* CONFIG_CGROUP_SCHED */ 8135 8136 for_each_possible_cpu(i) { 8137 struct rq *rq; 8138 8139 rq = cpu_rq(i); 8140 raw_spin_lock_init(&rq->lock); 8141 rq->nr_running = 0; 8142 rq->calc_load_active = 0; 8143 rq->calc_load_update = jiffies + LOAD_FREQ; 8144 init_cfs_rq(&rq->cfs); 8145 init_rt_rq(&rq->rt); 8146 init_dl_rq(&rq->dl); 8147 #ifdef CONFIG_FAIR_GROUP_SCHED 8148 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 8149 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 8150 /* 8151 * How much CPU bandwidth does root_task_group get? 8152 * 8153 * In case of task-groups formed thr' the cgroup filesystem, it 8154 * gets 100% of the CPU resources in the system. 
This overall
		 * system CPU resource is divided among the tasks of
		 * root_task_group and its child task-groups in a fair manner,
		 * based on each entity's (task or task-group's) weight
		 * (se->load.weight).
		 *
		 * In other words, if root_task_group has 10 tasks of weight
		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
		 * then A0's share of the CPU resource is:
		 *
		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
		 *
		 * We achieve this by letting root_task_group's tasks sit
		 * directly in rq->cfs (i.e., root_task_group->se[] = NULL).
		 */
		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */

		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
#ifdef CONFIG_SMP
		rq->sd = NULL;
		rq->rd = NULL;
		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
		rq->balance_callback = NULL;
		rq->active_balance = 0;
		rq->next_balance = jiffies;
		rq->push_cpu = 0;
		rq->cpu = i;
		rq->online = 0;
		rq->idle_stamp = 0;
		rq->avg_idle = 2*sysctl_sched_migration_cost;
		rq->max_idle_balance_cost = sysctl_sched_migration_cost;

		INIT_LIST_HEAD(&rq->cfs_tasks);

		rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ_COMMON
		rq->last_blocked_load_update_tick = jiffies;
		atomic_set(&rq->nohz_flags, 0);

		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
#endif
#ifdef CONFIG_HOTPLUG_CPU
		rcuwait_init(&rq->hotplug_wait);
#endif
#endif /* CONFIG_SMP */
		hrtick_rq_init(rq);
		atomic_set(&rq->nr_iowait, 0);
	}

	set_load_weight(&init_task, false);

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	mmgrab(&init_mm);
	enter_lazy_tlb(&init_mm, current);

	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	init_idle(current, smp_processor_id());

	calc_load_update = jiffies + LOAD_FREQ;

#ifdef CONFIG_SMP
	idle_thread_set_boot_cpu();
#endif
	init_sched_fair_class();

	init_schedstats();

	psi_init();

	init_uclamp();

	scheduler_running = 1;
}

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
	int nested = preempt_count() + rcu_preempt_depth();

	return (nested == preempt_offset);
}

void __might_sleep(const char *file, int line, int preempt_offset)
{
	/*
	 * Blocking primitives will set (and therefore destroy) current->state;
	 * since we will exit with TASK_RUNNING, make sure we enter with it,
	 * otherwise we will destroy state.
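	 *
	 * [ A sketch of the wait-loop pattern this protects (standard kernel
	 *   usage, not taken from this file):
	 *
	 *	for (;;) {
	 *		set_current_state(TASK_UNINTERRUPTIBLE);
	 *		if (condition)
	 *			break;
	 *		schedule();
	 *	}
	 *	__set_current_state(TASK_RUNNING);
	 *
	 *   Calling a blocking primitive (anything that does might_sleep())
	 *   between set_current_state() and schedule() would clobber the
	 *   task state, which is exactly what the WARN_ONCE() below
	 *   catches. ]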
8253 */ 8254 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 8255 "do not call blocking ops when !TASK_RUNNING; " 8256 "state=%lx set at [<%p>] %pS\n", 8257 current->state, 8258 (void *)current->task_state_change, 8259 (void *)current->task_state_change); 8260 8261 ___might_sleep(file, line, preempt_offset); 8262 } 8263 EXPORT_SYMBOL(__might_sleep); 8264 8265 void ___might_sleep(const char *file, int line, int preempt_offset) 8266 { 8267 /* Ratelimiting timestamp: */ 8268 static unsigned long prev_jiffy; 8269 8270 unsigned long preempt_disable_ip; 8271 8272 /* WARN_ON_ONCE() by default, no rate limit required: */ 8273 rcu_sleep_check(); 8274 8275 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 8276 !is_idle_task(current) && !current->non_block_count) || 8277 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 8278 oops_in_progress) 8279 return; 8280 8281 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 8282 return; 8283 prev_jiffy = jiffies; 8284 8285 /* Save this before calling printk(), since that will clobber it: */ 8286 preempt_disable_ip = get_preempt_disable_ip(current); 8287 8288 printk(KERN_ERR 8289 "BUG: sleeping function called from invalid context at %s:%d\n", 8290 file, line); 8291 printk(KERN_ERR 8292 "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", 8293 in_atomic(), irqs_disabled(), current->non_block_count, 8294 current->pid, current->comm); 8295 8296 if (task_stack_end_corrupted(current)) 8297 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 8298 8299 debug_show_held_locks(current); 8300 if (irqs_disabled()) 8301 print_irqtrace_events(current); 8302 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 8303 && !preempt_count_equals(preempt_offset)) { 8304 pr_err("Preemption disabled at:"); 8305 print_ip_sym(KERN_ERR, preempt_disable_ip); 8306 } 8307 dump_stack(); 8308 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 8309 } 8310 EXPORT_SYMBOL(___might_sleep); 8311 8312 void __cant_sleep(const char *file, int line, int preempt_offset) 8313 { 8314 static unsigned long prev_jiffy; 8315 8316 if (irqs_disabled()) 8317 return; 8318 8319 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 8320 return; 8321 8322 if (preempt_count() > preempt_offset) 8323 return; 8324 8325 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 8326 return; 8327 prev_jiffy = jiffies; 8328 8329 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 8330 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 8331 in_atomic(), irqs_disabled(), 8332 current->pid, current->comm); 8333 8334 debug_show_held_locks(current); 8335 dump_stack(); 8336 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 8337 } 8338 EXPORT_SYMBOL_GPL(__cant_sleep); 8339 8340 #ifdef CONFIG_SMP 8341 void __cant_migrate(const char *file, int line) 8342 { 8343 static unsigned long prev_jiffy; 8344 8345 if (irqs_disabled()) 8346 return; 8347 8348 if (is_migration_disabled(current)) 8349 return; 8350 8351 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 8352 return; 8353 8354 if (preempt_count() > 0) 8355 return; 8356 8357 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 8358 return; 8359 prev_jiffy = jiffies; 8360 8361 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line); 8362 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n", 8363 in_atomic(), irqs_disabled(), is_migration_disabled(current), 8364 current->pid, current->comm); 8365 8366 debug_show_held_locks(current); 8367 
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_migrate);
#endif
#endif

#ifdef CONFIG_MAGIC_SYSRQ
void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
	};

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
		if (p->flags & PF_KTHREAD)
			continue;

		p->se.exec_start = 0;
		schedstat_set(p->se.statistics.wait_start, 0);
		schedstat_set(p->se.statistics.sleep_start, 0);
		schedstat_set(p->se.statistics.block_start, 0);

		if (!dl_task(p) && !rt_task(p)) {
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0:
			 */
			if (task_nice(p) < 0)
				set_user_nice(p, 0);
			continue;
		}

		__sched_setscheduler(p, &attr, false, false);
	}
	read_unlock(&tasklist_lock);
}

#endif /* CONFIG_MAGIC_SYSRQ */

#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
 * These functions are only useful for the IA64 MCA handling, or kdb.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given CPU.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 *
 * Return: The current task for @cpu.
 */
struct task_struct *curr_task(int cpu)
{
	return cpu_curr(cpu);
}

#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */

#ifdef CONFIG_IA64
/**
 * ia64_set_curr_task - set the current task for a given CPU.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a CPU in a non-blocking manner. This function
 * must be called with all CPUs synchronized, and interrupts disabled, and
 * the caller must save the original value of the current task (see
 * curr_task() above) and restore that value before reenabling interrupts and
 * re-starting the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
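 *
 * [ Usage sketch following the description above (handler and task
 *   names are hypothetical; all CPUs stopped, interrupts disabled):
 *
 *	struct task_struct *orig = curr_task(cpu);
 *
 *	ia64_set_curr_task(cpu, mca_task);
 *	handle_the_event();
 *	ia64_set_curr_task(cpu, orig);
 * ]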
 */
void ia64_set_curr_task(int cpu, struct task_struct *p)
{
	cpu_curr(cpu) = p;
}

#endif

#ifdef CONFIG_CGROUP_SCHED
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

static inline void alloc_uclamp_sched_group(struct task_group *tg,
					    struct task_group *parent)
{
#ifdef CONFIG_UCLAMP_TASK_GROUP
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&tg->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
		tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
	}
#endif
}

static void sched_free_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free_rt_sched_group(tg);
	autogroup_free(tg);
	kmem_cache_free(task_group_cache, tg);
}

/* Allocate runqueue etc. for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
	struct task_group *tg;

	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
	if (!tg)
		return ERR_PTR(-ENOMEM);

	if (!alloc_fair_sched_group(tg, parent))
		goto err;

	if (!alloc_rt_sched_group(tg, parent))
		goto err;

	alloc_uclamp_sched_group(tg, parent);

	return tg;

err:
	sched_free_group(tg);
	return ERR_PTR(-ENOMEM);
}

void sched_online_group(struct task_group *tg, struct task_group *parent)
{
	unsigned long flags;

	spin_lock_irqsave(&task_group_lock, flags);
	list_add_rcu(&tg->list, &task_groups);

	/* Root should already exist: */
	WARN_ON(!parent);

	tg->parent = parent;
	INIT_LIST_HEAD(&tg->children);
	list_add_rcu(&tg->siblings, &parent->children);
	spin_unlock_irqrestore(&task_group_lock, flags);

	online_fair_sched_group(tg);
}

/* RCU callback to free various structures associated with a task group */
static void sched_free_group_rcu(struct rcu_head *rhp)
{
	/* Now it should be safe to free those cfs_rqs: */
	sched_free_group(container_of(rhp, struct task_group, rcu));
}

void sched_destroy_group(struct task_group *tg)
{
	/* Wait for possible concurrent references to cfs_rqs to complete: */
	call_rcu(&tg->rcu, sched_free_group_rcu);
}

void sched_offline_group(struct task_group *tg)
{
	unsigned long flags;

	/* End participation in shares distribution: */
	unregister_fair_sched_group(tg);

	spin_lock_irqsave(&task_group_lock, flags);
	list_del_rcu(&tg->list);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);
}

static void sched_change_group(struct task_struct *tsk, int type)
{
	struct task_group *tg;

	/*
	 * All callers are synchronized by task_rq_lock(); we do not use RCU,
	 * which would be pointless here. Thus, we pass "true" to
	 * task_css_check() to prevent lockdep warnings.
	 */
	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
			  struct task_group, css);
	tg = autogroup_task_group(tsk, tg);
	tsk->sched_task_group = tg;

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->task_change_group)
		tsk->sched_class->task_change_group(tsk, type);
	else
#endif
		set_task_rq(tsk, task_cpu(tsk));
}

/*
 * Change task's runqueue when it moves between groups.
8579 * 8580 * The caller of this function should have put the task in its new group by 8581 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect 8582 * its new group. 8583 */ 8584 void sched_move_task(struct task_struct *tsk) 8585 { 8586 int queued, running, queue_flags = 8587 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 8588 struct rq_flags rf; 8589 struct rq *rq; 8590 8591 rq = task_rq_lock(tsk, &rf); 8592 update_rq_clock(rq); 8593 8594 running = task_current(rq, tsk); 8595 queued = task_on_rq_queued(tsk); 8596 8597 if (queued) 8598 dequeue_task(rq, tsk, queue_flags); 8599 if (running) 8600 put_prev_task(rq, tsk); 8601 8602 sched_change_group(tsk, TASK_MOVE_GROUP); 8603 8604 if (queued) 8605 enqueue_task(rq, tsk, queue_flags); 8606 if (running) { 8607 set_next_task(rq, tsk); 8608 /* 8609 * After changing group, the running task may have joined a 8610 * throttled one but it's still the running task. Trigger a 8611 * resched to make sure that task can still run. 8612 */ 8613 resched_curr(rq); 8614 } 8615 8616 task_rq_unlock(rq, tsk, &rf); 8617 } 8618 8619 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 8620 { 8621 return css ? container_of(css, struct task_group, css) : NULL; 8622 } 8623 8624 static struct cgroup_subsys_state * 8625 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 8626 { 8627 struct task_group *parent = css_tg(parent_css); 8628 struct task_group *tg; 8629 8630 if (!parent) { 8631 /* This is early initialization for the top cgroup */ 8632 return &root_task_group.css; 8633 } 8634 8635 tg = sched_create_group(parent); 8636 if (IS_ERR(tg)) 8637 return ERR_PTR(-ENOMEM); 8638 8639 return &tg->css; 8640 } 8641 8642 /* Expose task group only after completing cgroup initialization */ 8643 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 8644 { 8645 struct task_group *tg = css_tg(css); 8646 struct task_group *parent = css_tg(css->parent); 8647 8648 if (parent) 8649 sched_online_group(tg, parent); 8650 8651 #ifdef CONFIG_UCLAMP_TASK_GROUP 8652 /* Propagate the effective uclamp value for the new group */ 8653 cpu_util_update_eff(css); 8654 #endif 8655 8656 return 0; 8657 } 8658 8659 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 8660 { 8661 struct task_group *tg = css_tg(css); 8662 8663 sched_offline_group(tg); 8664 } 8665 8666 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 8667 { 8668 struct task_group *tg = css_tg(css); 8669 8670 /* 8671 * Relies on the RCU grace period between css_released() and this. 8672 */ 8673 sched_free_group(tg); 8674 } 8675 8676 /* 8677 * This is called before wake_up_new_task(), therefore we really only 8678 * have to set its group bits, all the other stuff does not apply. 8679 */ 8680 static void cpu_cgroup_fork(struct task_struct *task) 8681 { 8682 struct rq_flags rf; 8683 struct rq *rq; 8684 8685 rq = task_rq_lock(task, &rf); 8686 8687 update_rq_clock(rq); 8688 sched_change_group(task, TASK_SET_GROUP); 8689 8690 task_rq_unlock(rq, task, &rf); 8691 } 8692 8693 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 8694 { 8695 struct task_struct *task; 8696 struct cgroup_subsys_state *css; 8697 int ret = 0; 8698 8699 cgroup_taskset_for_each(task, css, tset) { 8700 #ifdef CONFIG_RT_GROUP_SCHED 8701 if (!sched_rt_can_attach(css_tg(css), task)) 8702 return -EINVAL; 8703 #endif 8704 /* 8705 * Serialize against wake_up_new_task() such that if it's 8706 * running, we're sure to observe its full state. 
8707 */ 8708 raw_spin_lock_irq(&task->pi_lock); 8709 /* 8710 * Avoid calling sched_move_task() before wake_up_new_task() 8711 * has happened. This would lead to problems with PELT, due to 8712 * move wanting to detach+attach while we're not attached yet. 8713 */ 8714 if (task->state == TASK_NEW) 8715 ret = -EINVAL; 8716 raw_spin_unlock_irq(&task->pi_lock); 8717 8718 if (ret) 8719 break; 8720 } 8721 return ret; 8722 } 8723 8724 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 8725 { 8726 struct task_struct *task; 8727 struct cgroup_subsys_state *css; 8728 8729 cgroup_taskset_for_each(task, css, tset) 8730 sched_move_task(task); 8731 } 8732 8733 #ifdef CONFIG_UCLAMP_TASK_GROUP 8734 static void cpu_util_update_eff(struct cgroup_subsys_state *css) 8735 { 8736 struct cgroup_subsys_state *top_css = css; 8737 struct uclamp_se *uc_parent = NULL; 8738 struct uclamp_se *uc_se = NULL; 8739 unsigned int eff[UCLAMP_CNT]; 8740 enum uclamp_id clamp_id; 8741 unsigned int clamps; 8742 8743 css_for_each_descendant_pre(css, top_css) { 8744 uc_parent = css_tg(css)->parent 8745 ? css_tg(css)->parent->uclamp : NULL; 8746 8747 for_each_clamp_id(clamp_id) { 8748 /* Assume effective clamps matches requested clamps */ 8749 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; 8750 /* Cap effective clamps with parent's effective clamps */ 8751 if (uc_parent && 8752 eff[clamp_id] > uc_parent[clamp_id].value) { 8753 eff[clamp_id] = uc_parent[clamp_id].value; 8754 } 8755 } 8756 /* Ensure protection is always capped by limit */ 8757 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); 8758 8759 /* Propagate most restrictive effective clamps */ 8760 clamps = 0x0; 8761 uc_se = css_tg(css)->uclamp; 8762 for_each_clamp_id(clamp_id) { 8763 if (eff[clamp_id] == uc_se[clamp_id].value) 8764 continue; 8765 uc_se[clamp_id].value = eff[clamp_id]; 8766 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); 8767 clamps |= (0x1 << clamp_id); 8768 } 8769 if (!clamps) { 8770 css = css_rightmost_descendant(css); 8771 continue; 8772 } 8773 8774 /* Immediately update descendants RUNNABLE tasks */ 8775 uclamp_update_active_tasks(css, clamps); 8776 } 8777 } 8778 8779 /* 8780 * Integer 10^N with a given N exponent by casting to integer the literal "1eN" 8781 * C expression. Since there is no way to convert a macro argument (N) into a 8782 * character constant, use two levels of macros. 
8783 */ 8784 #define _POW10(exp) ((unsigned int)1e##exp) 8785 #define POW10(exp) _POW10(exp) 8786 8787 struct uclamp_request { 8788 #define UCLAMP_PERCENT_SHIFT 2 8789 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) 8790 s64 percent; 8791 u64 util; 8792 int ret; 8793 }; 8794 8795 static inline struct uclamp_request 8796 capacity_from_percent(char *buf) 8797 { 8798 struct uclamp_request req = { 8799 .percent = UCLAMP_PERCENT_SCALE, 8800 .util = SCHED_CAPACITY_SCALE, 8801 .ret = 0, 8802 }; 8803 8804 buf = strim(buf); 8805 if (strcmp(buf, "max")) { 8806 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, 8807 &req.percent); 8808 if (req.ret) 8809 return req; 8810 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { 8811 req.ret = -ERANGE; 8812 return req; 8813 } 8814 8815 req.util = req.percent << SCHED_CAPACITY_SHIFT; 8816 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); 8817 } 8818 8819 return req; 8820 } 8821 8822 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, 8823 size_t nbytes, loff_t off, 8824 enum uclamp_id clamp_id) 8825 { 8826 struct uclamp_request req; 8827 struct task_group *tg; 8828 8829 req = capacity_from_percent(buf); 8830 if (req.ret) 8831 return req.ret; 8832 8833 static_branch_enable(&sched_uclamp_used); 8834 8835 mutex_lock(&uclamp_mutex); 8836 rcu_read_lock(); 8837 8838 tg = css_tg(of_css(of)); 8839 if (tg->uclamp_req[clamp_id].value != req.util) 8840 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); 8841 8842 /* 8843 * Because of not recoverable conversion rounding we keep track of the 8844 * exact requested value 8845 */ 8846 tg->uclamp_pct[clamp_id] = req.percent; 8847 8848 /* Update effective clamps to track the most restrictive value */ 8849 cpu_util_update_eff(of_css(of)); 8850 8851 rcu_read_unlock(); 8852 mutex_unlock(&uclamp_mutex); 8853 8854 return nbytes; 8855 } 8856 8857 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, 8858 char *buf, size_t nbytes, 8859 loff_t off) 8860 { 8861 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); 8862 } 8863 8864 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of, 8865 char *buf, size_t nbytes, 8866 loff_t off) 8867 { 8868 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); 8869 } 8870 8871 static inline void cpu_uclamp_print(struct seq_file *sf, 8872 enum uclamp_id clamp_id) 8873 { 8874 struct task_group *tg; 8875 u64 util_clamp; 8876 u64 percent; 8877 u32 rem; 8878 8879 rcu_read_lock(); 8880 tg = css_tg(seq_css(sf)); 8881 util_clamp = tg->uclamp_req[clamp_id].value; 8882 rcu_read_unlock(); 8883 8884 if (util_clamp == SCHED_CAPACITY_SCALE) { 8885 seq_puts(sf, "max\n"); 8886 return; 8887 } 8888 8889 percent = tg->uclamp_pct[clamp_id]; 8890 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); 8891 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); 8892 } 8893 8894 static int cpu_uclamp_min_show(struct seq_file *sf, void *v) 8895 { 8896 cpu_uclamp_print(sf, UCLAMP_MIN); 8897 return 0; 8898 } 8899 8900 static int cpu_uclamp_max_show(struct seq_file *sf, void *v) 8901 { 8902 cpu_uclamp_print(sf, UCLAMP_MAX); 8903 return 0; 8904 } 8905 #endif /* CONFIG_UCLAMP_TASK_GROUP */ 8906 8907 #ifdef CONFIG_FAIR_GROUP_SCHED 8908 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 8909 struct cftype *cftype, u64 shareval) 8910 { 8911 if (shareval > scale_load_down(ULONG_MAX)) 8912 shareval = MAX_SHARES; 8913 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 8914 } 8915 8916 static 
u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct task_group *tg = css_tg(css);

	return (u64) scale_load_down(tg->shares);
}

#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);

const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
/* More than 203 days if BW_SHIFT equals 20. */
static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	if (tg == &root_task_group)
		return -EINVAL;

	/*
	 * Ensure we have at least some amount of bandwidth every period.
	 * This is to prevent reaching a state of large arrears when
	 * throttled via entity_tick() resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;

	/*
	 * Likewise, bound things on the other side by preventing insane
	 * quota periods. This also allows us to normalize in computing quota
	 * feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;

	/*
	 * Bound quota to defend quota against overflow during bandwidth shift.
	 */
	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
		return -EINVAL;

	/*
	 * Prevent race between setting of cfs_rq->runtime_enabled and
	 * unthrottle_offline_cfs_rqs().
	 */
	get_online_cpus();
	mutex_lock(&cfs_constraints_mutex);
	ret = __cfs_schedulable(tg, period, quota);
	if (ret)
		goto out_unlock;

	runtime_enabled = quota != RUNTIME_INF;
	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
	/*
	 * If we need to toggle cfs_bandwidth_used, off->on must occur
	 * before making related changes, and on->off must occur afterwards.
	 */
	if (runtime_enabled && !runtime_was_enabled)
		cfs_bandwidth_usage_inc();
	raw_spin_lock_irq(&cfs_b->lock);
	cfs_b->period = ns_to_ktime(period);
	cfs_b->quota = quota;

	__refill_cfs_bandwidth_runtime(cfs_b);

	/* Restart the period timer (if active) to handle new period expiry: */
	if (runtime_enabled)
		start_cfs_bandwidth(cfs_b);

	raw_spin_unlock_irq(&cfs_b->lock);

	for_each_online_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
		struct rq *rq = cfs_rq->rq;
		struct rq_flags rf;

		rq_lock_irq(rq, &rf);
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;

		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
		rq_unlock_irq(rq, &rf);
	}
	if (runtime_was_enabled && !runtime_enabled)
		cfs_bandwidth_usage_dec();
out_unlock:
	mutex_unlock(&cfs_constraints_mutex);
	put_online_cpus();

	return ret;
}

static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
	u64 quota, period;

	period = ktime_to_ns(tg->cfs_bandwidth.period);
	if (cfs_quota_us < 0)
		quota = RUNTIME_INF;
	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
	else
		return -EINVAL;

	return tg_set_cfs_bandwidth(tg, period,
quota); 9029 } 9030 9031 static long tg_get_cfs_quota(struct task_group *tg) 9032 { 9033 u64 quota_us; 9034 9035 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 9036 return -1; 9037 9038 quota_us = tg->cfs_bandwidth.quota; 9039 do_div(quota_us, NSEC_PER_USEC); 9040 9041 return quota_us; 9042 } 9043 9044 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 9045 { 9046 u64 quota, period; 9047 9048 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 9049 return -EINVAL; 9050 9051 period = (u64)cfs_period_us * NSEC_PER_USEC; 9052 quota = tg->cfs_bandwidth.quota; 9053 9054 return tg_set_cfs_bandwidth(tg, period, quota); 9055 } 9056 9057 static long tg_get_cfs_period(struct task_group *tg) 9058 { 9059 u64 cfs_period_us; 9060 9061 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 9062 do_div(cfs_period_us, NSEC_PER_USEC); 9063 9064 return cfs_period_us; 9065 } 9066 9067 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 9068 struct cftype *cft) 9069 { 9070 return tg_get_cfs_quota(css_tg(css)); 9071 } 9072 9073 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 9074 struct cftype *cftype, s64 cfs_quota_us) 9075 { 9076 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 9077 } 9078 9079 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 9080 struct cftype *cft) 9081 { 9082 return tg_get_cfs_period(css_tg(css)); 9083 } 9084 9085 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 9086 struct cftype *cftype, u64 cfs_period_us) 9087 { 9088 return tg_set_cfs_period(css_tg(css), cfs_period_us); 9089 } 9090 9091 struct cfs_schedulable_data { 9092 struct task_group *tg; 9093 u64 period, quota; 9094 }; 9095 9096 /* 9097 * normalize group quota/period to be quota/max_period 9098 * note: units are usecs 9099 */ 9100 static u64 normalize_cfs_quota(struct task_group *tg, 9101 struct cfs_schedulable_data *d) 9102 { 9103 u64 quota, period; 9104 9105 if (tg == d->tg) { 9106 period = d->period; 9107 quota = d->quota; 9108 } else { 9109 period = tg_get_cfs_period(tg); 9110 quota = tg_get_cfs_quota(tg); 9111 } 9112 9113 /* note: these should typically be equivalent */ 9114 if (quota == RUNTIME_INF || quota == -1) 9115 return RUNTIME_INF; 9116 9117 return to_ratio(period, quota); 9118 } 9119 9120 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 9121 { 9122 struct cfs_schedulable_data *d = data; 9123 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 9124 s64 quota = 0, parent_quota = -1; 9125 9126 if (!tg->parent) { 9127 quota = RUNTIME_INF; 9128 } else { 9129 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 9130 9131 quota = normalize_cfs_quota(tg, d); 9132 parent_quota = parent_b->hierarchical_quota; 9133 9134 /* 9135 * Ensure max(child_quota) <= parent_quota. On cgroup2, 9136 * always take the min. 
On cgroup1, only inherit when no 9137 * limit is set: 9138 */ 9139 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 9140 quota = min(quota, parent_quota); 9141 } else { 9142 if (quota == RUNTIME_INF) 9143 quota = parent_quota; 9144 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 9145 return -EINVAL; 9146 } 9147 } 9148 cfs_b->hierarchical_quota = quota; 9149 9150 return 0; 9151 } 9152 9153 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 9154 { 9155 int ret; 9156 struct cfs_schedulable_data data = { 9157 .tg = tg, 9158 .period = period, 9159 .quota = quota, 9160 }; 9161 9162 if (quota != RUNTIME_INF) { 9163 do_div(data.period, NSEC_PER_USEC); 9164 do_div(data.quota, NSEC_PER_USEC); 9165 } 9166 9167 rcu_read_lock(); 9168 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 9169 rcu_read_unlock(); 9170 9171 return ret; 9172 } 9173 9174 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 9175 { 9176 struct task_group *tg = css_tg(seq_css(sf)); 9177 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 9178 9179 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 9180 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 9181 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 9182 9183 if (schedstat_enabled() && tg != &root_task_group) { 9184 u64 ws = 0; 9185 int i; 9186 9187 for_each_possible_cpu(i) 9188 ws += schedstat_val(tg->se[i]->statistics.wait_sum); 9189 9190 seq_printf(sf, "wait_sum %llu\n", ws); 9191 } 9192 9193 return 0; 9194 } 9195 #endif /* CONFIG_CFS_BANDWIDTH */ 9196 #endif /* CONFIG_FAIR_GROUP_SCHED */ 9197 9198 #ifdef CONFIG_RT_GROUP_SCHED 9199 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 9200 struct cftype *cft, s64 val) 9201 { 9202 return sched_group_set_rt_runtime(css_tg(css), val); 9203 } 9204 9205 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 9206 struct cftype *cft) 9207 { 9208 return sched_group_rt_runtime(css_tg(css)); 9209 } 9210 9211 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 9212 struct cftype *cftype, u64 rt_period_us) 9213 { 9214 return sched_group_set_rt_period(css_tg(css), rt_period_us); 9215 } 9216 9217 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 9218 struct cftype *cft) 9219 { 9220 return sched_group_rt_period(css_tg(css)); 9221 } 9222 #endif /* CONFIG_RT_GROUP_SCHED */ 9223 9224 static struct cftype cpu_legacy_files[] = { 9225 #ifdef CONFIG_FAIR_GROUP_SCHED 9226 { 9227 .name = "shares", 9228 .read_u64 = cpu_shares_read_u64, 9229 .write_u64 = cpu_shares_write_u64, 9230 }, 9231 #endif 9232 #ifdef CONFIG_CFS_BANDWIDTH 9233 { 9234 .name = "cfs_quota_us", 9235 .read_s64 = cpu_cfs_quota_read_s64, 9236 .write_s64 = cpu_cfs_quota_write_s64, 9237 }, 9238 { 9239 .name = "cfs_period_us", 9240 .read_u64 = cpu_cfs_period_read_u64, 9241 .write_u64 = cpu_cfs_period_write_u64, 9242 }, 9243 { 9244 .name = "stat", 9245 .seq_show = cpu_cfs_stat_show, 9246 }, 9247 #endif 9248 #ifdef CONFIG_RT_GROUP_SCHED 9249 { 9250 .name = "rt_runtime_us", 9251 .read_s64 = cpu_rt_runtime_read, 9252 .write_s64 = cpu_rt_runtime_write, 9253 }, 9254 { 9255 .name = "rt_period_us", 9256 .read_u64 = cpu_rt_period_read_uint, 9257 .write_u64 = cpu_rt_period_write_uint, 9258 }, 9259 #endif 9260 #ifdef CONFIG_UCLAMP_TASK_GROUP 9261 { 9262 .name = "uclamp.min", 9263 .flags = CFTYPE_NOT_ON_ROOT, 9264 .seq_show = cpu_uclamp_min_show, 9265 .write = cpu_uclamp_min_write, 9266 }, 9267 { 9268 .name = "uclamp.max", 9269 .flags = CFTYPE_NOT_ON_ROOT, 9270 .seq_show 
= cpu_uclamp_max_show, 9271 .write = cpu_uclamp_max_write, 9272 }, 9273 #endif 9274 { } /* Terminate */ 9275 }; 9276 9277 static int cpu_extra_stat_show(struct seq_file *sf, 9278 struct cgroup_subsys_state *css) 9279 { 9280 #ifdef CONFIG_CFS_BANDWIDTH 9281 { 9282 struct task_group *tg = css_tg(css); 9283 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 9284 u64 throttled_usec; 9285 9286 throttled_usec = cfs_b->throttled_time; 9287 do_div(throttled_usec, NSEC_PER_USEC); 9288 9289 seq_printf(sf, "nr_periods %d\n" 9290 "nr_throttled %d\n" 9291 "throttled_usec %llu\n", 9292 cfs_b->nr_periods, cfs_b->nr_throttled, 9293 throttled_usec); 9294 } 9295 #endif 9296 return 0; 9297 } 9298 9299 #ifdef CONFIG_FAIR_GROUP_SCHED 9300 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 9301 struct cftype *cft) 9302 { 9303 struct task_group *tg = css_tg(css); 9304 u64 weight = scale_load_down(tg->shares); 9305 9306 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); 9307 } 9308 9309 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 9310 struct cftype *cft, u64 weight) 9311 { 9312 /* 9313 * cgroup weight knobs should use the common MIN, DFL and MAX 9314 * values which are 1, 100 and 10000 respectively. While it loses 9315 * a bit of range on both ends, it maps pretty well onto the shares 9316 * value used by scheduler and the round-trip conversions preserve 9317 * the original value over the entire range. 9318 */ 9319 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) 9320 return -ERANGE; 9321 9322 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); 9323 9324 return sched_group_set_shares(css_tg(css), scale_load(weight)); 9325 } 9326 9327 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 9328 struct cftype *cft) 9329 { 9330 unsigned long weight = scale_load_down(css_tg(css)->shares); 9331 int last_delta = INT_MAX; 9332 int prio, delta; 9333 9334 /* find the closest nice value to the current weight */ 9335 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 9336 delta = abs(sched_prio_to_weight[prio] - weight); 9337 if (delta >= last_delta) 9338 break; 9339 last_delta = delta; 9340 } 9341 9342 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 9343 } 9344 9345 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 9346 struct cftype *cft, s64 nice) 9347 { 9348 unsigned long weight; 9349 int idx; 9350 9351 if (nice < MIN_NICE || nice > MAX_NICE) 9352 return -ERANGE; 9353 9354 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 9355 idx = array_index_nospec(idx, 40); 9356 weight = sched_prio_to_weight[idx]; 9357 9358 return sched_group_set_shares(css_tg(css), scale_load(weight)); 9359 } 9360 #endif 9361 9362 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 9363 long period, long quota) 9364 { 9365 if (quota < 0) 9366 seq_puts(sf, "max"); 9367 else 9368 seq_printf(sf, "%ld", quota); 9369 9370 seq_printf(sf, " %ld\n", period); 9371 } 9372 9373 /* caller should put the current value in *@periodp before calling */ 9374 static int __maybe_unused cpu_period_quota_parse(char *buf, 9375 u64 *periodp, u64 *quotap) 9376 { 9377 char tok[21]; /* U64_MAX */ 9378 9379 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 9380 return -EINVAL; 9381 9382 *periodp *= NSEC_PER_USEC; 9383 9384 if (sscanf(tok, "%llu", quotap)) 9385 *quotap *= NSEC_PER_USEC; 9386 else if (!strcmp(tok, "max")) 9387 *quotap = RUNTIME_INF; 9388 else 9389 return -EINVAL; 9390 9391 return 0; 9392 } 9393 9394 #ifdef CONFIG_CFS_BANDWIDTH 9395 static 
int cpu_max_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));

	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
	return 0;
}

static ssize_t cpu_max_write(struct kernfs_open_file *of,
			     char *buf, size_t nbytes, loff_t off)
{
	struct task_group *tg = css_tg(of_css(of));
	u64 period = tg_get_cfs_period(tg);
	u64 quota;
	int ret;

	ret = cpu_period_quota_parse(buf, &period, &quota);
	if (!ret)
		ret = tg_set_cfs_bandwidth(tg, period, quota);
	return ret ?: nbytes;
}
#endif

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_weight_read_u64,
		.write_u64 = cpu_weight_write_u64,
	},
	{
		.name = "weight.nice",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_s64 = cpu_weight_nice_read_s64,
		.write_s64 = cpu_weight_nice_write_s64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_max_show,
		.write = cpu_max_write,
	},
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
	{
		.name = "uclamp.min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_min_show,
		.write = cpu_uclamp_min_write,
	},
	{
		.name = "uclamp.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_max_show,
		.write = cpu_uclamp_max_write,
	},
#endif
	{ }	/* terminate */
};

struct cgroup_subsys cpu_cgrp_subsys = {
	.css_alloc	= cpu_cgroup_css_alloc,
	.css_online	= cpu_cgroup_css_online,
	.css_released	= cpu_cgroup_css_released,
	.css_free	= cpu_cgroup_css_free,
	.css_extra_stat_show	= cpu_extra_stat_show,
	.fork		= cpu_cgroup_fork,
	.can_attach	= cpu_cgroup_can_attach,
	.attach		= cpu_cgroup_attach,
	.legacy_cftypes	= cpu_legacy_files,
	.dfl_cftypes	= cpu_files,
	.early_init	= true,
	.threaded	= true,
};

#endif /* CONFIG_CGROUP_SCHED */

void dump_cpu_task(int cpu)
{
	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
const int sched_prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};

/*
 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
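 *
 * [ E.g. for nice 0: 2^32 / 1024 == 4194304, the middle entry below; a
 *   division delta / weight can then be computed as
 *   (delta * sched_prio_to_wmult[20]) >> 32. ]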
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
const u32 sched_prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};

void call_trace_sched_update_nr_running(struct rq *rq, int count)
{
	trace_sched_update_nr_running_tp(rq, count);
}
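/*
 * [ Illustration: bare tracepoints such as sched_update_nr_running_tp
 *   (fired via the helper above) can be probed from a module. A minimal
 *   sketch, assuming the usual register/unregister pair that the
 *   tracepoint declaration generates (probe body is hypothetical):
 *
 *	static void probe_nr_running(void *data, struct rq *rq, int count)
 *	{
 *		trace_printk("nr_running changed by %d\n", count);
 *	}
 *
 *	register_trace_sched_update_nr_running_tp(probe_nr_running, NULL);
 *	...
 *	unregister_trace_sched_update_nr_running_tp(probe_nr_running, NULL);
 * ]
 */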