// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel scheduler code and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"

#include <linux/nospec.h>

#include <linux/kcov.h>
#include <linux/scs.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>

#include "../workqueue_internal.h"
#include "../../fs/io-wq.h"
#include "../smpboot.h"

#include "pelt.h"
#include "smp.h"

/*
 * Export tracepoints that act as a bare tracehook (i.e. have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT
#endif

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;


/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task()/
 *    cpu_cgroup_fork():	p->sched_task_group
 *  - uclamp_update_active()	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */
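
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * canonical lockless p->state usage described above. set_current_state()
 * implies a full barrier, so the store to ->state cannot be reordered past
 * the condition load, pairing with the barriers in try_to_wake_up().
 */
#if 0
static void example_wait_for_condition(bool *condition)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*condition))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
#endif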

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
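
/*
 * Illustrative sketch (hypothetical helper): external code typically
 * brackets per-task state updates with task_rq_lock()/task_rq_unlock(),
 * which stabilizes task_cpu(p) and p->on_rq as described above.
 */
#if 0
static void example_poke_task(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);	/* p->pi_lock + rq->lock held */
	update_rq_clock(rq);
	/* ... inspect or modify p's scheduling state here ... */
	task_rq_unlock(rq, p, &rf);
}
#endif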

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
	/*
	 * In theory, the compiler should just see 0 here, and optimize out
	 * the call to sched_rt_avg_update. But I don't trust it...
	 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight misattribution of {soft,}irq
	 * time; a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}
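
/*
 * Illustrative sketch (hypothetical helper): rq->clock is only valid under
 * rq->lock and must be refreshed before use, unless a prior update in the
 * same critical section is being reused via the RQCF_* skip machinery.
 */
#if 0
static u64 example_read_rq_clock(struct rq *rq)
{
	struct rq_flags rf;
	u64 now;

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	now = rq_clock_task(rq);
	rq_unlock(rq, &rf);

	return now;
}
#endif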

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;

	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
({									\
	typeof(ptr) _ptr = (ptr);					\
	typeof(mask) _mask = (mask);					\
	typeof(*_ptr) _old, _val = *_ptr;				\
									\
	for (;;) {							\
		_old = cmpxchg(_ptr, _val, _val | _mask);		\
		if (_old == _val)					\
			break;						\
		_val = _old;						\
	}								\
	_old;								\
})
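
/*
 * Illustrative sketch: fetch_or() returns the old value, so a caller can
 * both set a bit and learn whether it was already set in one atomic step;
 * being a macro, it works on any integer type that cmpxchg() supports.
 */
#if 0
static bool example_test_and_set_flag(unsigned long *flags, unsigned long bit)
{
	return fetch_or(flags, bit) & bit;	/* true if bit was already set */
}
#endif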

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold a reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending whether or not the @task is already
 * queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;

	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd),
			housekeeping_cpumask(HK_FLAG_TIMER)) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
	cpu = default_cpu;
unlock:
	rcu_read_unlock();
	return cpu;
}
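
/*
 * Illustrative sketch (hypothetical helper): a timer-placement path can use
 * get_nohz_timer_target() so a timer lands on a busy housekeeping CPU
 * instead of waking an idle one just to service it.
 */
#if 0
static void example_queue_timer(struct timer_list *timer)
{
	add_timer_on(timer, get_nohz_timer_target());
}
#endif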

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks
	 * left; if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
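
/*
 * Illustrative sketch (hypothetical visitors): walk_tg_tree_from() calls
 * @down on the way down and @up on the way back up; tg_nop() can stand in
 * for whichever direction needs no work.
 */
#if 0
static int example_count_tg(struct task_group *tg, void *data)
{
	(*(int *)data)++;
	return 0;		/* non-zero would abort the walk */
}

static int example_count_groups(void)
{
	int count = 0;

	rcu_read_lock();
	walk_tg_tree_from(&root_task_group, example_count_tg, tg_nop, &count);
	rcu_read_unlock();

	return count;
}
#endif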

static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less or equal than these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since we have actual users that make use of uclamp
 * functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return clamp_value / UCLAMP_BUCKET_DELTA;
}

static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;
	return SCHED_CAPACITY_SCALE;
}

static inline void uclamp_se_set(struct uclamp_se *uc_se,
				 unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->bucket_id = uclamp_bucket_id(value);
	uc_se->user_defined = user_defined;
}
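
/*
 * Worked example of the bucket arithmetic above, assuming the common
 * SCHED_CAPACITY_SCALE == 1024 and UCLAMP_BUCKETS == 5:
 * UCLAMP_BUCKET_DELTA == DIV_ROUND_CLOSEST(1024, 5) == 205, so
 * uclamp_bucket_id() maps clamp values as:
 *
 *   [   0..204 ] -> bucket 0
 *   [ 205..409 ] -> bucket 1
 *   [ 410..614 ] -> bucket 2
 *   [ 615..819 ] -> bucket 3
 *   [ 820..1024] -> bucket 4
 */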

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * topmost bucket with tasks in.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
	unsigned int default_util_min;
	struct uclamp_se *uc_se;

	lockdep_assert_held(&p->pi_lock);

	uc_se = &p->uclamp_req[UCLAMP_MIN];

	/* Only sync if user didn't override the default */
	if (uc_se->user_defined)
		return;

	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
	uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	if (!rt_task(p))
		return;

	/* Protect updates to p->uclamp_* */
	rq = task_rq_lock(p, &rf);
	__uclamp_update_util_min_rt_default(p);
	task_rq_unlock(rq, p, &rf);
}

static void uclamp_sync_util_min_rt_default(void)
{
	struct task_struct *g, *p;

	/*
	 * copy_process()			sysctl_uclamp
	 *					  uclamp_min_rt = X;
	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
	 *   // link thread			  smp_mb__after_spinlock()
	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
	 *   sched_post_fork()			  for_each_process_thread()
	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
	 *
	 * Ensures that either sched_post_fork() will observe the new
	 * uclamp_min_rt or for_each_process_thread() will observe the new
	 * task.
	 */
	read_lock(&tasklist_lock);
	smp_mb__after_spinlock();
	read_unlock(&tasklist_lock);

	rcu_read_lock();
	for_each_process_thread(g, p)
		uclamp_update_util_min_rt_default(p);
	rcu_read_unlock();
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	struct uclamp_se uc_max;

	/*
	 * Tasks in autogroups or the root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	uc_max = task_group(p)->uclamp[clamp_id];
	if (uc_req.value > uc_max.value || !uc_req.user_defined)
		return uc_max;
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space; we track
 * within each bucket the maximum value of the tasks refcounted in it.
 * This "local max aggregation" allows tracking the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_held(&rq->lock);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > READ_ONCE(uc_rq->value))
		WRITE_ONCE(uc_rq->value, uc_se->value);
}
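
/*
 * Worked example of the local max aggregation above (hypothetical values,
 * UCLAMP_BUCKET_DELTA == 205): two RUNNABLE tasks with requested UCLAMP_MIN
 * of 300 and 350 share bucket 1; bucket->value holds their max (350) and
 * uc_rq->value follows the max across all non-empty buckets. When the 350
 * task is dequeued the bucket keeps 350 until its last task leaves, briefly
 * over-boosting the 300 task; only then is the rq value recomputed via
 * uclamp_rq_max_value(), see uclamp_rq_dec_id() below.
 */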

/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_held(&rq->lock);

	/*
	 * If sched_uclamp_used was enabled after task @p was enqueued,
	 * we could end up with an unbalanced call to uclamp_rq_dec_id().
	 *
	 * In this case the uc_se->active flag should be false since no uclamp
	 * accounting was performed at enqueue time and we can just return
	 * here.
	 *
	 * Need to be careful of the following enqueue/dequeue ordering
	 * problem too
	 *
	 *	enqueue(taskA)
	 *	// sched_uclamp_used gets enabled
	 *	enqueue(taskB)
	 *	dequeue(taskA)
	 *	// Must not decrement bucket->tasks here
	 *	dequeue(taskB)
	 *
	 * where we could end up with stale data in uc_se and
	 * bucket[uc_se->bucket_id].
	 *
	 * The following check eliminates the possibility of such a race.
	 */
	if (unlikely(!uc_se->active))
		return;

	bucket = &uc_rq->bucket[uc_se->bucket_id];

	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;

	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = READ_ONCE(uc_rq->value);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fix up the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		WRITE_ONCE(uc_rq->value, bkt_clamp);
	}
}

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void
uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	if (p->uclamp[clamp_id].active) {
		uclamp_rq_dec_id(rq, p, clamp_id);
		uclamp_rq_inc_id(rq, p, clamp_id);
	}

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css,
			   unsigned int clamps)
{
	enum uclamp_id clamp_id;
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it))) {
		for_each_clamp_id(clamp_id) {
			if ((0x1 << clamp_id) & clamps)
				uclamp_update_active(p, clamp_id);
		}
	}
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	rcu_read_lock();
	cpu_util_update_eff(&root_task_group.css);
	rcu_read_unlock();
}
#else
static void uclamp_update_root_tg(void) { }
#endif

int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max, old_min_rt;
	int result;

	mutex_lock(&uclamp_mutex);
	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;
	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		goto done;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {

		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_update_root_tg();
	}

	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_sync_util_min_rt_default();
	}

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */

	goto done;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
done:
	mutex_unlock(&uclamp_mutex);

	return result;
}

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT tasks by default have a 100% boost value that could be
		 * modified at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);

	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

static void uclamp_fork(struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
	 * as the task is still at its early fork stages.
	 */
	for_each_clamp_id(clamp_id)
		p->uclamp[clamp_id].active = false;

	if (likely(!p->sched_reset_on_fork))
		return;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&p->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}
}

static void uclamp_post_fork(struct task_struct *p)
{
	uclamp_update_util_min_rt_default(p);
}

static void __init init_uclamp_rq(struct rq *rq)
{
	enum uclamp_id clamp_id;
	struct uclamp_rq *uc_rq = rq->uclamp;

	for_each_clamp_id(clamp_id) {
		uc_rq[clamp_id] = (struct uclamp_rq) {
			.value = uclamp_none(clamp_id)
		};
	}

	rq->uclamp_flags = 0;
}

static void __init init_uclamp(void)
{
	struct uclamp_se uc_max = {};
	enum uclamp_id clamp_id;
	int cpu;

	for_each_possible_cpu(cpu)
		init_uclamp_rq(cpu_rq(cpu));

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&init_task.uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}

	/* System defaults allow max clamp values for both indexes */
	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
	for_each_clamp_id(clamp_id) {
		uclamp_default[clamp_id] = uc_max;
#ifdef CONFIG_UCLAMP_TASK_GROUP
		root_task_group.uclamp_req[clamp_id] = uc_max;
		root_task_group.uclamp[clamp_id] = uc_max;
#endif
	}
}

#else /* CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void uclamp_post_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & ENQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & ENQUEUE_RESTORE)) {
		sched_info_queued(rq, p);
		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
	}

	uclamp_rq_inc(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & DEQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & DEQUEUE_SAVE)) {
		sched_info_dequeued(rq, p);
		psi_dequeue(p, flags & DEQUEUE_SLEEP);
	}

	uclamp_rq_dec(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	enqueue_task(rq, p, flags);

	p->on_rq = TASK_ON_RQ_QUEUED;
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;

	dequeue_task(rq, p, flags);
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If the task is RT, or was boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}
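
/*
 * Worked example of the priority ranges used above: a deadline task gets
 * normal_prio == MAX_DL_PRIO-1 (i.e. -1), an RT task with rt_priority 50
 * gets MAX_RT_PRIO-1 - 50 == 49, and a SCHED_NORMAL task at nice 0 keeps
 * its static_prio of DEFAULT_PRIO (120). Lower values mean higher priority.
 */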

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * This means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->sched_class == rq->curr->sched_class)
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	else if (p->sched_class > rq->curr->sched_class)
		resched_curr(rq);

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back-to-back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq);
}

#ifdef CONFIG_SMP

static void
__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);

static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask,
				  u32 flags);

static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
{
	if (likely(!p->migration_disabled))
		return;

	if (p->cpus_ptr != &p->cpus_mask)
		return;

	/*
	 * Violates locking rules! see comment in __do_set_cpus_allowed().
	 */
	__do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE);
}

void migrate_disable(void)
{
	struct task_struct *p = current;

	if (p->migration_disabled) {
		p->migration_disabled++;
		return;
	}

	preempt_disable();
	this_rq()->nr_pinned++;
	p->migration_disabled = 1;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);

void migrate_enable(void)
{
	struct task_struct *p = current;

	if (p->migration_disabled > 1) {
		p->migration_disabled--;
		return;
	}

	/*
	 * Ensure stop_task runs either before or after this, and that
	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
	 */
	preempt_disable();
	if (p->cpus_ptr != &p->cpus_mask)
		__set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
	/*
	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
	 * regular cpus_mask, otherwise things that race (eg.
	 * select_fallback_rq) get confused.
	 */
	barrier();
	p->migration_disabled = 0;
	this_rq()->nr_pinned--;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);
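
/*
 * Illustrative sketch (hypothetical helpers): migrate_disable() pins the
 * current task to its CPU without disabling preemption, so the region may
 * sleep; the nesting counter makes the pair reentrant.
 */
#if 0
static void example_per_cpu_work(void)
{
	migrate_disable();
	/*
	 * We may be preempted here, but we cannot be migrated off this
	 * CPU until the matching migrate_enable().
	 */
	do_something_on(smp_processor_id());	/* hypothetical */
	migrate_enable();
}
#endif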

static inline bool rq_has_pinned_tasks(struct rq *rq)
{
	return rq->nr_pinned;
}

/*
 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
 * __set_cpus_allowed_ptr() and select_fallback_rq().
 */
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
	/* When not in the task's cpumask, no point in looking further. */
	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
		return false;

	/* migrate_disabled() must be allowed to finish. */
	if (is_migration_disabled(p))
		return cpu_online(cpu);

	/* Non-kernel threads are not allowed during either online or offline. */
	if (!(p->flags & PF_KTHREAD))
		return cpu_active(cpu);

	/* KTHREAD_IS_PER_CPU is always allowed. */
	if (kthread_is_per_cpu(p))
		return cpu_online(cpu);

	/* Regular kernel threads don't get to stay during offline. */
	if (cpu_rq(cpu)->balance_push)
		return false;

	/* But are allowed during online. */
	return cpu_online(cpu);
}

/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
				   struct task_struct *p, int new_cpu)
{
	lockdep_assert_held(&rq->lock);

	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
	set_task_cpu(p, new_cpu);
	rq_unlock(rq, rf);

	rq = cpu_rq(new_cpu);

	rq_lock(rq, rf);
	BUG_ON(task_cpu(p) != new_cpu);
	activate_task(rq, p, 0);
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct		*task;
	int				dest_cpu;
	struct set_affinity_pending	*pending;
};

struct set_affinity_pending {
	refcount_t		refs;
	struct completion	done;
	struct cpu_stop_work	stop_work;
	struct migration_arg	arg;
};

/*
 * Move (not current) task off this CPU, onto the destination CPU. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
				 struct task_struct *p, int dest_cpu)
{
	/* Affinity changed (again). */
	if (!is_cpu_allowed(p, dest_cpu))
		return rq;

	update_rq_clock(rq);
	rq = move_queued_task(rq, rf, p, dest_cpu);

	return rq;
}
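
/*
 * Illustrative sketch (hypothetical helper): how a caller kicks the stopper
 * to move a running task, following the migration steps listed above. The
 * stopper thread then runs migration_cpu_stop() below on p's CPU.
 */
#if 0
static void example_force_migrate(struct task_struct *p, int dest_cpu)
{
	struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };

	/* Blocks until migration_cpu_stop() has run on p's CPU. */
	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
}
#endif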
1947 */ 1948 if (!pending) { 1949 WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)); 1950 goto out; 1951 } 1952 1953 dest_cpu = cpumask_any_distribute(&p->cpus_mask); 1954 } 1955 1956 if (task_on_rq_queued(p)) 1957 rq = __migrate_task(rq, &rf, p, dest_cpu); 1958 else 1959 p->wake_cpu = dest_cpu; 1960 1961 } else if (dest_cpu < 0 || pending) { 1962 /* 1963 * This happens when we get migrated between migrate_enable()'s 1964 * preempt_enable() and scheduling the stopper task. At that 1965 * point we're a regular task again and not current anymore. 1966 * 1967 * A !PREEMPT kernel has a giant hole here, which makes it far 1968 * more likely. 1969 */ 1970 1971 /* 1972 * The task moved before the stopper got to run. We're holding 1973 * ->pi_lock, so the allowed mask is stable - if it got 1974 * somewhere allowed, we're done. 1975 */ 1976 if (pending && cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { 1977 p->migration_pending = NULL; 1978 complete = true; 1979 goto out; 1980 } 1981 1982 /* 1983 * When this was migrate_enable() but we no longer have an 1984 * @pending, a concurrent SCA 'fixed' things and we should be 1985 * valid again. Nothing to do. 1986 */ 1987 if (!pending) { 1988 WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)); 1989 goto out; 1990 } 1991 1992 /* 1993 * When migrate_enable() hits a rq mis-match we can't reliably 1994 * determine is_migration_disabled() and so have to chase after 1995 * it. 1996 */ 1997 task_rq_unlock(rq, p, &rf); 1998 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, 1999 &pending->arg, &pending->stop_work); 2000 return 0; 2001 } 2002 out: 2003 task_rq_unlock(rq, p, &rf); 2004 2005 if (complete) 2006 complete_all(&pending->done); 2007 2008 /* For pending->{arg,stop_work} */ 2009 pending = arg->pending; 2010 if (pending && refcount_dec_and_test(&pending->refs)) 2011 wake_up_var(&pending->refs); 2012 2013 return 0; 2014 } 2015 2016 int push_cpu_stop(void *arg) 2017 { 2018 struct rq *lowest_rq = NULL, *rq = this_rq(); 2019 struct task_struct *p = arg; 2020 2021 raw_spin_lock_irq(&p->pi_lock); 2022 raw_spin_lock(&rq->lock); 2023 2024 if (task_rq(p) != rq) 2025 goto out_unlock; 2026 2027 if (is_migration_disabled(p)) { 2028 p->migration_flags |= MDF_PUSH; 2029 goto out_unlock; 2030 } 2031 2032 p->migration_flags &= ~MDF_PUSH; 2033 2034 if (p->sched_class->find_lock_rq) 2035 lowest_rq = p->sched_class->find_lock_rq(p, rq); 2036 2037 if (!lowest_rq) 2038 goto out_unlock; 2039 2040 // XXX validate p is still the highest prio task 2041 if (task_rq(p) == rq) { 2042 deactivate_task(rq, p, 0); 2043 set_task_cpu(p, lowest_rq->cpu); 2044 activate_task(lowest_rq, p, 0); 2045 resched_curr(lowest_rq); 2046 } 2047 2048 double_unlock_balance(rq, lowest_rq); 2049 2050 out_unlock: 2051 rq->push_busy = false; 2052 raw_spin_unlock(&rq->lock); 2053 raw_spin_unlock_irq(&p->pi_lock); 2054 2055 put_task_struct(p); 2056 return 0; 2057 } 2058 2059 /* 2060 * sched_class::set_cpus_allowed must do the below, but is not required to 2061 * actually call this function. 
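 *
 * For example (a sketch of how a class might hook this up; whether a given
 * class does so is up to its implementation), a sched_class that needs no
 * extra bookkeeping can point its callback straight at this helper:
 *
 *	.set_cpus_allowed	= set_cpus_allowed_common,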
2062 */ 2063 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags) 2064 { 2065 if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { 2066 p->cpus_ptr = new_mask; 2067 return; 2068 } 2069 2070 cpumask_copy(&p->cpus_mask, new_mask); 2071 p->nr_cpus_allowed = cpumask_weight(new_mask); 2072 } 2073 2074 static void 2075 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags) 2076 { 2077 struct rq *rq = task_rq(p); 2078 bool queued, running; 2079 2080 /* 2081 * This here violates the locking rules for affinity, since we're only 2082 * supposed to change these variables while holding both rq->lock and 2083 * p->pi_lock. 2084 * 2085 * HOWEVER, it magically works, because ttwu() is the only code that 2086 * accesses these variables under p->pi_lock and only does so after 2087 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() 2088 * before finish_task(). 2089 * 2090 * XXX do further audits, this smells like something putrid. 2091 */ 2092 if (flags & SCA_MIGRATE_DISABLE) 2093 SCHED_WARN_ON(!p->on_cpu); 2094 else 2095 lockdep_assert_held(&p->pi_lock); 2096 2097 queued = task_on_rq_queued(p); 2098 running = task_current(rq, p); 2099 2100 if (queued) { 2101 /* 2102 * Because __kthread_bind() calls this on blocked tasks without 2103 * holding rq->lock. 2104 */ 2105 lockdep_assert_held(&rq->lock); 2106 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 2107 } 2108 if (running) 2109 put_prev_task(rq, p); 2110 2111 p->sched_class->set_cpus_allowed(p, new_mask, flags); 2112 2113 if (queued) 2114 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 2115 if (running) 2116 set_next_task(rq, p); 2117 } 2118 2119 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 2120 { 2121 __do_set_cpus_allowed(p, new_mask, 0); 2122 } 2123 2124 /* 2125 * This function is wildly self concurrent; here be dragons. 2126 * 2127 * 2128 * When given a valid mask, __set_cpus_allowed_ptr() must block until the 2129 * designated task is enqueued on an allowed CPU. If that task is currently 2130 * running, we have to kick it out using the CPU stopper. 2131 * 2132 * Migrate-Disable comes along and tramples all over our nice sandcastle. 2133 * Consider: 2134 * 2135 * Initial conditions: P0->cpus_mask = [0, 1] 2136 * 2137 * P0@CPU0 P1 2138 * 2139 * migrate_disable(); 2140 * <preempted> 2141 * set_cpus_allowed_ptr(P0, [1]); 2142 * 2143 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes 2144 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region). 2145 * This means we need the following scheme: 2146 * 2147 * P0@CPU0 P1 2148 * 2149 * migrate_disable(); 2150 * <preempted> 2151 * set_cpus_allowed_ptr(P0, [1]); 2152 * <blocks> 2153 * <resumes> 2154 * migrate_enable(); 2155 * __set_cpus_allowed_ptr(); 2156 * <wakes local stopper> 2157 * `--> <woken on migration completion> 2158 * 2159 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple 2160 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any 2161 * task p are serialized by p->pi_lock, which we can leverage: the one that 2162 * should come into effect at the end of the Migrate-Disable region is the last 2163 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask), 2164 * but we still need to properly signal those waiting tasks at the appropriate 2165 * moment. 2166 * 2167 * This is implemented using struct set_affinity_pending. 
The first
 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
 * set up an instance of that struct and install it on the targeted
 * task_struct. Any and all further callers will reuse that instance. Those
 * then wait for a completion signaled at the tail of the CPU stopper
 * callback (1), triggered at the end of the Migrate-Disable region (i.e.
 * outermost migrate_enable()).
 *
 *
 * (1) In the cases covered above. There is one more where the completion is
 * signaled within affine_move_task() itself: when a subsequent affinity request
 * cancels the need for an active migration. Consider:
 *
 *     Initial conditions: P0->cpus_mask = [0, 1]
 *
 *     P0@CPU0            P1                            P2
 *
 *     migrate_disable();
 *     <preempted>
 *                        set_cpus_allowed_ptr(P0, [1]);
 *                          <blocks>
 *                                                      set_cpus_allowed_ptr(P0, [0, 1]);
 *                                                        <signal completion>
 *                          <awakes>
 *
 * Note that the above is safe vs a concurrent migrate_enable(), as any
 * pending affinity completion is preceded by an uninstallation of
 * p->migration_pending done with p->pi_lock held.
 */
static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
			    int dest_cpu, unsigned int flags)
{
	struct set_affinity_pending my_pending = { }, *pending = NULL;
	struct migration_arg arg = {
		.task = p,
		.dest_cpu = dest_cpu,
	};
	bool complete = false;

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
		struct task_struct *push_task = NULL;

		if ((flags & SCA_MIGRATE_ENABLE) &&
		    (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
			rq->push_busy = true;
			push_task = get_task_struct(p);
		}

		pending = p->migration_pending;
		if (pending) {
			refcount_inc(&pending->refs);
			p->migration_pending = NULL;
			complete = true;
		}
		task_rq_unlock(rq, p, rf);

		if (push_task) {
			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
					    p, &rq->push_work);
		}

		if (complete)
			goto do_complete;

		return 0;
	}

	if (!(flags & SCA_MIGRATE_ENABLE)) {
		/* serialized by p->pi_lock */
		if (!p->migration_pending) {
			/* Install the request */
			refcount_set(&my_pending.refs, 1);
			init_completion(&my_pending.done);
			p->migration_pending = &my_pending;
		} else {
			pending = p->migration_pending;
			refcount_inc(&pending->refs);
		}
	}
	pending = p->migration_pending;
	/*
	 * - !MIGRATE_ENABLE:
	 *   we'll have installed a pending if there wasn't one already.
	 *
	 * - MIGRATE_ENABLE:
	 *   we're here because the current CPU isn't matching anymore;
	 *   the only way that can happen is because of a concurrent
	 *   set_cpus_allowed_ptr() call, which should then still be
	 *   pending completion.
	 *
	 * Either way, we really should have a @pending here.
2258 */ 2259 if (WARN_ON_ONCE(!pending)) { 2260 task_rq_unlock(rq, p, rf); 2261 return -EINVAL; 2262 } 2263 2264 if (flags & SCA_MIGRATE_ENABLE) { 2265 2266 refcount_inc(&pending->refs); /* pending->{arg,stop_work} */ 2267 p->migration_flags &= ~MDF_PUSH; 2268 task_rq_unlock(rq, p, rf); 2269 2270 pending->arg = (struct migration_arg) { 2271 .task = p, 2272 .dest_cpu = -1, 2273 .pending = pending, 2274 }; 2275 2276 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, 2277 &pending->arg, &pending->stop_work); 2278 2279 return 0; 2280 } 2281 2282 if (task_running(rq, p) || p->state == TASK_WAKING) { 2283 /* 2284 * Lessen races (and headaches) by delegating 2285 * is_migration_disabled(p) checks to the stopper, which will 2286 * run on the same CPU as said p. 2287 */ 2288 task_rq_unlock(rq, p, rf); 2289 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); 2290 2291 } else { 2292 2293 if (!is_migration_disabled(p)) { 2294 if (task_on_rq_queued(p)) 2295 rq = move_queued_task(rq, rf, p, dest_cpu); 2296 2297 p->migration_pending = NULL; 2298 complete = true; 2299 } 2300 task_rq_unlock(rq, p, rf); 2301 2302 do_complete: 2303 if (complete) 2304 complete_all(&pending->done); 2305 } 2306 2307 wait_for_completion(&pending->done); 2308 2309 if (refcount_dec_and_test(&pending->refs)) 2310 wake_up_var(&pending->refs); 2311 2312 /* 2313 * Block the original owner of &pending until all subsequent callers 2314 * have seen the completion and decremented the refcount 2315 */ 2316 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs)); 2317 2318 return 0; 2319 } 2320 2321 /* 2322 * Change a given task's CPU affinity. Migrate the thread to a 2323 * proper CPU and schedule it away if the CPU it's executing on 2324 * is removed from the allowed bitmask. 2325 * 2326 * NOTE: the caller must have a valid reference to the task, the 2327 * task must not exit() & deallocate itself prematurely. The 2328 * call is not atomic; no spinlocks may be held. 2329 */ 2330 static int __set_cpus_allowed_ptr(struct task_struct *p, 2331 const struct cpumask *new_mask, 2332 u32 flags) 2333 { 2334 const struct cpumask *cpu_valid_mask = cpu_active_mask; 2335 unsigned int dest_cpu; 2336 struct rq_flags rf; 2337 struct rq *rq; 2338 int ret = 0; 2339 2340 rq = task_rq_lock(p, &rf); 2341 update_rq_clock(rq); 2342 2343 if (p->flags & PF_KTHREAD || is_migration_disabled(p)) { 2344 /* 2345 * Kernel threads are allowed on online && !active CPUs, 2346 * however, during cpu-hot-unplug, even these might get pushed 2347 * away if not KTHREAD_IS_PER_CPU. 2348 * 2349 * Specifically, migration_disabled() tasks must not fail the 2350 * cpumask_any_and_distribute() pick below, esp. so on 2351 * SCA_MIGRATE_ENABLE, otherwise we'll not call 2352 * set_cpus_allowed_common() and actually reset p->cpus_ptr. 2353 */ 2354 cpu_valid_mask = cpu_online_mask; 2355 } 2356 2357 /* 2358 * Must re-check here, to close a race against __kthread_bind(), 2359 * sched_setaffinity() is not guaranteed to observe the flag. 2360 */ 2361 if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { 2362 ret = -EINVAL; 2363 goto out; 2364 } 2365 2366 if (!(flags & SCA_MIGRATE_ENABLE)) { 2367 if (cpumask_equal(&p->cpus_mask, new_mask)) 2368 goto out; 2369 2370 if (WARN_ON_ONCE(p == current && 2371 is_migration_disabled(p) && 2372 !cpumask_test_cpu(task_cpu(p), new_mask))) { 2373 ret = -EBUSY; 2374 goto out; 2375 } 2376 } 2377 2378 /* 2379 * Picking a ~random cpu helps in cases where we are changing affinity 2380 * for groups of tasks (ie. 
cpuset), so that load balancing is not 2381 * immediately required to distribute the tasks within their new mask. 2382 */ 2383 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask); 2384 if (dest_cpu >= nr_cpu_ids) { 2385 ret = -EINVAL; 2386 goto out; 2387 } 2388 2389 __do_set_cpus_allowed(p, new_mask, flags); 2390 2391 return affine_move_task(rq, p, &rf, dest_cpu, flags); 2392 2393 out: 2394 task_rq_unlock(rq, p, &rf); 2395 2396 return ret; 2397 } 2398 2399 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 2400 { 2401 return __set_cpus_allowed_ptr(p, new_mask, 0); 2402 } 2403 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 2404 2405 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 2406 { 2407 #ifdef CONFIG_SCHED_DEBUG 2408 /* 2409 * We should never call set_task_cpu() on a blocked task, 2410 * ttwu() will sort out the placement. 2411 */ 2412 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && 2413 !p->on_rq); 2414 2415 /* 2416 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, 2417 * because schedstat_wait_{start,end} rebase migrating task's wait_start 2418 * time relying on p->on_rq. 2419 */ 2420 WARN_ON_ONCE(p->state == TASK_RUNNING && 2421 p->sched_class == &fair_sched_class && 2422 (p->on_rq && !task_on_rq_migrating(p))); 2423 2424 #ifdef CONFIG_LOCKDEP 2425 /* 2426 * The caller should hold either p->pi_lock or rq->lock, when changing 2427 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 2428 * 2429 * sched_move_task() holds both and thus holding either pins the cgroup, 2430 * see task_group(). 2431 * 2432 * Furthermore, all task_rq users should acquire both locks, see 2433 * task_rq_lock(). 2434 */ 2435 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 2436 lockdep_is_held(&task_rq(p)->lock))); 2437 #endif 2438 /* 2439 * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 2440 */ 2441 WARN_ON_ONCE(!cpu_online(new_cpu)); 2442 2443 WARN_ON_ONCE(is_migration_disabled(p)); 2444 #endif 2445 2446 trace_sched_migrate_task(p, new_cpu); 2447 2448 if (task_cpu(p) != new_cpu) { 2449 if (p->sched_class->migrate_task_rq) 2450 p->sched_class->migrate_task_rq(p, new_cpu); 2451 p->se.nr_migrations++; 2452 rseq_migrate(p); 2453 perf_event_task_migrate(p); 2454 } 2455 2456 __set_task_cpu(p, new_cpu); 2457 } 2458 2459 #ifdef CONFIG_NUMA_BALANCING 2460 static void __migrate_swap_task(struct task_struct *p, int cpu) 2461 { 2462 if (task_on_rq_queued(p)) { 2463 struct rq *src_rq, *dst_rq; 2464 struct rq_flags srf, drf; 2465 2466 src_rq = task_rq(p); 2467 dst_rq = cpu_rq(cpu); 2468 2469 rq_pin_lock(src_rq, &srf); 2470 rq_pin_lock(dst_rq, &drf); 2471 2472 deactivate_task(src_rq, p, 0); 2473 set_task_cpu(p, cpu); 2474 activate_task(dst_rq, p, 0); 2475 check_preempt_curr(dst_rq, p, 0); 2476 2477 rq_unpin_lock(dst_rq, &drf); 2478 rq_unpin_lock(src_rq, &srf); 2479 2480 } else { 2481 /* 2482 * Task isn't running anymore; make it appear like we migrated 2483 * it before it went to sleep. This means on wakeup we make the 2484 * previous CPU our target instead of where it really is. 
2485 */ 2486 p->wake_cpu = cpu; 2487 } 2488 } 2489 2490 struct migration_swap_arg { 2491 struct task_struct *src_task, *dst_task; 2492 int src_cpu, dst_cpu; 2493 }; 2494 2495 static int migrate_swap_stop(void *data) 2496 { 2497 struct migration_swap_arg *arg = data; 2498 struct rq *src_rq, *dst_rq; 2499 int ret = -EAGAIN; 2500 2501 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) 2502 return -EAGAIN; 2503 2504 src_rq = cpu_rq(arg->src_cpu); 2505 dst_rq = cpu_rq(arg->dst_cpu); 2506 2507 double_raw_lock(&arg->src_task->pi_lock, 2508 &arg->dst_task->pi_lock); 2509 double_rq_lock(src_rq, dst_rq); 2510 2511 if (task_cpu(arg->dst_task) != arg->dst_cpu) 2512 goto unlock; 2513 2514 if (task_cpu(arg->src_task) != arg->src_cpu) 2515 goto unlock; 2516 2517 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) 2518 goto unlock; 2519 2520 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) 2521 goto unlock; 2522 2523 __migrate_swap_task(arg->src_task, arg->dst_cpu); 2524 __migrate_swap_task(arg->dst_task, arg->src_cpu); 2525 2526 ret = 0; 2527 2528 unlock: 2529 double_rq_unlock(src_rq, dst_rq); 2530 raw_spin_unlock(&arg->dst_task->pi_lock); 2531 raw_spin_unlock(&arg->src_task->pi_lock); 2532 2533 return ret; 2534 } 2535 2536 /* 2537 * Cross migrate two tasks 2538 */ 2539 int migrate_swap(struct task_struct *cur, struct task_struct *p, 2540 int target_cpu, int curr_cpu) 2541 { 2542 struct migration_swap_arg arg; 2543 int ret = -EINVAL; 2544 2545 arg = (struct migration_swap_arg){ 2546 .src_task = cur, 2547 .src_cpu = curr_cpu, 2548 .dst_task = p, 2549 .dst_cpu = target_cpu, 2550 }; 2551 2552 if (arg.src_cpu == arg.dst_cpu) 2553 goto out; 2554 2555 /* 2556 * These three tests are all lockless; this is OK since all of them 2557 * will be re-checked with proper locks held further down the line. 2558 */ 2559 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 2560 goto out; 2561 2562 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) 2563 goto out; 2564 2565 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) 2566 goto out; 2567 2568 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 2569 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 2570 2571 out: 2572 return ret; 2573 } 2574 #endif /* CONFIG_NUMA_BALANCING */ 2575 2576 /* 2577 * wait_task_inactive - wait for a thread to unschedule. 2578 * 2579 * If @match_state is nonzero, it's the @p->state value just checked and 2580 * not expected to change. If it changes, i.e. @p might have woken up, 2581 * then return zero. When we succeed in waiting for @p to be off its CPU, 2582 * we return a positive number (its total switch count). If a second call 2583 * a short while later returns the same number, the caller can be sure that 2584 * @p has remained unscheduled the whole time. 2585 * 2586 * The caller must ensure that the task *will* unschedule sometime soon, 2587 * else this function might spin for a *long* time. This function can't 2588 * be called with interrupts off, or it may introduce deadlock with 2589 * smp_call_function() if an IPI is sent by the same process we are 2590 * waiting to become inactive. 2591 */ 2592 unsigned long wait_task_inactive(struct task_struct *p, long match_state) 2593 { 2594 int running, queued; 2595 struct rq_flags rf; 2596 unsigned long ncsw; 2597 struct rq *rq; 2598 2599 for (;;) { 2600 /* 2601 * We do the initial early heuristics without holding 2602 * any task-queue locks at all. 
We'll only try to get 2603 * the runqueue lock when things look like they will 2604 * work out! 2605 */ 2606 rq = task_rq(p); 2607 2608 /* 2609 * If the task is actively running on another CPU 2610 * still, just relax and busy-wait without holding 2611 * any locks. 2612 * 2613 * NOTE! Since we don't hold any locks, it's not 2614 * even sure that "rq" stays as the right runqueue! 2615 * But we don't care, since "task_running()" will 2616 * return false if the runqueue has changed and p 2617 * is actually now running somewhere else! 2618 */ 2619 while (task_running(rq, p)) { 2620 if (match_state && unlikely(p->state != match_state)) 2621 return 0; 2622 cpu_relax(); 2623 } 2624 2625 /* 2626 * Ok, time to look more closely! We need the rq 2627 * lock now, to be *sure*. If we're wrong, we'll 2628 * just go back and repeat. 2629 */ 2630 rq = task_rq_lock(p, &rf); 2631 trace_sched_wait_task(p); 2632 running = task_running(rq, p); 2633 queued = task_on_rq_queued(p); 2634 ncsw = 0; 2635 if (!match_state || p->state == match_state) 2636 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 2637 task_rq_unlock(rq, p, &rf); 2638 2639 /* 2640 * If it changed from the expected state, bail out now. 2641 */ 2642 if (unlikely(!ncsw)) 2643 break; 2644 2645 /* 2646 * Was it really running after all now that we 2647 * checked with the proper locks actually held? 2648 * 2649 * Oops. Go back and try again.. 2650 */ 2651 if (unlikely(running)) { 2652 cpu_relax(); 2653 continue; 2654 } 2655 2656 /* 2657 * It's not enough that it's not actively running, 2658 * it must be off the runqueue _entirely_, and not 2659 * preempted! 2660 * 2661 * So if it was still runnable (but just not actively 2662 * running right now), it's preempted, and we should 2663 * yield - it could be a while. 2664 */ 2665 if (unlikely(queued)) { 2666 ktime_t to = NSEC_PER_SEC / HZ; 2667 2668 set_current_state(TASK_UNINTERRUPTIBLE); 2669 schedule_hrtimeout(&to, HRTIMER_MODE_REL); 2670 continue; 2671 } 2672 2673 /* 2674 * Ahh, all good. It wasn't running, and it wasn't 2675 * runnable, which means that it will never become 2676 * running in the future either. We're all done! 2677 */ 2678 break; 2679 } 2680 2681 return ncsw; 2682 } 2683 2684 /*** 2685 * kick_process - kick a running thread to enter/exit the kernel 2686 * @p: the to-be-kicked thread 2687 * 2688 * Cause a process which is running on another CPU to enter 2689 * kernel-mode, without any delay. (to get signals handled.) 2690 * 2691 * NOTE: this function doesn't have to take the runqueue lock, 2692 * because all it wants to ensure is that the remote task enters 2693 * the kernel. If the IPI races and the task has been migrated 2694 * to another CPU then no harm is done and the purpose has been 2695 * achieved as well. 2696 */ 2697 void kick_process(struct task_struct *p) 2698 { 2699 int cpu; 2700 2701 preempt_disable(); 2702 cpu = task_cpu(p); 2703 if ((cpu != smp_processor_id()) && task_curr(p)) 2704 smp_send_reschedule(cpu); 2705 preempt_enable(); 2706 } 2707 EXPORT_SYMBOL_GPL(kick_process); 2708 2709 /* 2710 * ->cpus_ptr is protected by both rq->lock and p->pi_lock 2711 * 2712 * A few notes on cpu_active vs cpu_online: 2713 * 2714 * - cpu_active must be a subset of cpu_online 2715 * 2716 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, 2717 * see __set_cpus_allowed_ptr(). At this point the newly online 2718 * CPU isn't yet part of the sched domains, and balancing will not 2719 * see it. 
 *
 * - on CPU-down we clear cpu_active() to mask the sched domains and
 *   avoid the load balancer placing new tasks on the to-be-removed
 *   CPU. Existing tasks will remain running there and will be taken
 *   off.
 *
 * This means that fallback selection must not select !active CPUs.
 * And it can assume that any active CPU must be online. Conversely
 * select_task_rq() below may allow selection of !active CPUs in order
 * to satisfy the above rules.
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the CPU is on has been offlined, cpu_to_node()
	 * will return -1. There is no CPU on the node, and we should
	 * select a CPU on another node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, p->cpus_ptr) {
			if (!is_cpu_allowed(p, dest_cpu))
				continue;

			goto out;
		}

		/* No more Mr. Nice Guy. */
		switch (state) {
		case cpuset:
			if (IS_ENABLED(CONFIG_CPUSETS)) {
				cpuset_cpus_allowed_fallback(p);
				state = possible;
				break;
			}
			fallthrough;
		case possible:
			/*
			 * XXX When called from select_task_rq() we only
			 * hold p->pi_lock and again violate locking order.
			 *
			 * More yuck to audit.
			 */
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock; ->cpus_ptr is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
{
	lockdep_assert_held(&p->pi_lock);

	if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
		cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
	else
		cpu = cpumask_any(p->cpus_ptr);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
	 * CPU.
	 *
	 * Since this is common to all placement strategies, this lives here.
 *
 * [ this allows ->select_task() to simply return task_cpu(p) and
 *   not worry about this generic constraint ]
 */
	if (unlikely(!is_cpu_allowed(p, cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	static struct lock_class_key stop_pi_lock;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;

		/*
		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
		 * adjust the effective priority of a task. As a result,
		 * rt_mutex_setprio() can trigger (RT) balancing operations,
		 * which can then trigger wakeups of the stop thread to push
		 * around the current task.
		 *
		 * The stop task itself will never be part of the PI-chain; it
		 * never blocks, therefore that ->pi_lock recursion is safe.
		 * Tell lockdep about this by placing the stop->pi_lock in its
		 * own class.
		 */
		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

#else /* CONFIG_SMP */

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
					 const struct cpumask *new_mask,
					 u32 flags)
{
	return set_cpus_allowed_ptr(p, new_mask);
}

static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }

static inline bool rq_has_pinned_tasks(struct rq *rq)
{
	return false;
}

#endif /* !CONFIG_SMP */

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
	struct rq *rq;

	if (!schedstat_enabled())
		return;

	rq = this_rq();

#ifdef CONFIG_SMP
	if (cpu == rq->cpu) {
		__schedstat_inc(rq->ttwu_local);
		__schedstat_inc(p->se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		__schedstat_inc(p->se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		for_each_domain(rq->cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				__schedstat_inc(sd->ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		__schedstat_inc(p->se.statistics.nr_wakeups_migrate);
#endif /* CONFIG_SMP */

	__schedstat_inc(rq->ttwu_count);
	__schedstat_inc(p->se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		__schedstat_inc(p->se.statistics.nr_wakeups_sync);
}

/*
 * Mark the task runnable and perform wakeup-preemption.
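 *
 * Both callers below, ttwu_do_activate() and ttwu_runnable(), invoke this
 * with the task's runqueue locked and pinned.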
 */
static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
			   struct rq_flags *rf)
{
	check_preempt_curr(rq, p, wake_flags);
	p->state = TASK_RUNNING;
	trace_sched_wakeup(p);

#ifdef CONFIG_SMP
	if (p->sched_class->task_woken) {
		/*
		 * Our task @p is fully woken up and running; so it's safe to
		 * drop the rq->lock, hereafter rq is only used for statistics.
		 */
		rq_unpin_lock(rq, rf);
		p->sched_class->task_woken(rq, p);
		rq_repin_lock(rq, rf);
	}

	if (rq->idle_stamp) {
		u64 delta = rq_clock(rq) - rq->idle_stamp;
		u64 max = 2*rq->max_idle_balance_cost;

		update_avg(&rq->avg_idle, delta);

		if (rq->avg_idle > max)
			rq->avg_idle = max;

		rq->idle_stamp = 0;
	}
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
		 struct rq_flags *rf)
{
	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;

	lockdep_assert_held(&rq->lock);

	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;

#ifdef CONFIG_SMP
	if (wake_flags & WF_MIGRATED)
		en_flags |= ENQUEUE_MIGRATED;
	else
#endif
	if (p->in_iowait) {
		delayacct_blkio_end(p);
		atomic_dec(&task_rq(p)->nr_iowait);
	}

	activate_task(rq, p, en_flags);
	ttwu_do_wakeup(rq, p, wake_flags, rf);
}

/*
 * Consider @p being inside a wait loop:
 *
 *   for (;;) {
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *
 *      if (CONDITION)
 *         break;
 *
 *      schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * between set_current_state() and schedule(). In this case @p is still
 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
 * an atomic manner.
 *
 * By taking task_rq(p)->lock we serialize against schedule(); if @p->on_rq
 * then schedule() must still happen and p->state can be changed to
 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
 * need to do a full wakeup with enqueue.
 *
 * Returns: %true when the wakeup is done,
 *          %false otherwise.
 */
static int ttwu_runnable(struct task_struct *p, int wake_flags)
{
	struct rq_flags rf;
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p, &rf);
	if (task_on_rq_queued(p)) {
		/* check_preempt_curr() may use rq clock */
		update_rq_clock(rq);
		ttwu_do_wakeup(rq, p, wake_flags, &rf);
		ret = 1;
	}
	__task_rq_unlock(rq, &rf);

	return ret;
}

#ifdef CONFIG_SMP
void sched_ttwu_pending(void *arg)
{
	struct llist_node *llist = arg;
	struct rq *rq = this_rq();
	struct task_struct *p, *t;
	struct rq_flags rf;

	if (!llist)
		return;

	/*
	 * rq::ttwu_pending is a racy indication of outstanding wakeups.
	 * Races are such that false-negatives are possible, since they
	 * are shorter lived than false-positives would be.
	 */
	WRITE_ONCE(rq->ttwu_pending, 0);

	rq_lock_irqsave(rq, &rf);
	update_rq_clock(rq);

	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
		if (WARN_ON_ONCE(p->on_cpu))
			smp_cond_load_acquire(&p->on_cpu, !VAL);

		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
			set_task_cpu(p, cpu_of(rq));

		ttwu_do_activate(rq, p, p->sched_remote_wakeup ?
				 WF_MIGRATED : 0, &rf);
	}

	rq_unlock_irqrestore(rq, &rf);
}

void send_call_function_single_ipi(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (!set_nr_if_polling(rq->idle))
		arch_send_call_function_single_ipi(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

/*
 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
 * necessary. The wakee CPU on receipt of the IPI will queue the task
 * via sched_ttwu_pending() for activation so the wakee incurs the cost
 * of the wakeup instead of the waker.
 */
static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
	struct rq *rq = cpu_rq(cpu);

	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);

	WRITE_ONCE(rq->ttwu_pending, 1);
	__smp_call_single_queue(cpu, &p->wake_entry.llist);
}

void wake_up_if_idle(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	rcu_read_lock();

	if (!is_idle_task(rcu_dereference(rq->curr)))
		goto out;

	if (set_nr_if_polling(rq->idle)) {
		trace_sched_wake_idle_without_ipi(cpu);
	} else {
		rq_lock_irqsave(rq, &rf);
		if (is_idle_task(rq->curr))
			smp_send_reschedule(cpu);
		/* Else CPU is not idle, do nothing here: */
		rq_unlock_irqrestore(rq, &rf);
	}

out:
	rcu_read_unlock();
}

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}

static inline bool ttwu_queue_cond(int cpu, int wake_flags)
{
	/*
	 * Do not complicate things with the async wake_list while the CPU is
	 * in hotplug state.
	 */
	if (!cpu_active(cpu))
		return false;

	/*
	 * If the CPU does not share cache, then queue the task on the
	 * remote rq's wakelist to avoid accessing remote data.
	 */
	if (!cpus_share_cache(smp_processor_id(), cpu))
		return true;

	/*
	 * If the task is descheduling and is the only running task on the
	 * CPU, then use the wakelist to offload the task activation to
	 * the soon-to-be-idle CPU as the current CPU is likely busy.
	 * nr_running is checked to avoid unnecessary task stacking.
	 */
	if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
		return true;

	return false;
}

static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
		if (WARN_ON_ONCE(cpu == smp_processor_id()))
			return false;

		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
		__ttwu_queue_wakelist(p, cpu, wake_flags);
		return true;
	}

	return false;
}

#else /* !CONFIG_SMP */

static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
	return false;
}

#endif /* CONFIG_SMP */

static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	if (ttwu_queue_wakelist(p, cpu, wake_flags))
		return;

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	ttwu_do_activate(rq, p, wake_flags, &rf);
	rq_unlock(rq, &rf);
}

/*
 * Notes on Program-Order guarantees on SMP systems.
3196 * 3197 * MIGRATION 3198 * 3199 * The basic program-order guarantee on SMP systems is that when a task [t] 3200 * migrates, all its activity on its old CPU [c0] happens-before any subsequent 3201 * execution on its new CPU [c1]. 3202 * 3203 * For migration (of runnable tasks) this is provided by the following means: 3204 * 3205 * A) UNLOCK of the rq(c0)->lock scheduling out task t 3206 * B) migration for t is required to synchronize *both* rq(c0)->lock and 3207 * rq(c1)->lock (if not at the same time, then in that order). 3208 * C) LOCK of the rq(c1)->lock scheduling in task 3209 * 3210 * Release/acquire chaining guarantees that B happens after A and C after B. 3211 * Note: the CPU doing B need not be c0 or c1 3212 * 3213 * Example: 3214 * 3215 * CPU0 CPU1 CPU2 3216 * 3217 * LOCK rq(0)->lock 3218 * sched-out X 3219 * sched-in Y 3220 * UNLOCK rq(0)->lock 3221 * 3222 * LOCK rq(0)->lock // orders against CPU0 3223 * dequeue X 3224 * UNLOCK rq(0)->lock 3225 * 3226 * LOCK rq(1)->lock 3227 * enqueue X 3228 * UNLOCK rq(1)->lock 3229 * 3230 * LOCK rq(1)->lock // orders against CPU2 3231 * sched-out Z 3232 * sched-in X 3233 * UNLOCK rq(1)->lock 3234 * 3235 * 3236 * BLOCKING -- aka. SLEEP + WAKEUP 3237 * 3238 * For blocking we (obviously) need to provide the same guarantee as for 3239 * migration. However the means are completely different as there is no lock 3240 * chain to provide order. Instead we do: 3241 * 3242 * 1) smp_store_release(X->on_cpu, 0) -- finish_task() 3243 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up() 3244 * 3245 * Example: 3246 * 3247 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 3248 * 3249 * LOCK rq(0)->lock LOCK X->pi_lock 3250 * dequeue X 3251 * sched-out X 3252 * smp_store_release(X->on_cpu, 0); 3253 * 3254 * smp_cond_load_acquire(&X->on_cpu, !VAL); 3255 * X->state = WAKING 3256 * set_task_cpu(X,2) 3257 * 3258 * LOCK rq(2)->lock 3259 * enqueue X 3260 * X->state = RUNNING 3261 * UNLOCK rq(2)->lock 3262 * 3263 * LOCK rq(2)->lock // orders against CPU1 3264 * sched-out Z 3265 * sched-in X 3266 * UNLOCK rq(2)->lock 3267 * 3268 * UNLOCK X->pi_lock 3269 * UNLOCK rq(0)->lock 3270 * 3271 * 3272 * However, for wakeups there is a second guarantee we must provide, namely we 3273 * must ensure that CONDITION=1 done by the caller can not be reordered with 3274 * accesses to the task state; see try_to_wake_up() and set_current_state(). 3275 */ 3276 3277 /** 3278 * try_to_wake_up - wake up a thread 3279 * @p: the thread to be awakened 3280 * @state: the mask of task states that can be woken 3281 * @wake_flags: wake modifier flags (WF_*) 3282 * 3283 * Conceptually does: 3284 * 3285 * If (@state & @p->state) @p->state = TASK_RUNNING. 3286 * 3287 * If the task was not queued/runnable, also place it back on a runqueue. 3288 * 3289 * This function is atomic against schedule() which would dequeue the task. 3290 * 3291 * It issues a full memory barrier before accessing @p->state, see the comment 3292 * with set_current_state(). 3293 * 3294 * Uses p->pi_lock to serialize against concurrent wake-ups. 3295 * 3296 * Relies on p->pi_lock stabilizing: 3297 * - p->sched_class 3298 * - p->cpus_ptr 3299 * - p->sched_task_group 3300 * in order to do migration, see its use of select_task_rq()/set_task_cpu(). 3301 * 3302 * Tries really hard to only take one task_rq(p)->lock for performance. 
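 *
 * A typical waker (sketch) pairs with the wait loop shown above
 * ttwu_runnable():
 *
 *	CONDITION = 1;
 *	wake_up_process(p);
 *
 * where wake_up_process() is the try_to_wake_up(p, TASK_NORMAL, 0) wrapper
 * defined further down.
 *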
 * Takes rq->lock in:
 *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
 *  - ttwu_queue()       -- new rq, for enqueue of the task;
 *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
 *
 * As a consequence we race really badly with just about everything. See the
 * many memory barriers and their comments for details.
 *
 * Return: %true if @p->state changes (an actual wakeup was done),
 *	   %false otherwise.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int cpu, success = 0;

	preempt_disable();
	if (p == current) {
		/*
		 * We're waking current; this means 'p->on_rq' and 'task_cpu(p)
		 * == smp_processor_id()'. Together this means we can special
		 * case the whole 'p->on_rq && ttwu_runnable()' case below
		 * without taking any locks.
		 *
		 * In particular:
		 *  - we rely on Program-Order guarantees for all the ordering,
		 *  - we're serialized against set_special_state() by virtue of
		 *    it disabling IRQs (this allows not taking ->pi_lock).
		 */
		if (!(p->state & state))
			goto out;

		success = 1;
		trace_sched_waking(p);
		p->state = TASK_RUNNING;
		trace_sched_wakeup(p);
		goto out;
	}

	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller cannot be
	 * reordered with the p->state check below. This pairs with smp_store_mb()
	 * in set_current_state() that the waiting thread does.
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	smp_mb__after_spinlock();
	if (!(p->state & state))
		goto unlock;

	trace_sched_waking(p);

	/* We're going to change ->state: */
	success = 1;

	/*
	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
	 * in smp_cond_load_acquire() below.
	 *
	 * sched_ttwu_pending()			try_to_wake_up()
	 *   STORE p->on_rq = 1			  LOAD p->state
	 *   UNLOCK rq->lock
	 *
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock			  smp_rmb();
	 *   smp_mb__after_spinlock();
	 *   UNLOCK rq->lock
	 *
	 * [task p]
	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
	 *
	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
	 * __schedule(). See the comment for smp_mb__after_spinlock().
	 *
	 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
	 */
	smp_rmb();
	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
		goto unlock;

#ifdef CONFIG_SMP
	/*
	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
	 * possible to, falsely, observe p->on_cpu == 0.
	 *
	 * One must be running (->on_cpu == 1) in order to remove oneself
	 * from the runqueue.
	 *
	 * __schedule() (switch to task 'p')	try_to_wake_up()
	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
	 *   UNLOCK rq->lock
	 *
	 * __schedule() (put 'p' to sleep)
	 *   LOCK rq->lock			  smp_rmb();
	 *   smp_mb__after_spinlock();
	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
	 *
	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
	 * __schedule(). See the comment for smp_mb__after_spinlock().
	 *
	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
	 * schedule()'s deactivate_task() has 'happened' and p will no longer
	 * care about its own p->state. See the comment in __schedule().
	 */
	smp_acquire__after_ctrl_dep();

	/*
	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
	 * == 0), which means we need to do an enqueue, change p->state to
	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
	 * enqueue, such as ttwu_queue_wakelist().
	 */
	p->state = TASK_WAKING;

	/*
	 * If the owning (remote) CPU is still in the middle of schedule() with
	 * this task as prev, consider queueing p on the remote CPU's wake_list
	 * which potentially sends an IPI instead of spinning on p->on_cpu to
	 * let the waker make forward progress. This is safe because IRQs are
	 * disabled and the IPI will deliver after on_cpu is cleared.
	 *
	 * Ensure we load task_cpu(p) after p->on_cpu:
	 *
	 * set_task_cpu(p, cpu);
	 *   STORE p->cpu = @cpu
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock
	 *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
	 *   STORE p->on_cpu = 1		LOAD p->cpu
	 *
	 * to ensure we observe the correct CPU on which the task is currently
	 * scheduling.
	 */
	if (smp_load_acquire(&p->on_cpu) &&
	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
		goto unlock;

	/*
	 * If the owning (remote) CPU is still in the middle of schedule() with
	 * this task as prev, wait until it's done referencing the task.
	 *
	 * Pairs with the smp_store_release() in finish_task().
	 *
	 * This ensures that tasks getting woken will be fully ordered against
	 * their previous state and preserve Program Order.
	 */
	smp_cond_load_acquire(&p->on_cpu, !VAL);

	cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
	if (task_cpu(p) != cpu) {
		if (p->in_iowait) {
			delayacct_blkio_end(p);
			atomic_dec(&task_rq(p)->nr_iowait);
		}

		wake_flags |= WF_MIGRATED;
		psi_ttwu_dequeue(p);
		set_task_cpu(p, cpu);
	}
#else
	cpu = task_cpu(p);
#endif /* CONFIG_SMP */

	ttwu_queue(p, cpu, wake_flags);
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out:
	if (success)
		ttwu_stat(p, task_cpu(p), wake_flags);
	preempt_enable();

	return success;
}

/**
 * try_invoke_on_locked_down_task - Invoke a function on a task in a fixed state
 * @p: Process for which the function is to be invoked.
 * @func: Function to invoke.
 * @arg: Argument to function.
 *
 * If the specified task can be quickly locked into a definite state
 * (either sleeping or on a given runqueue), arrange to keep it in that
 * state while invoking @func(@arg). This function can use ->on_rq and
 * task_curr() to work out what the state is, if required. Given that
 * @func can be invoked with a runqueue lock held, it had better be quite
 * lightweight.
 *
 * Returns:
 *	%false if the task slipped out from under the locks.
 *	%true if the task was locked onto a runqueue or is sleeping.
 *	However, @func can override this by returning %false.
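 *
 * Illustrative use (a sketch; fetch_switch_count() and its argument are
 * hypothetical, not an API defined in this file):
 *
 *	static bool fetch_switch_count(struct task_struct *t, void *arg)
 *	{
 *		*(unsigned long *)arg = t->nvcsw + t->nivcsw;
 *		return true;
 *	}
 *
 *	unsigned long count;
 *	bool ok = try_invoke_on_locked_down_task(p, fetch_switch_count, &count);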
3496 */ 3497 bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg) 3498 { 3499 bool ret = false; 3500 struct rq_flags rf; 3501 struct rq *rq; 3502 3503 lockdep_assert_irqs_enabled(); 3504 raw_spin_lock_irq(&p->pi_lock); 3505 if (p->on_rq) { 3506 rq = __task_rq_lock(p, &rf); 3507 if (task_rq(p) == rq) 3508 ret = func(p, arg); 3509 rq_unlock(rq, &rf); 3510 } else { 3511 switch (p->state) { 3512 case TASK_RUNNING: 3513 case TASK_WAKING: 3514 break; 3515 default: 3516 smp_rmb(); // See smp_rmb() comment in try_to_wake_up(). 3517 if (!p->on_rq) 3518 ret = func(p, arg); 3519 } 3520 } 3521 raw_spin_unlock_irq(&p->pi_lock); 3522 return ret; 3523 } 3524 3525 /** 3526 * wake_up_process - Wake up a specific process 3527 * @p: The process to be woken up. 3528 * 3529 * Attempt to wake up the nominated process and move it to the set of runnable 3530 * processes. 3531 * 3532 * Return: 1 if the process was woken up, 0 if it was already running. 3533 * 3534 * This function executes a full memory barrier before accessing the task state. 3535 */ 3536 int wake_up_process(struct task_struct *p) 3537 { 3538 return try_to_wake_up(p, TASK_NORMAL, 0); 3539 } 3540 EXPORT_SYMBOL(wake_up_process); 3541 3542 int wake_up_state(struct task_struct *p, unsigned int state) 3543 { 3544 return try_to_wake_up(p, state, 0); 3545 } 3546 3547 /* 3548 * Perform scheduler related setup for a newly forked process p. 3549 * p is forked by current. 3550 * 3551 * __sched_fork() is basic setup used by init_idle() too: 3552 */ 3553 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 3554 { 3555 p->on_rq = 0; 3556 3557 p->se.on_rq = 0; 3558 p->se.exec_start = 0; 3559 p->se.sum_exec_runtime = 0; 3560 p->se.prev_sum_exec_runtime = 0; 3561 p->se.nr_migrations = 0; 3562 p->se.vruntime = 0; 3563 INIT_LIST_HEAD(&p->se.group_node); 3564 3565 #ifdef CONFIG_FAIR_GROUP_SCHED 3566 p->se.cfs_rq = NULL; 3567 #endif 3568 3569 #ifdef CONFIG_SCHEDSTATS 3570 /* Even if schedstat is disabled, there should not be garbage */ 3571 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 3572 #endif 3573 3574 RB_CLEAR_NODE(&p->dl.rb_node); 3575 init_dl_task_timer(&p->dl); 3576 init_dl_inactive_task_timer(&p->dl); 3577 __dl_clear_params(p); 3578 3579 INIT_LIST_HEAD(&p->rt.run_list); 3580 p->rt.timeout = 0; 3581 p->rt.time_slice = sched_rr_timeslice; 3582 p->rt.on_rq = 0; 3583 p->rt.on_list = 0; 3584 3585 #ifdef CONFIG_PREEMPT_NOTIFIERS 3586 INIT_HLIST_HEAD(&p->preempt_notifiers); 3587 #endif 3588 3589 #ifdef CONFIG_COMPACTION 3590 p->capture_control = NULL; 3591 #endif 3592 init_numa_balancing(clone_flags, p); 3593 #ifdef CONFIG_SMP 3594 p->wake_entry.u_flags = CSD_TYPE_TTWU; 3595 p->migration_pending = NULL; 3596 #endif 3597 } 3598 3599 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 3600 3601 #ifdef CONFIG_NUMA_BALANCING 3602 3603 void set_numabalancing_state(bool enabled) 3604 { 3605 if (enabled) 3606 static_branch_enable(&sched_numa_balancing); 3607 else 3608 static_branch_disable(&sched_numa_balancing); 3609 } 3610 3611 #ifdef CONFIG_PROC_SYSCTL 3612 int sysctl_numa_balancing(struct ctl_table *table, int write, 3613 void *buffer, size_t *lenp, loff_t *ppos) 3614 { 3615 struct ctl_table t; 3616 int err; 3617 int state = static_branch_likely(&sched_numa_balancing); 3618 3619 if (write && !capable(CAP_SYS_ADMIN)) 3620 return -EPERM; 3621 3622 t = *table; 3623 t.data = &state; 3624 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 3625 if (err < 0) 3626 return err; 
3627 if (write) 3628 set_numabalancing_state(state); 3629 return err; 3630 } 3631 #endif 3632 #endif 3633 3634 #ifdef CONFIG_SCHEDSTATS 3635 3636 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 3637 static bool __initdata __sched_schedstats = false; 3638 3639 static void set_schedstats(bool enabled) 3640 { 3641 if (enabled) 3642 static_branch_enable(&sched_schedstats); 3643 else 3644 static_branch_disable(&sched_schedstats); 3645 } 3646 3647 void force_schedstat_enabled(void) 3648 { 3649 if (!schedstat_enabled()) { 3650 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 3651 static_branch_enable(&sched_schedstats); 3652 } 3653 } 3654 3655 static int __init setup_schedstats(char *str) 3656 { 3657 int ret = 0; 3658 if (!str) 3659 goto out; 3660 3661 /* 3662 * This code is called before jump labels have been set up, so we can't 3663 * change the static branch directly just yet. Instead set a temporary 3664 * variable so init_schedstats() can do it later. 3665 */ 3666 if (!strcmp(str, "enable")) { 3667 __sched_schedstats = true; 3668 ret = 1; 3669 } else if (!strcmp(str, "disable")) { 3670 __sched_schedstats = false; 3671 ret = 1; 3672 } 3673 out: 3674 if (!ret) 3675 pr_warn("Unable to parse schedstats=\n"); 3676 3677 return ret; 3678 } 3679 __setup("schedstats=", setup_schedstats); 3680 3681 static void __init init_schedstats(void) 3682 { 3683 set_schedstats(__sched_schedstats); 3684 } 3685 3686 #ifdef CONFIG_PROC_SYSCTL 3687 int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, 3688 size_t *lenp, loff_t *ppos) 3689 { 3690 struct ctl_table t; 3691 int err; 3692 int state = static_branch_likely(&sched_schedstats); 3693 3694 if (write && !capable(CAP_SYS_ADMIN)) 3695 return -EPERM; 3696 3697 t = *table; 3698 t.data = &state; 3699 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 3700 if (err < 0) 3701 return err; 3702 if (write) 3703 set_schedstats(state); 3704 return err; 3705 } 3706 #endif /* CONFIG_PROC_SYSCTL */ 3707 #else /* !CONFIG_SCHEDSTATS */ 3708 static inline void init_schedstats(void) {} 3709 #endif /* CONFIG_SCHEDSTATS */ 3710 3711 /* 3712 * fork()/clone()-time setup: 3713 */ 3714 int sched_fork(unsigned long clone_flags, struct task_struct *p) 3715 { 3716 unsigned long flags; 3717 3718 __sched_fork(clone_flags, p); 3719 /* 3720 * We mark the process as NEW here. This guarantees that 3721 * nobody will actually run it, and a signal or other external 3722 * event cannot wake it up and insert it on the runqueue either. 3723 */ 3724 p->state = TASK_NEW; 3725 3726 /* 3727 * Make sure we do not leak PI boosting priority to the child. 3728 */ 3729 p->prio = current->normal_prio; 3730 3731 uclamp_fork(p); 3732 3733 /* 3734 * Revert to default priority/policy on fork if requested. 3735 */ 3736 if (unlikely(p->sched_reset_on_fork)) { 3737 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 3738 p->policy = SCHED_NORMAL; 3739 p->static_prio = NICE_TO_PRIO(0); 3740 p->rt_priority = 0; 3741 } else if (PRIO_TO_NICE(p->static_prio) < 0) 3742 p->static_prio = NICE_TO_PRIO(0); 3743 3744 p->prio = p->normal_prio = __normal_prio(p); 3745 set_load_weight(p, false); 3746 3747 /* 3748 * We don't need the reset flag anymore after the fork. 
It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;
	}

	if (dl_prio(p->prio))
		return -EAGAIN;
	else if (rt_prio(p->prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;

	init_entity_runnable_average(&p->se);

	/*
	 * The child is not yet in the pid-hash so no cgroup attach races,
	 * and the cgroup is pinned to this child because cgroup_fork()
	 * is run before sched_fork().
	 *
	 * Silence PROVE_RCU.
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	rseq_migrate(p);
	/*
	 * We're setting the CPU for the first time, we don't migrate,
	 * so use __set_task_cpu().
	 */
	__set_task_cpu(p, smp_processor_id());
	if (p->sched_class->task_fork)
		p->sched_class->task_fork(p);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

#ifdef CONFIG_SCHED_INFO
	if (likely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
	p->on_cpu = 0;
#endif
	init_task_preempt_count(p);
#ifdef CONFIG_SMP
	plist_node_init(&p->pushable_tasks, MAX_PRIO);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
#endif
	return 0;
}

void sched_post_fork(struct task_struct *p)
{
	uclamp_post_fork(p);
}

unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return BW_UNIT;

	/*
	 * Doing this here saves a lot of checks in all
	 * the calling paths, and returning zero seems
	 * safe for them anyway.
	 */
	if (period == 0)
		return 0;

	return div64_u64(runtime << BW_SHIFT, period);
}

/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then put the task
 * on the runqueue and wake it.
 */
void wake_up_new_task(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
	p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_ptr can change in the fork path
	 *  - any previously selected CPU might disappear through hotplug
	 *
	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
	 * as we're not fully set-up yet.
	 */
	p->recent_used_cpu = task_cpu(p);
	rseq_migrate(p);
	__set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
#endif
	rq = __task_rq_lock(p, &rf);
	update_rq_clock(rq);
	post_init_entity_util_avg(p);

	activate_task(rq, p, ENQUEUE_NOCLOCK);
	trace_sched_wakeup_new(p);
	check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken) {
		/*
		 * Nothing relies on rq->lock after this, so it's fine to
		 * drop it.
		 */
		rq_unpin_lock(rq, &rf);
		p->sched_class->task_woken(rq, p);
		rq_repin_lock(rq, &rf);
	}
#endif
	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_PREEMPT_NOTIFIERS

static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);

void preempt_notifier_inc(void)
{
	static_branch_inc(&preempt_notifier_key);
}
EXPORT_SYMBOL_GPL(preempt_notifier_inc);

void preempt_notifier_dec(void)
{
	static_branch_dec(&preempt_notifier_key);
}
EXPORT_SYMBOL_GPL(preempt_notifier_dec);

/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
	if (!static_branch_unlikely(&preempt_notifier_key))
		WARN(1, "registering preempt_notifier while notifiers disabled\n");

	hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is *not* safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
	hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);

static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	if (static_branch_unlikely(&preempt_notifier_key))
		__fire_sched_in_preempt_notifiers(curr);
}

static void
__fire_sched_out_preempt_notifiers(struct task_struct *curr,
				   struct task_struct *next)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}

static __always_inline void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	if (static_branch_unlikely(&preempt_notifier_key))
		__fire_sched_out_preempt_notifiers(curr, next);
}

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static inline void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */

static inline void prepare_task(struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * Claim the task as running; we do this before switching to it
	 * such that any running task will have this set.
	 *
	 * See the ttwu() WF_ON_CPU case and its ordering comment.
	 */
	WRITE_ONCE(next->on_cpu, 1);
#endif
}

static inline void finish_task(struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * This must be the very last reference to @prev from this CPU. After
	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
	 * must ensure this doesn't happen until the switch is completely
	 * finished.
3973 * 3974 * In particular, the load of prev->state in finish_task_switch() must 3975 * happen before this. 3976 * 3977 * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 3978 */ 3979 smp_store_release(&prev->on_cpu, 0); 3980 #endif 3981 } 3982 3983 #ifdef CONFIG_SMP 3984 3985 static void do_balance_callbacks(struct rq *rq, struct callback_head *head) 3986 { 3987 void (*func)(struct rq *rq); 3988 struct callback_head *next; 3989 3990 lockdep_assert_held(&rq->lock); 3991 3992 while (head) { 3993 func = (void (*)(struct rq *))head->func; 3994 next = head->next; 3995 head->next = NULL; 3996 head = next; 3997 3998 func(rq); 3999 } 4000 } 4001 4002 static void balance_push(struct rq *rq); 4003 4004 struct callback_head balance_push_callback = { 4005 .next = NULL, 4006 .func = (void (*)(struct callback_head *))balance_push, 4007 }; 4008 4009 static inline struct callback_head *splice_balance_callbacks(struct rq *rq) 4010 { 4011 struct callback_head *head = rq->balance_callback; 4012 4013 lockdep_assert_held(&rq->lock); 4014 if (head) 4015 rq->balance_callback = NULL; 4016 4017 return head; 4018 } 4019 4020 static void __balance_callbacks(struct rq *rq) 4021 { 4022 do_balance_callbacks(rq, splice_balance_callbacks(rq)); 4023 } 4024 4025 static inline void balance_callbacks(struct rq *rq, struct callback_head *head) 4026 { 4027 unsigned long flags; 4028 4029 if (unlikely(head)) { 4030 raw_spin_lock_irqsave(&rq->lock, flags); 4031 do_balance_callbacks(rq, head); 4032 raw_spin_unlock_irqrestore(&rq->lock, flags); 4033 } 4034 } 4035 4036 #else 4037 4038 static inline void __balance_callbacks(struct rq *rq) 4039 { 4040 } 4041 4042 static inline struct callback_head *splice_balance_callbacks(struct rq *rq) 4043 { 4044 return NULL; 4045 } 4046 4047 static inline void balance_callbacks(struct rq *rq, struct callback_head *head) 4048 { 4049 } 4050 4051 #endif 4052 4053 static inline void 4054 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) 4055 { 4056 /* 4057 * The runqueue lock will be released by the next 4058 * task (which is an invalid locking op but in the case 4059 * of the scheduler it's an obvious special-case), so we 4060 * do an early lockdep release here: 4061 */ 4062 rq_unpin_lock(rq, rf); 4063 spin_release(&rq->lock.dep_map, _THIS_IP_); 4064 #ifdef CONFIG_DEBUG_SPINLOCK 4065 /* this is a valid case when another task releases the spinlock */ 4066 rq->lock.owner = next; 4067 #endif 4068 } 4069 4070 static inline void finish_lock_switch(struct rq *rq) 4071 { 4072 /* 4073 * If we are tracking spinlock dependencies then we have to 4074 * fix up the runqueue lock - which gets 'carried over' from 4075 * prev into current: 4076 */ 4077 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 4078 __balance_callbacks(rq); 4079 raw_spin_unlock_irq(&rq->lock); 4080 } 4081 4082 /* 4083 * NOP if the arch has not defined these: 4084 */ 4085 4086 #ifndef prepare_arch_switch 4087 # define prepare_arch_switch(next) do { } while (0) 4088 #endif 4089 4090 #ifndef finish_arch_post_lock_switch 4091 # define finish_arch_post_lock_switch() do { } while (0) 4092 #endif 4093 4094 static inline void kmap_local_sched_out(void) 4095 { 4096 #ifdef CONFIG_KMAP_LOCAL 4097 if (unlikely(current->kmap_ctrl.idx)) 4098 __kmap_local_sched_out(); 4099 #endif 4100 } 4101 4102 static inline void kmap_local_sched_in(void) 4103 { 4104 #ifdef CONFIG_KMAP_LOCAL 4105 if (unlikely(current->kmap_ctrl.idx)) 4106 __kmap_local_sched_in(); 4107 #endif 4108 } 4109 4110 /** 4111 * prepare_task_switch
- prepare to switch tasks 4112 * @rq: the runqueue preparing to switch 4113 * @prev: the current task that is being switched out 4114 * @next: the task we are going to switch to. 4115 * 4116 * This is called with the rq lock held and interrupts off. It must 4117 * be paired with a subsequent finish_task_switch after the context 4118 * switch. 4119 * 4120 * prepare_task_switch sets up locking and calls architecture specific 4121 * hooks. 4122 */ 4123 static inline void 4124 prepare_task_switch(struct rq *rq, struct task_struct *prev, 4125 struct task_struct *next) 4126 { 4127 kcov_prepare_switch(prev); 4128 sched_info_switch(rq, prev, next); 4129 perf_event_task_sched_out(prev, next); 4130 rseq_preempt(prev); 4131 fire_sched_out_preempt_notifiers(prev, next); 4132 kmap_local_sched_out(); 4133 prepare_task(next); 4134 prepare_arch_switch(next); 4135 } 4136 4137 /** 4138 * finish_task_switch - clean up after a task-switch 4139 * @prev: the thread we just switched away from. 4140 * 4141 * finish_task_switch must be called after the context switch, paired 4142 * with a prepare_task_switch call before the context switch. 4143 * finish_task_switch will reconcile locking set up by prepare_task_switch, 4144 * and do any other architecture-specific cleanup actions. 4145 * 4146 * Note that we may have delayed dropping an mm in context_switch(). If 4147 * so, we finish that here outside of the runqueue lock. (Doing it 4148 * with the lock held can cause deadlocks; see schedule() for 4149 * details.) 4150 * 4151 * The context switch has flipped the stack from under us and restored the 4152 * local variables which were saved when this task called schedule() in the 4153 * past. prev == current is still correct but we need to recalculate this_rq 4154 * because prev may have moved to another CPU. 4155 */ 4156 static struct rq *finish_task_switch(struct task_struct *prev) 4157 __releases(rq->lock) 4158 { 4159 struct rq *rq = this_rq(); 4160 struct mm_struct *mm = rq->prev_mm; 4161 long prev_state; 4162 4163 /* 4164 * The previous task will have left us with a preempt_count of 2 4165 * because it left us after: 4166 * 4167 * schedule() 4168 * preempt_disable(); // 1 4169 * __schedule() 4170 * raw_spin_lock_irq(&rq->lock) // 2 4171 * 4172 * Also, see FORK_PREEMPT_COUNT. 4173 */ 4174 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, 4175 "corrupted preempt_count: %s/%d/0x%x\n", 4176 current->comm, current->pid, preempt_count())) 4177 preempt_count_set(FORK_PREEMPT_COUNT); 4178 4179 rq->prev_mm = NULL; 4180 4181 /* 4182 * A task struct has one reference for the use as "current". 4183 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 4184 * schedule one last time. The schedule call will never return, and 4185 * the scheduled task must drop that reference. 4186 * 4187 * We must observe prev->state before clearing prev->on_cpu (in 4188 * finish_task), otherwise a concurrent wakeup can get prev 4189 * running on another CPU and we could race with its RUNNING -> DEAD 4190 * transition, resulting in a double drop. 4191 */ 4192 prev_state = prev->state; 4193 vtime_task_switch(prev); 4194 perf_event_task_sched_in(prev, current); 4195 finish_task(prev); 4196 finish_lock_switch(rq); 4197 finish_arch_post_lock_switch(); 4198 kcov_finish_switch(current); 4199 /* 4200 * kmap_local_sched_out() is invoked with rq::lock held and 4201 * interrupts disabled. There is no requirement for that, but the 4202 * sched out code does not have an interrupt enabled section.
4203 * Restoring the maps on sched in does not require interrupts being 4204 * disabled either. 4205 */ 4206 kmap_local_sched_in(); 4207 4208 fire_sched_in_preempt_notifiers(current); 4209 /* 4210 * When switching through a kernel thread, the loop in 4211 * membarrier_{private,global}_expedited() may have observed that 4212 * kernel thread and not issued an IPI. It is therefore possible to 4213 * schedule between user->kernel->user threads without passing through 4214 * switch_mm(). Membarrier requires a barrier after storing to 4215 * rq->curr, before returning to userspace, so provide them here: 4216 * 4217 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly 4218 * provided by mmdrop(), 4219 * - a sync_core for SYNC_CORE. 4220 */ 4221 if (mm) { 4222 membarrier_mm_sync_core_before_usermode(mm); 4223 mmdrop(mm); 4224 } 4225 if (unlikely(prev_state == TASK_DEAD)) { 4226 if (prev->sched_class->task_dead) 4227 prev->sched_class->task_dead(prev); 4228 4229 /* 4230 * Remove function-return probe instances associated with this 4231 * task and put them back on the free list. 4232 */ 4233 kprobe_flush_task(prev); 4234 4235 /* Task is done with its stack. */ 4236 put_task_stack(prev); 4237 4238 put_task_struct_rcu_user(prev); 4239 } 4240 4241 tick_nohz_task_switch(); 4242 return rq; 4243 } 4244 4245 /** 4246 * schedule_tail - first thing a freshly forked thread must call. 4247 * @prev: the thread we just switched away from. 4248 */ 4249 asmlinkage __visible void schedule_tail(struct task_struct *prev) 4250 __releases(rq->lock) 4251 { 4252 struct rq *rq; 4253 4254 /* 4255 * New tasks start with FORK_PREEMPT_COUNT, see there and 4256 * finish_task_switch() for details. 4257 * 4258 * finish_task_switch() will drop rq->lock and lower preempt_count 4259 * and the preempt_enable() will end up enabling preemption (on 4260 * PREEMPT_COUNT kernels). 4261 */ 4262 4263 rq = finish_task_switch(prev); 4264 preempt_enable(); 4265 4266 if (current->set_child_tid) 4267 put_user(task_pid_vnr(current), current->set_child_tid); 4268 4269 calculate_sigpending(); 4270 } 4271 4272 /* 4273 * context_switch - switch to the new MM and the new thread's register state. 4274 */ 4275 static __always_inline struct rq * 4276 context_switch(struct rq *rq, struct task_struct *prev, 4277 struct task_struct *next, struct rq_flags *rf) 4278 { 4279 prepare_task_switch(rq, prev, next); 4280 4281 /* 4282 * For paravirt, this is coupled with an exit in switch_to to 4283 * combine the page table reload and the switch backend into 4284 * one hypercall. 4285 */ 4286 arch_start_context_switch(prev); 4287 4288 /* 4289 * kernel -> kernel lazy + transfer active 4290 * user -> kernel lazy + mmgrab() active 4291 * 4292 * kernel -> user switch + mmdrop() active 4293 * user -> user switch 4294 */ 4295 if (!next->mm) { // to kernel 4296 enter_lazy_tlb(prev->active_mm, next); 4297 4298 next->active_mm = prev->active_mm; 4299 if (prev->mm) // from user 4300 mmgrab(prev->active_mm); 4301 else 4302 prev->active_mm = NULL; 4303 } else { // to user 4304 membarrier_switch_mm(rq, prev->active_mm, next->mm); 4305 /* 4306 * sys_membarrier() requires an smp_mb() between setting 4307 * rq->curr / membarrier_switch_mm() and returning to userspace. 4308 * 4309 * The below provides this either through switch_mm(), or in 4310 * case 'prev->active_mm == next->mm' through 4311 * finish_task_switch()'s mmdrop().
4312 */ 4313 switch_mm_irqs_off(prev->active_mm, next->mm, next); 4314 4315 if (!prev->mm) { // from kernel 4316 /* will mmdrop() in finish_task_switch(). */ 4317 rq->prev_mm = prev->active_mm; 4318 prev->active_mm = NULL; 4319 } 4320 } 4321 4322 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 4323 4324 prepare_lock_switch(rq, next, rf); 4325 4326 /* Here we just switch the register state and the stack. */ 4327 switch_to(prev, next, prev); 4328 barrier(); 4329 4330 return finish_task_switch(prev); 4331 } 4332 4333 /* 4334 * nr_running and nr_context_switches: 4335 * 4336 * externally visible scheduler statistics: current number of runnable 4337 * threads, total number of context switches performed since bootup. 4338 */ 4339 unsigned long nr_running(void) 4340 { 4341 unsigned long i, sum = 0; 4342 4343 for_each_online_cpu(i) 4344 sum += cpu_rq(i)->nr_running; 4345 4346 return sum; 4347 } 4348 4349 /* 4350 * Check if only the current task is running on the CPU. 4351 * 4352 * Caution: this function does not check that the caller has disabled 4353 * preemption, thus the result might have a time-of-check-to-time-of-use 4354 * race. The caller is responsible for using it correctly, for example: 4355 * 4356 * - from a non-preemptible section (of course) 4357 * 4358 * - from a thread that is bound to a single CPU 4359 * 4360 * - in a loop with very short iterations (e.g. a polling loop) 4361 */ 4362 bool single_task_running(void) 4363 { 4364 return raw_rq()->nr_running == 1; 4365 } 4366 EXPORT_SYMBOL(single_task_running); 4367 4368 unsigned long long nr_context_switches(void) 4369 { 4370 int i; 4371 unsigned long long sum = 0; 4372 4373 for_each_possible_cpu(i) 4374 sum += cpu_rq(i)->nr_switches; 4375 4376 return sum; 4377 } 4378 4379 /* 4380 * Consumers of these two interfaces, like for example the cpuidle menu 4381 * governor, are using nonsensical data: preferring shallow idle state selection 4382 * for a CPU that has IO-wait, even though that CPU might not even end up 4383 * running the task when it does become runnable. 4384 */ 4385 4386 unsigned long nr_iowait_cpu(int cpu) 4387 { 4388 return atomic_read(&cpu_rq(cpu)->nr_iowait); 4389 } 4390 4391 /* 4392 * IO-wait accounting, and how it's mostly bollocks (on SMP). 4393 * 4394 * The idea behind IO-wait accounting is to account the idle time that we could 4395 * have spent running if it were not for IO. That is, if we were to improve the 4396 * storage performance, we'd have a proportional reduction in IO-wait time. 4397 * 4398 * This all works nicely on UP, where, when a task blocks on IO, we account 4399 * idle time as IO-wait, because if the storage were faster, it could've been 4400 * running and we'd not be idle. 4401 * 4402 * This has been extended to SMP, by doing the same for each CPU. This however 4403 * is broken. 4404 * 4405 * Imagine for instance the case where two tasks block on one CPU; only that 4406 * CPU will have IO-wait accounted, while the other has regular idle. Even 4407 * though, had the storage been faster, both could have run at the same time, 4408 * utilising both CPUs. 4409 * 4410 * This means that, when looking globally, the current IO-wait accounting on 4411 * SMP is a lower bound, by reason of under-accounting. 4412 * 4413 * Worse, since the numbers are provided per CPU, they are sometimes 4414 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly 4415 * associated with any one particular CPU, it can wake up on a different CPU 4416 * than the one it blocked on. This means the per CPU IO-wait number is meaningless.
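 *
 * Concretely (editor's example): a task that blocks for IO on CPU0 bumps
 * CPU0's nr_iowait, yet it may well be woken on CPU1 when the IO
 * completes -- attributing the wait to CPU0 alone is arbitrary.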
4417 * 4418 * Task CPU affinities can make all that even more 'interesting'. 4419 */ 4420 4421 unsigned long nr_iowait(void) 4422 { 4423 unsigned long i, sum = 0; 4424 4425 for_each_possible_cpu(i) 4426 sum += nr_iowait_cpu(i); 4427 4428 return sum; 4429 } 4430 4431 #ifdef CONFIG_SMP 4432 4433 /* 4434 * sched_exec - execve() is a valuable balancing opportunity, because at 4435 * this point the task has the smallest effective memory and cache footprint. 4436 */ 4437 void sched_exec(void) 4438 { 4439 struct task_struct *p = current; 4440 unsigned long flags; 4441 int dest_cpu; 4442 4443 raw_spin_lock_irqsave(&p->pi_lock, flags); 4444 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); 4445 if (dest_cpu == smp_processor_id()) 4446 goto unlock; 4447 4448 if (likely(cpu_active(dest_cpu))) { 4449 struct migration_arg arg = { p, dest_cpu }; 4450 4451 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4452 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 4453 return; 4454 } 4455 unlock: 4456 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4457 } 4458 4459 #endif 4460 4461 DEFINE_PER_CPU(struct kernel_stat, kstat); 4462 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 4463 4464 EXPORT_PER_CPU_SYMBOL(kstat); 4465 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 4466 4467 /* 4468 * The function fair_sched_class.update_curr accesses the struct curr 4469 * and its field curr->exec_start; when called from task_sched_runtime(), 4470 * we observe a high rate of cache misses in practice. 4471 * Prefetching this data results in improved performance. 4472 */ 4473 static inline void prefetch_curr_exec_start(struct task_struct *p) 4474 { 4475 #ifdef CONFIG_FAIR_GROUP_SCHED 4476 struct sched_entity *curr = (&p->se)->cfs_rq->curr; 4477 #else 4478 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; 4479 #endif 4480 prefetch(curr); 4481 prefetch(&curr->exec_start); 4482 } 4483 4484 /* 4485 * Return accounted runtime for the task. 4486 * In case the task is currently running, return the runtime plus current's 4487 * pending runtime that has not been accounted yet. 4488 */ 4489 unsigned long long task_sched_runtime(struct task_struct *p) 4490 { 4491 struct rq_flags rf; 4492 struct rq *rq; 4493 u64 ns; 4494 4495 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 4496 /* 4497 * 64-bit doesn't need locks to atomically read a 64-bit value. 4498 * So we have an optimization chance when the task's delta_exec is 0. 4499 * Reading ->on_cpu is racy, but this is ok. 4500 * 4501 * If we race with it leaving CPU, we'll take a lock. So we're correct. 4502 * If we race with it entering CPU, unaccounted time is 0. This is 4503 * indistinguishable from the read occurring a few cycles earlier. 4504 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 4505 * been accounted, so we're correct here as well. 4506 */ 4507 if (!p->on_cpu || !task_on_rq_queued(p)) 4508 return p->se.sum_exec_runtime; 4509 #endif 4510 4511 rq = task_rq_lock(p, &rf); 4512 /* 4513 * Must be ->curr _and_ ->on_rq. If dequeued, we would 4514 * project cycles that may never be accounted to this 4515 * thread, breaking clock_gettime(). 4516 */ 4517 if (task_current(rq, p) && task_on_rq_queued(p)) { 4518 prefetch_curr_exec_start(p); 4519 update_rq_clock(rq); 4520 p->sched_class->update_curr(rq); 4521 } 4522 ns = p->se.sum_exec_runtime; 4523 task_rq_unlock(rq, p, &rf); 4524 4525 return ns; 4526 } 4527 4528 /* 4529 * This function gets called by the timer code, with HZ frequency. 4530 * We call it with interrupts disabled.
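 * (Editor's note: with CONFIG_HZ=250, for example, each CPU runs this
 * roughly every 4ms from its timer interrupt.)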
4531 */ 4532 void scheduler_tick(void) 4533 { 4534 int cpu = smp_processor_id(); 4535 struct rq *rq = cpu_rq(cpu); 4536 struct task_struct *curr = rq->curr; 4537 struct rq_flags rf; 4538 unsigned long thermal_pressure; 4539 4540 arch_scale_freq_tick(); 4541 sched_clock_tick(); 4542 4543 rq_lock(rq, &rf); 4544 4545 update_rq_clock(rq); 4546 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); 4547 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); 4548 curr->sched_class->task_tick(rq, curr, 0); 4549 calc_global_load_tick(rq); 4550 psi_task_tick(rq); 4551 4552 rq_unlock(rq, &rf); 4553 4554 perf_event_task_tick(); 4555 4556 #ifdef CONFIG_SMP 4557 rq->idle_balance = idle_cpu(cpu); 4558 trigger_load_balance(rq); 4559 #endif 4560 } 4561 4562 #ifdef CONFIG_NO_HZ_FULL 4563 4564 struct tick_work { 4565 int cpu; 4566 atomic_t state; 4567 struct delayed_work work; 4568 }; 4569 /* Values for ->state, see diagram below. */ 4570 #define TICK_SCHED_REMOTE_OFFLINE 0 4571 #define TICK_SCHED_REMOTE_OFFLINING 1 4572 #define TICK_SCHED_REMOTE_RUNNING 2 4573 4574 /* 4575 * State diagram for ->state: 4576 * 4577 * 4578 * TICK_SCHED_REMOTE_OFFLINE 4579 * | ^ 4580 * | | 4581 * | | sched_tick_remote() 4582 * | | 4583 * | | 4584 * +--TICK_SCHED_REMOTE_OFFLINING 4585 * | ^ 4586 * | | 4587 * sched_tick_start() | | sched_tick_stop() 4588 * | | 4589 * V | 4590 * TICK_SCHED_REMOTE_RUNNING 4591 * 4592 * 4593 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() 4594 * and sched_tick_start() are happy to leave the state in RUNNING. 4595 */ 4596 4597 static struct tick_work __percpu *tick_work_cpu; 4598 4599 static void sched_tick_remote(struct work_struct *work) 4600 { 4601 struct delayed_work *dwork = to_delayed_work(work); 4602 struct tick_work *twork = container_of(dwork, struct tick_work, work); 4603 int cpu = twork->cpu; 4604 struct rq *rq = cpu_rq(cpu); 4605 struct task_struct *curr; 4606 struct rq_flags rf; 4607 u64 delta; 4608 int os; 4609 4610 /* 4611 * Handle the tick only if it appears the remote CPU is running in full 4612 * dynticks mode. The check is racy by nature, but missing a tick or 4613 * having one too many is no big deal because the scheduler tick updates 4614 * statistics and checks timeslices in a time-independent way, regardless 4615 * of when exactly it is running. 4616 */ 4617 if (!tick_nohz_tick_stopped_cpu(cpu)) 4618 goto out_requeue; 4619 4620 rq_lock_irq(rq, &rf); 4621 curr = rq->curr; 4622 if (cpu_is_offline(cpu)) 4623 goto out_unlock; 4624 4625 update_rq_clock(rq); 4626 4627 if (!is_idle_task(curr)) { 4628 /* 4629 * Make sure the next tick runs within a reasonable 4630 * amount of time. 4631 */ 4632 delta = rq_clock_task(rq) - curr->se.exec_start; 4633 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); 4634 } 4635 curr->sched_class->task_tick(rq, curr, 0); 4636 4637 calc_load_nohz_remote(rq); 4638 out_unlock: 4639 rq_unlock_irq(rq, &rf); 4640 out_requeue: 4641 4642 /* 4643 * Run the remote tick once per second (1Hz). This arbitrary 4644 * rate is low enough to avoid overload but high enough to keep 4645 * scheduler internal stats reasonably up to date. But 4646 * first update state to reflect hotplug activity if required.
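 *
 * How the decrement below works (editor's note): atomic_fetch_add_unless()
 * adds -1 to ->state unless it still reads TICK_SCHED_REMOTE_RUNNING (2).
 * RUNNING is therefore left untouched and the work is requeued, while
 * OFFLINING (1) drops to OFFLINE (0) and the work is not requeued;
 * observing OFFLINE here would be a bug, hence the WARN below.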
4647 */ 4648 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); 4649 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); 4650 if (os == TICK_SCHED_REMOTE_RUNNING) 4651 queue_delayed_work(system_unbound_wq, dwork, HZ); 4652 } 4653 4654 static void sched_tick_start(int cpu) 4655 { 4656 int os; 4657 struct tick_work *twork; 4658 4659 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 4660 return; 4661 4662 WARN_ON_ONCE(!tick_work_cpu); 4663 4664 twork = per_cpu_ptr(tick_work_cpu, cpu); 4665 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); 4666 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); 4667 if (os == TICK_SCHED_REMOTE_OFFLINE) { 4668 twork->cpu = cpu; 4669 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); 4670 queue_delayed_work(system_unbound_wq, &twork->work, HZ); 4671 } 4672 } 4673 4674 #ifdef CONFIG_HOTPLUG_CPU 4675 static void sched_tick_stop(int cpu) 4676 { 4677 struct tick_work *twork; 4678 int os; 4679 4680 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 4681 return; 4682 4683 WARN_ON_ONCE(!tick_work_cpu); 4684 4685 twork = per_cpu_ptr(tick_work_cpu, cpu); 4686 /* There cannot be competing actions, but don't rely on stop-machine. */ 4687 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); 4688 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); 4689 /* Don't cancel, as this would mess up the state machine. */ 4690 } 4691 #endif /* CONFIG_HOTPLUG_CPU */ 4692 4693 int __init sched_tick_offload_init(void) 4694 { 4695 tick_work_cpu = alloc_percpu(struct tick_work); 4696 BUG_ON(!tick_work_cpu); 4697 return 0; 4698 } 4699 4700 #else /* !CONFIG_NO_HZ_FULL */ 4701 static inline void sched_tick_start(int cpu) { } 4702 static inline void sched_tick_stop(int cpu) { } 4703 #endif 4704 4705 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ 4706 defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 4707 /* 4708 * If the value passed in is equal to the current preempt count 4709 * then we just disabled preemption. Start timing the latency. 4710 */ 4711 static inline void preempt_latency_start(int val) 4712 { 4713 if (preempt_count() == val) { 4714 unsigned long ip = get_lock_parent_ip(); 4715 #ifdef CONFIG_DEBUG_PREEMPT 4716 current->preempt_disable_ip = ip; 4717 #endif 4718 trace_preempt_off(CALLER_ADDR0, ip); 4719 } 4720 } 4721 4722 void preempt_count_add(int val) 4723 { 4724 #ifdef CONFIG_DEBUG_PREEMPT 4725 /* 4726 * Underflow? 4727 */ 4728 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 4729 return; 4730 #endif 4731 __preempt_count_add(val); 4732 #ifdef CONFIG_DEBUG_PREEMPT 4733 /* 4734 * Spinlock count overflowing soon? 4735 */ 4736 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 4737 PREEMPT_MASK - 10); 4738 #endif 4739 preempt_latency_start(val); 4740 } 4741 EXPORT_SYMBOL(preempt_count_add); 4742 NOKPROBE_SYMBOL(preempt_count_add); 4743 4744 /* 4745 * If the value passed in equals the current preempt count 4746 * then we just enabled preemption. Stop timing the latency. 4747 */ 4748 static inline void preempt_latency_stop(int val) 4749 { 4750 if (preempt_count() == val) 4751 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 4752 } 4753 4754 void preempt_count_sub(int val) 4755 { 4756 #ifdef CONFIG_DEBUG_PREEMPT 4757 /* 4758 * Underflow? 4759 */ 4760 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 4761 return; 4762 /* 4763 * Is the spinlock portion underflowing?
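 * (Editor's example: one spin_unlock() too many, without a matching
 * spin_lock(), would trip this check.)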
4764 */ 4765 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 4766 !(preempt_count() & PREEMPT_MASK))) 4767 return; 4768 #endif 4769 4770 preempt_latency_stop(val); 4771 __preempt_count_sub(val); 4772 } 4773 EXPORT_SYMBOL(preempt_count_sub); 4774 NOKPROBE_SYMBOL(preempt_count_sub); 4775 4776 #else 4777 static inline void preempt_latency_start(int val) { } 4778 static inline void preempt_latency_stop(int val) { } 4779 #endif 4780 4781 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 4782 { 4783 #ifdef CONFIG_DEBUG_PREEMPT 4784 return p->preempt_disable_ip; 4785 #else 4786 return 0; 4787 #endif 4788 } 4789 4790 /* 4791 * Print scheduling while atomic bug: 4792 */ 4793 static noinline void __schedule_bug(struct task_struct *prev) 4794 { 4795 /* Save this before calling printk(), since that will clobber it */ 4796 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 4797 4798 if (oops_in_progress) 4799 return; 4800 4801 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 4802 prev->comm, prev->pid, preempt_count()); 4803 4804 debug_show_held_locks(prev); 4805 print_modules(); 4806 if (irqs_disabled()) 4807 print_irqtrace_events(prev); 4808 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 4809 && in_atomic_preempt_off()) { 4810 pr_err("Preemption disabled at:"); 4811 print_ip_sym(KERN_ERR, preempt_disable_ip); 4812 } 4813 if (panic_on_warn) 4814 panic("scheduling while atomic\n"); 4815 4816 dump_stack(); 4817 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 4818 } 4819 4820 /* 4821 * Various schedule()-time debugging checks and statistics: 4822 */ 4823 static inline void schedule_debug(struct task_struct *prev, bool preempt) 4824 { 4825 #ifdef CONFIG_SCHED_STACK_END_CHECK 4826 if (task_stack_end_corrupted(prev)) 4827 panic("corrupted stack end detected inside scheduler\n"); 4828 4829 if (task_scs_end_corrupted(prev)) 4830 panic("corrupted shadow stack detected inside scheduler\n"); 4831 #endif 4832 4833 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 4834 if (!preempt && prev->state && prev->non_block_count) { 4835 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", 4836 prev->comm, prev->pid, prev->non_block_count); 4837 dump_stack(); 4838 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 4839 } 4840 #endif 4841 4842 if (unlikely(in_atomic_preempt_off())) { 4843 __schedule_bug(prev); 4844 preempt_count_set(PREEMPT_DISABLED); 4845 } 4846 rcu_sleep_check(); 4847 SCHED_WARN_ON(ct_state() == CONTEXT_USER); 4848 4849 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 4850 4851 schedstat_inc(this_rq()->sched_count); 4852 } 4853 4854 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, 4855 struct rq_flags *rf) 4856 { 4857 #ifdef CONFIG_SMP 4858 const struct sched_class *class; 4859 /* 4860 * We must do the balancing pass before put_prev_task(), such 4861 * that when we release the rq->lock the task is in the same 4862 * state as before we took rq->lock. 4863 * 4864 * We can terminate the balance pass as soon as we know there is 4865 * a runnable task of @class priority or higher. 
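 *
 * Sketch (editor's illustration): if prev is an RT task, the pass below
 * starts with balance_rt(); when that reports a runnable RT task there
 * is no point in also running balance_fair(), so the loop breaks.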
4866 */ 4867 for_class_range(class, prev->sched_class, &idle_sched_class) { 4868 if (class->balance(rq, prev, rf)) 4869 break; 4870 } 4871 #endif 4872 4873 put_prev_task(rq, prev); 4874 } 4875 4876 /* 4877 * Pick up the highest-prio task: 4878 */ 4879 static inline struct task_struct * 4880 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 4881 { 4882 const struct sched_class *class; 4883 struct task_struct *p; 4884 4885 /* 4886 * Optimization: we know that if all tasks are in the fair class we can 4887 * call that function directly, but only if the @prev task wasn't of a 4888 * higher scheduling class, because otherwise those lose the 4889 * opportunity to pull in more work from other CPUs. 4890 */ 4891 if (likely(prev->sched_class <= &fair_sched_class && 4892 rq->nr_running == rq->cfs.h_nr_running)) { 4893 4894 p = pick_next_task_fair(rq, prev, rf); 4895 if (unlikely(p == RETRY_TASK)) 4896 goto restart; 4897 4898 /* Assumes fair_sched_class->next == idle_sched_class */ 4899 if (!p) { 4900 put_prev_task(rq, prev); 4901 p = pick_next_task_idle(rq); 4902 } 4903 4904 return p; 4905 } 4906 4907 restart: 4908 put_prev_task_balance(rq, prev, rf); 4909 4910 for_each_class(class) { 4911 p = class->pick_next_task(rq); 4912 if (p) 4913 return p; 4914 } 4915 4916 /* The idle class should always have a runnable task: */ 4917 BUG(); 4918 } 4919 4920 /* 4921 * __schedule() is the main scheduler function. 4922 * 4923 * The main means of driving the scheduler and thus entering this function are: 4924 * 4925 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 4926 * 4927 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 4928 * paths. For example, see arch/x86/entry_64.S. 4929 * 4930 * To drive preemption between tasks, the scheduler sets the flag in the 4931 * timer interrupt handler, scheduler_tick(). 4932 * 4933 * 3. Wakeups don't really cause entry into schedule(). They add a 4934 * task to the run-queue and that's it. 4935 * 4936 * Now, if the new task added to the run-queue preempts the current 4937 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 4938 * called on the nearest possible occasion: 4939 * 4940 * - If the kernel is preemptible (CONFIG_PREEMPTION=y): 4941 * 4942 * - in syscall or exception context, at the next outermost 4943 * preempt_enable(). (this might be as soon as the wake_up()'s 4944 * spin_unlock()!) 4945 * 4946 * - in IRQ context, return from interrupt-handler to 4947 * preemptible context 4948 * 4949 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) 4950 * then at the next: 4951 * 4952 * - cond_resched() call 4953 * - explicit schedule() call 4954 * - return from syscall or exception to user-space 4955 * - return from interrupt-handler to user-space 4956 * 4957 * WARNING: must be called with preemption disabled!
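 *
 * A correct calling pattern, as schedule() itself uses further below
 * (editor's note):
 *
 *	do {
 *		preempt_disable();
 *		__schedule(false);
 *		sched_preempt_enable_no_resched();
 *	} while (need_resched());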
4958 */ 4959 static void __sched notrace __schedule(bool preempt) 4960 { 4961 struct task_struct *prev, *next; 4962 unsigned long *switch_count; 4963 unsigned long prev_state; 4964 struct rq_flags rf; 4965 struct rq *rq; 4966 int cpu; 4967 4968 cpu = smp_processor_id(); 4969 rq = cpu_rq(cpu); 4970 prev = rq->curr; 4971 4972 schedule_debug(prev, preempt); 4973 4974 if (sched_feat(HRTICK)) 4975 hrtick_clear(rq); 4976 4977 local_irq_disable(); 4978 rcu_note_context_switch(preempt); 4979 4980 /* 4981 * Make sure that signal_pending_state()->signal_pending() below 4982 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 4983 * done by the caller to avoid the race with signal_wake_up(): 4984 * 4985 * __set_current_state(@state) signal_wake_up() 4986 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) 4987 * wake_up_state(p, state) 4988 * LOCK rq->lock LOCK p->pi_lock 4989 * smp_mb__after_spinlock() smp_mb__after_spinlock() 4990 * if (signal_pending_state()) if (p->state & @state) 4991 * 4992 * Also, the membarrier system call requires a full memory barrier 4993 * after coming from user-space, before storing to rq->curr. 4994 */ 4995 rq_lock(rq, &rf); 4996 smp_mb__after_spinlock(); 4997 4998 /* Promote REQ to ACT */ 4999 rq->clock_update_flags <<= 1; 5000 update_rq_clock(rq); 5001 5002 switch_count = &prev->nivcsw; 5003 5004 /* 5005 * We must load prev->state once (task_struct::state is volatile), such 5006 * that: 5007 * 5008 * - we form a control dependency vs deactivate_task() below. 5009 * - ptrace_{,un}freeze_traced() can change ->state underneath us. 5010 */ 5011 prev_state = prev->state; 5012 if (!preempt && prev_state) { 5013 if (signal_pending_state(prev_state, prev)) { 5014 prev->state = TASK_RUNNING; 5015 } else { 5016 prev->sched_contributes_to_load = 5017 (prev_state & TASK_UNINTERRUPTIBLE) && 5018 !(prev_state & TASK_NOLOAD) && 5019 !(prev->flags & PF_FROZEN); 5020 5021 if (prev->sched_contributes_to_load) 5022 rq->nr_uninterruptible++; 5023 5024 /* 5025 * __schedule() ttwu() 5026 * prev_state = prev->state; if (p->on_rq && ...) 5027 * if (prev_state) goto out; 5028 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); 5029 * p->state = TASK_WAKING 5030 * 5031 * Where __schedule() and ttwu() have matching control dependencies. 5032 * 5033 * After this, schedule() must not care about p->state any more. 5034 */ 5035 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 5036 5037 if (prev->in_iowait) { 5038 atomic_inc(&rq->nr_iowait); 5039 delayacct_blkio_start(); 5040 } 5041 } 5042 switch_count = &prev->nvcsw; 5043 } 5044 5045 next = pick_next_task(rq, prev, &rf); 5046 clear_tsk_need_resched(prev); 5047 clear_preempt_need_resched(); 5048 5049 if (likely(prev != next)) { 5050 rq->nr_switches++; 5051 /* 5052 * RCU users of rcu_dereference(rq->curr) may not see 5053 * changes to task_struct made by pick_next_task(). 5054 */ 5055 RCU_INIT_POINTER(rq->curr, next); 5056 /* 5057 * The membarrier system call requires each architecture 5058 * to have a full memory barrier after updating 5059 * rq->curr, before returning to user-space. 5060 * 5061 * Here are the schemes providing that barrier on the 5062 * various architectures: 5063 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. 5064 * switch_mm() relies on membarrier_arch_switch_mm() on PowerPC.
5065 * - finish_lock_switch() for weakly-ordered 5066 * architectures where spin_unlock is a full barrier, 5067 * - switch_to() for arm64 (weakly-ordered, spin_unlock 5068 * is a RELEASE barrier), 5069 */ 5070 ++*switch_count; 5071 5072 migrate_disable_switch(rq, prev); 5073 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); 5074 5075 trace_sched_switch(preempt, prev, next); 5076 5077 /* Also unlocks the rq: */ 5078 rq = context_switch(rq, prev, next, &rf); 5079 } else { 5080 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 5081 5082 rq_unpin_lock(rq, &rf); 5083 __balance_callbacks(rq); 5084 raw_spin_unlock_irq(&rq->lock); 5085 } 5086 } 5087 5088 void __noreturn do_task_dead(void) 5089 { 5090 /* Causes final put_task_struct in finish_task_switch(): */ 5091 set_special_state(TASK_DEAD); 5092 5093 /* Tell freezer to ignore us: */ 5094 current->flags |= PF_NOFREEZE; 5095 5096 __schedule(false); 5097 BUG(); 5098 5099 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 5100 for (;;) 5101 cpu_relax(); 5102 } 5103 5104 static inline void sched_submit_work(struct task_struct *tsk) 5105 { 5106 unsigned int task_flags; 5107 5108 if (!tsk->state) 5109 return; 5110 5111 task_flags = tsk->flags; 5112 /* 5113 * If a worker went to sleep, notify and ask workqueue whether 5114 * it wants to wake up a task to maintain concurrency. 5115 * As this function is called inside the schedule() context, 5116 * we disable preemption to avoid it calling schedule() again 5117 * during the possible wakeup of a kworker, and because wq_worker_sleeping() 5118 * requires it. 5119 */ 5120 if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 5121 preempt_disable(); 5122 if (task_flags & PF_WQ_WORKER) 5123 wq_worker_sleeping(tsk); 5124 else 5125 io_wq_worker_sleeping(tsk); 5126 preempt_enable_no_resched(); 5127 } 5128 5129 if (tsk_is_pi_blocked(tsk)) 5130 return; 5131 5132 /* 5133 * If we are going to sleep and we have plugged IO queued, 5134 * make sure to submit it to avoid deadlocks. 5135 */ 5136 if (blk_needs_flush_plug(tsk)) 5137 blk_schedule_flush_plug(tsk); 5138 } 5139 5140 static void sched_update_worker(struct task_struct *tsk) 5141 { 5142 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 5143 if (tsk->flags & PF_WQ_WORKER) 5144 wq_worker_running(tsk); 5145 else 5146 io_wq_worker_running(tsk); 5147 } 5148 } 5149 5150 asmlinkage __visible void __sched schedule(void) 5151 { 5152 struct task_struct *tsk = current; 5153 5154 sched_submit_work(tsk); 5155 do { 5156 preempt_disable(); 5157 __schedule(false); 5158 sched_preempt_enable_no_resched(); 5159 } while (need_resched()); 5160 sched_update_worker(tsk); 5161 } 5162 EXPORT_SYMBOL(schedule); 5163 5164 /* 5165 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted 5166 * state (i.e. has scheduled out non-voluntarily) by making sure that all 5167 * tasks have either left the run queue or have gone into user space. 5168 * As idle tasks do not do either, they must not ever be preempted 5169 * (scheduled out non-voluntarily). 5170 * 5171 * schedule_idle() is similar to schedule_preempt_disabled() except that it 5172 * never enables preemption because it does not call sched_submit_work(). 5173 */ 5174 void __sched schedule_idle(void) 5175 { 5176 /* 5177 * This skips calling sched_submit_work(), which is safe for the idle 5178 * task because that function is a nop when the task is in the 5179 * TASK_RUNNING state. Make sure this isn't used someplace where the 5180 * current task can be in any other state.
Note, idle is always in the 5181 * TASK_RUNNING state. 5182 */ 5183 WARN_ON_ONCE(current->state); 5184 do { 5185 __schedule(false); 5186 } while (need_resched()); 5187 } 5188 5189 #if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) 5190 asmlinkage __visible void __sched schedule_user(void) 5191 { 5192 /* 5193 * If we come here after a random call to set_need_resched(), 5194 * or we have been woken up remotely but the IPI has not yet arrived, 5195 * we haven't yet exited the RCU idle mode. Do it here manually until 5196 * we find a better solution. 5197 * 5198 * NB: There are buggy callers of this function. Ideally we 5199 * should warn if prev_state != CONTEXT_USER, but that will trigger 5200 * too frequently to make sense yet. 5201 */ 5202 enum ctx_state prev_state = exception_enter(); 5203 schedule(); 5204 exception_exit(prev_state); 5205 } 5206 #endif 5207 5208 /** 5209 * schedule_preempt_disabled - called with preemption disabled 5210 * 5211 * Returns with preemption disabled. Note: preempt_count must be 1 5212 */ 5213 void __sched schedule_preempt_disabled(void) 5214 { 5215 sched_preempt_enable_no_resched(); 5216 schedule(); 5217 preempt_disable(); 5218 } 5219 5220 static void __sched notrace preempt_schedule_common(void) 5221 { 5222 do { 5223 /* 5224 * Because the function tracer can trace preempt_count_sub() 5225 * and it also uses preempt_enable/disable_notrace(), if 5226 * NEED_RESCHED is set, the preempt_enable_notrace() called 5227 * by the function tracer will call this function again and 5228 * cause infinite recursion. 5229 * 5230 * Preemption must be disabled here before the function 5231 * tracer can trace. Break up preempt_disable() into two 5232 * calls. One to disable preemption without fear of being 5233 * traced. The other to still record the preemption latency, 5234 * which can also be traced by the function tracer. 5235 */ 5236 preempt_disable_notrace(); 5237 preempt_latency_start(1); 5238 __schedule(true); 5239 preempt_latency_stop(1); 5240 preempt_enable_no_resched_notrace(); 5241 5242 /* 5243 * Check again in case we missed a preemption opportunity 5244 * between schedule and now. 5245 */ 5246 } while (need_resched()); 5247 } 5248 5249 #ifdef CONFIG_PREEMPTION 5250 /* 5251 * This is the entry point to schedule() from in-kernel preemption 5252 * off of preempt_enable. 5253 */ 5254 asmlinkage __visible void __sched notrace preempt_schedule(void) 5255 { 5256 /* 5257 * If there is a non-zero preempt_count or interrupts are disabled, 5258 * we do not want to preempt the current task. Just return.. 5259 */ 5260 if (likely(!preemptible())) 5261 return; 5262 5263 preempt_schedule_common(); 5264 } 5265 NOKPROBE_SYMBOL(preempt_schedule); 5266 EXPORT_SYMBOL(preempt_schedule); 5267 5268 #ifdef CONFIG_PREEMPT_DYNAMIC 5269 DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func); 5270 EXPORT_STATIC_CALL(preempt_schedule); 5271 #endif 5272 5273 5274 /** 5275 * preempt_schedule_notrace - preempt_schedule called by tracing 5276 * 5277 * The tracing infrastructure uses preempt_enable_notrace to prevent 5278 * recursion and tracing preempt enabling caused by the tracing 5279 * infrastructure itself. But as tracing can happen in areas coming 5280 * from userspace or just about to enter userspace, a preempt enable 5281 * can occur before user_exit() is called. This will cause the scheduler 5282 * to be called when the system is still in usermode. 
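 * (Editor's sketch of the problematic path: tracing callback ->
 * preempt_enable_notrace() -> preempt_schedule() while context tracking
 * still marks the CPU as being in user mode.)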
5283 * 5284 * To prevent this, the preempt_enable_notrace will use this function 5285 * instead of preempt_schedule() to exit user context if needed before 5286 * calling the scheduler. 5287 */ 5288 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 5289 { 5290 enum ctx_state prev_ctx; 5291 5292 if (likely(!preemptible())) 5293 return; 5294 5295 do { 5296 /* 5297 * Because the function tracer can trace preempt_count_sub() 5298 * and it also uses preempt_enable/disable_notrace(), if 5299 * NEED_RESCHED is set, the preempt_enable_notrace() called 5300 * by the function tracer will call this function again and 5301 * cause infinite recursion. 5302 * 5303 * Preemption must be disabled here before the function 5304 * tracer can trace. Break up preempt_disable() into two 5305 * calls. One to disable preemption without fear of being 5306 * traced. The other to still record the preemption latency, 5307 * which can also be traced by the function tracer. 5308 */ 5309 preempt_disable_notrace(); 5310 preempt_latency_start(1); 5311 /* 5312 * Needs preempt disabled in case user_exit() is traced 5313 * and the tracer calls preempt_enable_notrace() causing 5314 * an infinite recursion. 5315 */ 5316 prev_ctx = exception_enter(); 5317 __schedule(true); 5318 exception_exit(prev_ctx); 5319 5320 preempt_latency_stop(1); 5321 preempt_enable_no_resched_notrace(); 5322 } while (need_resched()); 5323 } 5324 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 5325 5326 #ifdef CONFIG_PREEMPT_DYNAMIC 5327 DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func); 5328 EXPORT_STATIC_CALL(preempt_schedule_notrace); 5329 #endif 5330 5331 #endif /* CONFIG_PREEMPTION */ 5332 5333 #ifdef CONFIG_PREEMPT_DYNAMIC 5334 5335 #include <linux/entry-common.h> 5336 5337 /* 5338 * SC:cond_resched 5339 * SC:might_resched 5340 * SC:preempt_schedule 5341 * SC:preempt_schedule_notrace 5342 * SC:irqentry_exit_cond_resched 5343 * 5344 * 5345 * NONE: 5346 * cond_resched <- __cond_resched 5347 * might_resched <- RET0 5348 * preempt_schedule <- NOP 5349 * preempt_schedule_notrace <- NOP 5350 * irqentry_exit_cond_resched <- NOP 5351 * 5352 * VOLUNTARY: 5353 * cond_resched <- __cond_resched 5354 * might_resched <- __cond_resched 5355 * preempt_schedule <- NOP 5356 * preempt_schedule_notrace <- NOP 5357 * irqentry_exit_cond_resched <- NOP 5358 * 5359 * FULL: 5360 * cond_resched <- RET0 5361 * might_resched <- RET0 5362 * preempt_schedule <- preempt_schedule 5363 * preempt_schedule_notrace <- preempt_schedule_notrace 5364 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched 5365 */ 5366 static int __init setup_preempt_mode(char *str) 5367 { 5368 if (!strcmp(str, "none")) { 5369 static_call_update(cond_resched, __cond_resched); 5370 static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0); 5371 static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL); 5372 static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL); 5373 static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL); 5374 pr_info("Dynamic Preempt: %s\n", str); 5375 } else if (!strcmp(str, "voluntary")) { 5376 static_call_update(cond_resched, __cond_resched); 5377 static_call_update(might_resched, __cond_resched); 5378 static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL); 5379 static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL); 5380 static_call_update(irqentry_exit_cond_resched, 
(typeof(&irqentry_exit_cond_resched)) NULL); 5381 pr_info("Dynamic Preempt: %s\n", str); 5382 } else if (!strcmp(str, "full")) { 5383 static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0); 5384 static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0); 5385 static_call_update(preempt_schedule, __preempt_schedule_func); 5386 static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func); 5387 static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched); 5388 pr_info("Dynamic Preempt: %s\n", str); 5389 } else { 5390 pr_warn("Dynamic Preempt: Unsupported preempt mode %s, default to full\n", str); 5391 return 1; 5392 } 5393 return 0; 5394 } 5395 __setup("preempt=", setup_preempt_mode); 5396 5397 #endif /* CONFIG_PREEMPT_DYNAMIC */ 5398 5399 5400 /* 5401 * This is the entry point to schedule() from kernel preemption 5402 * off of irq context. 5403 * Note that this is called and returns with irqs disabled. This will 5404 * protect us against recursive calling from irq. 5405 */ 5406 asmlinkage __visible void __sched preempt_schedule_irq(void) 5407 { 5408 enum ctx_state prev_state; 5409 5410 /* Catch callers which need to be fixed */ 5411 BUG_ON(preempt_count() || !irqs_disabled()); 5412 5413 prev_state = exception_enter(); 5414 5415 do { 5416 preempt_disable(); 5417 local_irq_enable(); 5418 __schedule(true); 5419 local_irq_disable(); 5420 sched_preempt_enable_no_resched(); 5421 } while (need_resched()); 5422 5423 exception_exit(prev_state); 5424 } 5425 5426 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 5427 void *key) 5428 { 5429 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC); 5430 return try_to_wake_up(curr->private, mode, wake_flags); 5431 } 5432 EXPORT_SYMBOL(default_wake_function); 5433 5434 #ifdef CONFIG_RT_MUTEXES 5435 5436 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) 5437 { 5438 if (pi_task) 5439 prio = min(prio, pi_task->prio); 5440 5441 return prio; 5442 } 5443 5444 static inline int rt_effective_prio(struct task_struct *p, int prio) 5445 { 5446 struct task_struct *pi_task = rt_mutex_get_top_task(p); 5447 5448 return __rt_effective_prio(pi_task, prio); 5449 } 5450 5451 /* 5452 * rt_mutex_setprio - set the current priority of a task 5453 * @p: task to boost 5454 * @pi_task: donor task 5455 * 5456 * This function changes the 'effective' priority of a task. It does 5457 * not touch ->normal_prio like __setscheduler(). 5458 * 5459 * Used by the rt_mutex code to implement priority inheritance 5460 * logic. The call site only calls this if the priority of the task changed. 5461 */ 5462 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) 5463 { 5464 int prio, oldprio, queued, running, queue_flag = 5465 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 5466 const struct sched_class *prev_class; 5467 struct rq_flags rf; 5468 struct rq *rq; 5469 5470 /* XXX used to be waiter->prio, not waiter->task->prio */ 5471 prio = __rt_effective_prio(pi_task, p->normal_prio); 5472 5473 /* 5474 * If nothing changed, bail early. 5475 */ 5476 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) 5477 return; 5478 5479 rq = __task_rq_lock(p, &rf); 5480 update_rq_clock(rq); 5481 /* 5482 * Set under pi_lock && rq->lock, such that the value can be used under 5483 * either lock. 5484 * 5485 * Note that it takes loads of trickery to make this pointer cache work 5486 * right.
rt_mutex_slowunlock()+rt_mutex_postunlock() work together to 5487 * ensure a task is de-boosted (pi_task is set to NULL) before the 5488 * task is allowed to run again (and can exit). This ensures the pointer 5489 * points to a blocked task -- which guarantees the task is present. 5490 */ 5491 p->pi_top_task = pi_task; 5492 5493 /* 5494 * For FIFO/RR we only need to set prio; if that matches we're done. 5495 */ 5496 if (prio == p->prio && !dl_prio(prio)) 5497 goto out_unlock; 5498 5499 /* 5500 * Idle task boosting is a no-no in general. There is one 5501 * exception, when PREEMPT_RT and NOHZ are active: 5502 * 5503 * The idle task calls get_next_timer_interrupt() and holds 5504 * the timer wheel base->lock on the CPU and another CPU wants 5505 * to access the timer (probably to cancel it). We can safely 5506 * ignore the boosting request, as the idle CPU runs this code 5507 * with interrupts disabled and will complete the lock 5508 * protected section without being interrupted. So there is no 5509 * real need to boost. 5510 */ 5511 if (unlikely(p == rq->idle)) { 5512 WARN_ON(p != rq->curr); 5513 WARN_ON(p->pi_blocked_on); 5514 goto out_unlock; 5515 } 5516 5517 trace_sched_pi_setprio(p, pi_task); 5518 oldprio = p->prio; 5519 5520 if (oldprio == prio) 5521 queue_flag &= ~DEQUEUE_MOVE; 5522 5523 prev_class = p->sched_class; 5524 queued = task_on_rq_queued(p); 5525 running = task_current(rq, p); 5526 if (queued) 5527 dequeue_task(rq, p, queue_flag); 5528 if (running) 5529 put_prev_task(rq, p); 5530 5531 /* 5532 * Boosting conditions are: 5533 * 1. -rt task is running and holds mutex A 5534 * --> -dl task blocks on mutex A 5535 * 5536 * 2. -dl task is running and holds mutex A 5537 * --> -dl task blocks on mutex A and could preempt the 5538 * running task 5539 */ 5540 if (dl_prio(prio)) { 5541 if (!dl_prio(p->normal_prio) || 5542 (pi_task && dl_prio(pi_task->prio) && 5543 dl_entity_preempt(&pi_task->dl, &p->dl))) { 5544 p->dl.pi_se = pi_task->dl.pi_se; 5545 queue_flag |= ENQUEUE_REPLENISH; 5546 } else { 5547 p->dl.pi_se = &p->dl; 5548 } 5549 p->sched_class = &dl_sched_class; 5550 } else if (rt_prio(prio)) { 5551 if (dl_prio(oldprio)) 5552 p->dl.pi_se = &p->dl; 5553 if (oldprio < prio) 5554 queue_flag |= ENQUEUE_HEAD; 5555 p->sched_class = &rt_sched_class; 5556 } else { 5557 if (dl_prio(oldprio)) 5558 p->dl.pi_se = &p->dl; 5559 if (rt_prio(oldprio)) 5560 p->rt.timeout = 0; 5561 p->sched_class = &fair_sched_class; 5562 } 5563 5564 p->prio = prio; 5565 5566 if (queued) 5567 enqueue_task(rq, p, queue_flag); 5568 if (running) 5569 set_next_task(rq, p); 5570 5571 check_class_changed(rq, p, prev_class, oldprio); 5572 out_unlock: 5573 /* Prevent rq from going away on us: */ 5574 preempt_disable(); 5575 5576 rq_unpin_lock(rq, &rf); 5577 __balance_callbacks(rq); 5578 raw_spin_unlock(&rq->lock); 5579 5580 preempt_enable(); 5581 } 5582 #else 5583 static inline int rt_effective_prio(struct task_struct *p, int prio) 5584 { 5585 return prio; 5586 } 5587 #endif 5588 5589 void set_user_nice(struct task_struct *p, long nice) 5590 { 5591 bool queued, running; 5592 int old_prio; 5593 struct rq_flags rf; 5594 struct rq *rq; 5595 5596 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 5597 return; 5598 /* 5599 * We have to be careful: if called from sys_setpriority(), 5600 * the task might be in the middle of scheduling on another CPU.
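 *
 * task_rq_lock() below takes both p->pi_lock and the runqueue lock,
 * which keeps the task's runqueue stable across the dequeue/requeue
 * sequence that follows (editor's note).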
5601 */ 5602 rq = task_rq_lock(p, &rf); 5603 update_rq_clock(rq); 5604 5605 /* 5606 * The RT priorities are set via sched_setscheduler(), but we still 5607 * allow the 'normal' nice value to be set - but as expected 5608 * it won't have any effect on scheduling while the task is 5609 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 5610 */ 5611 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 5612 p->static_prio = NICE_TO_PRIO(nice); 5613 goto out_unlock; 5614 } 5615 queued = task_on_rq_queued(p); 5616 running = task_current(rq, p); 5617 if (queued) 5618 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 5619 if (running) 5620 put_prev_task(rq, p); 5621 5622 p->static_prio = NICE_TO_PRIO(nice); 5623 set_load_weight(p, true); 5624 old_prio = p->prio; 5625 p->prio = effective_prio(p); 5626 5627 if (queued) 5628 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 5629 if (running) 5630 set_next_task(rq, p); 5631 5632 /* 5633 * If the task increased its priority or is running and 5634 * lowered its priority, then reschedule its CPU: 5635 */ 5636 p->sched_class->prio_changed(rq, p, old_prio); 5637 5638 out_unlock: 5639 task_rq_unlock(rq, p, &rf); 5640 } 5641 EXPORT_SYMBOL(set_user_nice); 5642 5643 /* 5644 * can_nice - check if a task can reduce its nice value 5645 * @p: task 5646 * @nice: nice value 5647 */ 5648 int can_nice(const struct task_struct *p, const int nice) 5649 { 5650 /* Convert nice value [19,-20] to rlimit style value [1,40]: */ 5651 int nice_rlim = nice_to_rlimit(nice); 5652 5653 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 5654 capable(CAP_SYS_NICE)); 5655 } 5656 5657 #ifdef __ARCH_WANT_SYS_NICE 5658 5659 /* 5660 * sys_nice - change the priority of the current process. 5661 * @increment: priority increment 5662 * 5663 * sys_setpriority is a more generic, but much slower function that 5664 * does similar things. 5665 */ 5666 SYSCALL_DEFINE1(nice, int, increment) 5667 { 5668 long nice, retval; 5669 5670 /* 5671 * Setpriority might change our priority at the same moment. 5672 * We don't have to worry. Conceptually one call occurs first 5673 * and we have a single winner. 5674 */ 5675 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 5676 nice = task_nice(current) + increment; 5677 5678 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 5679 if (increment < 0 && !can_nice(current, nice)) 5680 return -EPERM; 5681 5682 retval = security_task_setnice(current, nice); 5683 if (retval) 5684 return retval; 5685 5686 set_user_nice(current, nice); 5687 return 0; 5688 } 5689 5690 #endif 5691 5692 /** 5693 * task_prio - return the priority value of a given task. 5694 * @p: the task in question. 5695 * 5696 * Return: The priority value as seen by users in /proc. 5697 * 5698 * sched policy return value kernel prio user prio/nice 5699 * 5700 * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19] 5701 * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99] 5702 * deadline -101 -1 0 5703 */ 5704 int task_prio(const struct task_struct *p) 5705 { 5706 return p->prio - MAX_RT_PRIO; 5707 } 5708 5709 /** 5710 * idle_cpu - is a given CPU idle currently? 5711 * @cpu: the processor in question. 5712 * 5713 * Return: 1 if the CPU is currently idle. 0 otherwise.
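 *
 * (Editor's note: the snapshot is instantaneous and, like
 * single_task_running() above, may be stale by the time the caller
 * acts on it.)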
5714 */ 5715 int idle_cpu(int cpu) 5716 { 5717 struct rq *rq = cpu_rq(cpu); 5718 5719 if (rq->curr != rq->idle) 5720 return 0; 5721 5722 if (rq->nr_running) 5723 return 0; 5724 5725 #ifdef CONFIG_SMP 5726 if (rq->ttwu_pending) 5727 return 0; 5728 #endif 5729 5730 return 1; 5731 } 5732 5733 /** 5734 * available_idle_cpu - is a given CPU idle for enqueuing work. 5735 * @cpu: the CPU in question. 5736 * 5737 * Return: 1 if the CPU is currently idle. 0 otherwise. 5738 */ 5739 int available_idle_cpu(int cpu) 5740 { 5741 if (!idle_cpu(cpu)) 5742 return 0; 5743 5744 if (vcpu_is_preempted(cpu)) 5745 return 0; 5746 5747 return 1; 5748 } 5749 5750 /** 5751 * idle_task - return the idle task for a given CPU. 5752 * @cpu: the processor in question. 5753 * 5754 * Return: The idle task for the CPU @cpu. 5755 */ 5756 struct task_struct *idle_task(int cpu) 5757 { 5758 return cpu_rq(cpu)->idle; 5759 } 5760 5761 #ifdef CONFIG_SMP 5762 /* 5763 * This function computes an effective utilization for the given CPU, to be 5764 * used for frequency selection given the linear relation: f = u * f_max. 5765 * 5766 * The scheduler tracks the following metrics: 5767 * 5768 * cpu_util_{cfs,rt,dl,irq}() 5769 * cpu_bw_dl() 5770 * 5771 * Where the cfs,rt and dl util numbers are tracked with the same metric and 5772 * synchronized windows and are thus directly comparable. 5773 * 5774 * The cfs,rt,dl utilization are the running times measured with rq->clock_task 5775 * which excludes things like IRQ and steal-time. These latter are then accrued 5776 * in the irq utilization. 5777 * 5778 * The DL bandwidth number otoh is not a measured metric but a value computed 5779 * based on the task model parameters and gives the minimal utilization 5780 * required to meet deadlines. 5781 */ 5782 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 5783 unsigned long max, enum cpu_util_type type, 5784 struct task_struct *p) 5785 { 5786 unsigned long dl_util, util, irq; 5787 struct rq *rq = cpu_rq(cpu); 5788 5789 if (!uclamp_is_used() && 5790 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) { 5791 return max; 5792 } 5793 5794 /* 5795 * Early check to see if IRQ/steal time saturates the CPU; this can 5796 * happen because of inaccuracies in how we track these -- see 5797 * update_irq_load_avg(). 5798 */ 5799 irq = cpu_util_irq(rq); 5800 if (unlikely(irq >= max)) 5801 return max; 5802 5803 /* 5804 * Because the time spent on RT/DL tasks is visible as 'lost' time to 5805 * CFS tasks and we use the same metric to track the effective 5806 * utilization (PELT windows are synchronized) we can directly add them 5807 * to obtain the CPU's actual utilization. 5808 * 5809 * CFS and RT utilization can be boosted or capped, depending on 5810 * utilization clamp constraints requested by currently RUNNABLE 5811 * tasks. 5812 * When there are no CFS RUNNABLE tasks, clamps are released and 5813 * frequency will be gracefully reduced with the utilization decay. 5814 */ 5815 util = util_cfs + cpu_util_rt(rq); 5816 if (type == FREQUENCY_UTIL) 5817 util = uclamp_rq_util_with(rq, util, p); 5818 5819 dl_util = cpu_util_dl(rq); 5820 5821 /* 5822 * For frequency selection we do not make cpu_util_dl() a permanent part 5823 * of this sum because we want to use cpu_bw_dl() later on, but we need 5824 * to check if the CFS+RT+DL sum is saturated (i.e. no idle time) such 5825 * that we select f_max when there is no idle time.
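 *
 * Worked example (editor's illustration, max = 1024): with
 * util_cfs + cpu_util_rt() = 300, dl_util = 200 and negligible irq time,
 * the saturation check sees 500 < 1024, so there is idle time left;
 * FREQUENCY_UTIL then continues with util = 300 and adds cpu_bw_dl()
 * at the end, while ENERGY_UTIL adds the measured dl_util instead.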

#ifdef CONFIG_SMP
/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the irq utilization.
 *
 * The DL bandwidth number OTOH is not a measured metric but a value computed
 * based on the task model parameters and gives the minimal utilization
 * required to meet deadlines.
 */
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long max, enum cpu_util_type type,
				 struct task_struct *p)
{
	unsigned long dl_util, util, irq;
	struct rq *rq = cpu_rq(cpu);

	if (!uclamp_is_used() &&
	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
		return max;
	}

	/*
	 * Early check to see if IRQ/steal time saturates the CPU, which can
	 * happen because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= max))
		return max;

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 *
	 * CFS and RT utilization can be boosted or capped, depending on
	 * utilization clamp constraints requested by currently RUNNABLE
	 * tasks.
	 * When there are no CFS RUNNABLE tasks, clamps are released and
	 * frequency will be gracefully reduced with the utilization decay.
	 */
	util = util_cfs + cpu_util_rt(rq);
	if (type == FREQUENCY_UTIL)
		util = uclamp_rq_util_with(rq, util, p);

	dl_util = cpu_util_dl(rq);

	/*
	 * For frequency selection we do not make cpu_util_dl() a permanent part
	 * of this sum because we want to use cpu_bw_dl() later on, but we need
	 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
	 * that we select f_max when there is no idle time.
	 *
	 * NOTE: numerical errors or stop class might cause us to not quite hit
	 * saturation when we should -- something for later.
	 */
	if (util + dl_util >= max)
		return max;

	/*
	 * OTOH, for energy computation we need the estimated running time, so
	 * include util_dl and ignore dl_bw.
	 */
	if (type == ENERGY_UTIL)
		util += dl_util;

	/*
	 * There is still idle time; further improve the number by using the
	 * irq metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
	 */
	util = scale_irq_capacity(util, irq, max);
	util += irq;

	/*
	 * Bandwidth required by DEADLINE must always be granted while, for
	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
	 * to gracefully reduce the frequency when no tasks show up for longer
	 * periods of time.
	 *
	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
	 * an interface. So, we only do the latter for now.
	 */
	if (type == FREQUENCY_UTIL)
		util += cpu_bw_dl(rq);

	return min(max, util);
}

unsigned long sched_cpu_util(int cpu, unsigned long max)
{
	return effective_cpu_util(cpu, cpu_util_cfs(cpu_rq(cpu)), max,
				  ENERGY_UTIL, NULL);
}
#endif /* CONFIG_SMP */
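
/*
 * Worked example (editor's note) for the IRQ scaling step above: with
 * max = 1024, irq = 256 and a clamped CFS+RT utilization U = 512,
 *
 *   U' = irq + ((max - irq) / max) * U
 *      = 256 + (768 / 1024) * 512 = 256 + 384 = 640
 *
 * so a FREQUENCY_UTIL caller (plus cpu_bw_dl()) would request roughly
 * f = (U' / max) * f_max on this CPU.
 */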

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
#define SETPARAM_POLICY	-1

static void __setscheduler_params(struct task_struct *p,
		const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == SETPARAM_POLICY)
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy))
		__setparam_dl(p, attr);
	else if (fair_policy(policy))
		p->static_prio = NICE_TO_PRIO(attr->sched_nice);

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p, true);
}

/* Actually do priority change: must hold pi & rq lock. */
static void __setscheduler(struct rq *rq, struct task_struct *p,
			   const struct sched_attr *attr, bool keep_boost)
{
	/*
	 * If params can't change, scheduling class changes aren't allowed
	 * either.
	 */
	if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
		return;

	__setscheduler_params(p, attr);

	/*
	 * Keep a potential priority boost if called from
	 * sched_setscheduler().
	 */
	p->prio = normal_prio(p);
	if (keep_boost)
		p->prio = rt_effective_prio(p, p->prio);

	if (dl_prio(p->prio))
		p->sched_class = &dl_sched_class;
	else if (rt_prio(p->prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;
}

/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	bool match;

	rcu_read_lock();
	pcred = __task_cred(p);
	match = (uid_eq(cred->euid, pcred->euid) ||
		 uid_eq(cred->euid, pcred->uid));
	rcu_read_unlock();
	return match;
}

static int __sched_setscheduler(struct task_struct *p,
				const struct sched_attr *attr,
				bool user, bool pi)
{
	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
		      MAX_RT_PRIO - 1 - attr->sched_priority;
	int retval, oldprio, oldpolicy = -1, queued, running;
	int new_effective_prio, policy = attr->sched_policy;
	const struct sched_class *prev_class;
	struct callback_head *head;
	struct rq_flags rf;
	int reset_on_fork;
	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq *rq;

	/* The pi code expects interrupts enabled */
	BUG_ON(pi && in_interrupt());
recheck:
	/* Double check policy once rq lock held: */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);

		if (!valid_policy(policy))
			return -EINVAL;
	}

	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (attr->sched_priority > MAX_RT_PRIO-1)
		return -EINVAL;
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
		return -EINVAL;

	/*
	 * Allow unprivileged RT tasks to decrease priority:
	 */
	if (user && !capable(CAP_SYS_NICE)) {
		if (fair_policy(policy)) {
			if (attr->sched_nice < task_nice(p) &&
			    !can_nice(p, attr->sched_nice))
				return -EPERM;
		}

		if (rt_policy(policy)) {
			unsigned long rlim_rtprio =
					task_rlimit(p, RLIMIT_RTPRIO);

			/* Can't set/change the rt policy: */
			if (policy != p->policy && !rlim_rtprio)
				return -EPERM;

			/* Can't increase priority: */
			if (attr->sched_priority > p->rt_priority &&
			    attr->sched_priority > rlim_rtprio)
				return -EPERM;
		}

		/*
		 * Can't set/change SCHED_DEADLINE policy at all for now
		 * (safest behavior); in the future we would like to allow
		 * unprivileged DL tasks to increase their relative deadline
		 * or reduce their runtime (both ways reducing utilization)
		 */
		if (dl_policy(policy))
			return -EPERM;

		/*
		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
		 */
		if (task_has_idle_policy(p) && !idle_policy(policy)) {
			if (!can_nice(p, task_nice(p)))
				return -EPERM;
		}

		/* Can't change other user's priorities: */
		if (!check_same_owner(p))
			return -EPERM;

		/* Normal users shall not reset the sched_reset_on_fork flag: */
		if (p->sched_reset_on_fork && !reset_on_fork)
			return -EPERM;
	}

	if (user) {
		if (attr->sched_flags & SCHED_FLAG_SUGOV)
			return -EINVAL;

		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/* Update task specific "requested" clamps */
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
		retval = uclamp_validate(p, attr);
		if (retval)
			return retval;
	}

	if (pi)
		cpuset_read_lock();

	/*
	 * Make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * Changing the policy of the stop threads is a very bad idea:
	 */
	if (p == rq->stop) {
		retval = -EINVAL;
		goto unlock;
	}

	/*
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy) && dl_param_changed(p, attr))
			goto change;
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			goto change;

		p->sched_reset_on_fork = reset_on_fork;
		retval = 0;
		goto unlock;
	}
change:

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow realtime tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			retval = -EPERM;
			goto unlock;
		}
#endif
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy) &&
				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, p->cpus_ptr) ||
			    rq->rd->dl_bw.bw == 0) {
				retval = -EPERM;
				goto unlock;
			}
		}
#endif
	}

	/* Re-check policy now with rq lock held: */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &rf);
		if (pi)
			cpuset_read_unlock();
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
		retval = -EBUSY;
		goto unlock;
	}

	p->sched_reset_on_fork = reset_on_fork;
	oldprio = p->prio;

	if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
		 */
		new_effective_prio = rt_effective_prio(p, newprio);
		if (new_effective_prio == oldprio)
			queue_flags &= ~DEQUEUE_MOVE;
	}

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flags);
	if (running)
		put_prev_task(rq, p);

	prev_class = p->sched_class;

	__setscheduler(rq, p, attr, pi);
	__setscheduler_uclamp(p, attr);

	if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
		if (oldprio < p->prio)
			queue_flags |= ENQUEUE_HEAD;

		enqueue_task(rq, p, queue_flags);
	}
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);

	/* Avoid rq from going away on us: */
	preempt_disable();
	head = splice_balance_callbacks(rq);
	task_rq_unlock(rq, p, &rf);

	if (pi) {
		cpuset_read_unlock();
		rt_mutex_adjust_pi(p);
	}

	/* Run balance callbacks after we've adjusted the PI chain: */
	balance_callbacks(rq, head);
	preempt_enable();

	return 0;

unlock:
	task_rq_unlock(rq, p, &rf);
	if (pi)
		cpuset_read_unlock();
	return retval;
}

static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority,
		.sched_nice	= PRIO_TO_NICE(p->static_prio),
	};

	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		policy &= ~SCHED_RESET_ON_FORK;
		attr.sched_policy = policy;
	}

	return __sched_setscheduler(p, &attr, check, true);
}

/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Use sched_set_fifo(), read its comment.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, true);
}

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true, true);
}

int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, false, true);
}
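
/*
 * Example (editor's note): the kernel-internal priority scale used for
 * 'newprio' above is inverted with respect to the user-space one. A
 * SCHED_FIFO request with attr->sched_priority == 50 maps to
 * MAX_RT_PRIO - 1 - 50 = 49, while SCHED_DEADLINE always maps to
 * MAX_DL_PRIO - 1 == -1 and therefore ranks above every RT priority.
 */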

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, false);
}

/*
 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 * incapable of resource management, which is the one thing an OS really should
 * be doing.
 *
 * This is of course the reason it is limited to privileged users only.
 *
 * Worse still, it is fundamentally impossible to compose static priority
 * workloads. You cannot take two correctly working static prio workloads
 * and smash them together and still expect them to work.
 *
 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 *
 *   MAX_RT_PRIO / 2
 *
 * The administrator _MUST_ configure the system, the kernel simply doesn't
 * know enough information to make a sensible choice.
 */
void sched_set_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);

/*
 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 */
void sched_set_fifo_low(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 1 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);

void sched_set_normal(struct task_struct *p, int nice)
{
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
		.sched_nice = nice,
	};
	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_normal);
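
/*
 * Example (editor's illustrative sketch): a driver spawning a
 * latency-sensitive kthread is expected to use the helpers above rather
 * than inventing an RT priority of its own. Assumes <linux/kthread.h>;
 * example_thread_fn and the thread name are hypothetical:
 */
static inline int example_spawn_fifo_kthread(int (*example_thread_fn)(void *))
{
	struct task_struct *tsk;

	tsk = kthread_run(example_thread_fn, NULL, "example_fifo");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	sched_set_fifo(tsk);	/* lands at MAX_RT_PRIO / 2, no magic numbers */
	return 0;
}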

static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);
	rcu_read_unlock();

	if (likely(p)) {
		retval = sched_setscheduler(p, policy, &lparam);
		put_task_struct(p);
	}

	return retval;
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
{
	u32 size;
	int ret;

	/* Zero the full structure, so that a short copy will be nice: */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	/* ABI compatibility quirk: */
	if (!size)
		size = SCHED_ATTR_SIZE_VER0;
	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
		goto err_size;

	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
	if (ret) {
		if (ret == -E2BIG)
			goto err_size;
		return ret;
	}

	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
	    size < SCHED_ATTR_SIZE_VER1)
		return -EINVAL;

	/*
	 * XXX: Do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);

	return 0;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	return -E2BIG;
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
{
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}

/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || flags)
		return -EINVAL;

	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;

	if ((int)attr.sched_policy < 0)
		return -EINVAL;
	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
		attr.sched_policy = SETPARAM_POLICY;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);
	rcu_read_unlock();

	if (likely(p)) {
		retval = sched_setattr(p, &attr);
		put_task_struct(p);
	}

	return retval;
}
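
/*
 * Example (editor's note, user-space view): glibc has no wrapper for
 * sched_setattr(), so callers typically use syscall(2) directly with the
 * uapi struct sched_attr (include/uapi/linux/sched/types.h):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	//  10ms, in ns
 *		.sched_deadline	=  30 * 1000 * 1000,	//  30ms
 *		.sched_period	= 100 * 1000 * 1000,	// 100ms
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 *
 * pid 0 means the calling thread, and flags must currently be 0.
 */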

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (p) {
		retval = security_task_getscheduler(p);
		if (!retval)
			retval = p->policy
				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
	}
	rcu_read_unlock();
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	if (task_has_rt_policy(p))
		lp.sched_priority = p->rt_priority;
	rcu_read_unlock();

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}

/*
 * Copy the kernel's sched_attr structure (which might be larger
 * than what user-space knows about) to user-space.
 *
 * Note that all cases are valid: user-space buffer can be larger or
 * smaller than the kernel-space buffer. The usual case is that both
 * have the same size.
 */
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
			struct sched_attr *kattr,
			unsigned int usize)
{
	unsigned int ksize = sizeof(*kattr);

	if (!access_ok(uattr, usize))
		return -EFAULT;

	/*
	 * sched_getattr() ABI forwards and backwards compatibility:
	 *
	 * If usize == ksize then we just copy everything to user-space and all is good.
	 *
	 * If usize < ksize then we only copy as much as user-space has space for,
	 * this keeps ABI compatibility as well. We skip the rest.
	 *
	 * If usize > ksize then user-space is using a newer version of the ABI,
	 * parts of which the kernel doesn't know about. Just ignore it - tooling can
	 * detect the kernel's knowledge of attributes from the attr->size value
	 * which is set to ksize in this case.
	 */
	kattr->size = min(usize, ksize);

	if (copy_to_user(uattr, kattr, kattr->size))
		return -EFAULT;

	return 0;
}
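
/*
 * Example (editor's note): with the VER1 layout the kernel's sched_attr
 * is 56 bytes. If user-space passes usize == 48 (the VER0 ABI), only the
 * first 48 bytes are copied and attr->size is set to 48; if user-space
 * passes usize == 64 (some future ABI), the kernel copies its 56 bytes
 * and sets attr->size to 56, letting tooling detect what the kernel knows.
 */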

/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @usize: sizeof(attr) for fwd/bwd comp.
 * @flags: for future extension.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, usize, unsigned int, flags)
{
	struct sched_attr kattr = { };
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
	    usize < SCHED_ATTR_SIZE_VER0 || flags)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	kattr.sched_policy = p->policy;
	if (p->sched_reset_on_fork)
		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
	if (task_has_dl_policy(p))
		__getparam_dl(p, &kattr);
	else if (task_has_rt_policy(p))
		kattr.sched_priority = p->rt_priority;
	else
		kattr.sched_nice = task_nice(p);

#ifdef CONFIG_UCLAMP_TASK
	/*
	 * This could race with another potential updater, but this is fine
	 * because it'll correctly read the old or the new value. We don't need
	 * to guarantee who wins the race as long as it doesn't return garbage.
	 */
	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif

	rcu_read_unlock();

	return sched_attr_copy_to_user(uattr, &kattr, usize);

out_unlock:
	rcu_read_unlock();
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	cpumask_var_t cpus_allowed, new_mask;
	struct task_struct *p;
	int retval;

	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (p->flags & PF_NO_SETAFFINITY) {
		retval = -EINVAL;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	retval = -EPERM;
	if (!check_same_owner(p)) {
		rcu_read_lock();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
			rcu_read_unlock();
			goto out_free_new_mask;
		}
		rcu_read_unlock();
	}

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_free_new_mask;

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, in_mask, cpus_allowed);

	/*
	 * Since bandwidth control happens on a root_domain basis,
	 * if admission test is enabled, we only admit -deadline
	 * tasks allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
#ifdef CONFIG_SMP
	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
		rcu_read_lock();
		if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
			retval = -EBUSY;
			rcu_read_unlock();
			goto out_free_new_mask;
		}
		rcu_read_unlock();
	}
#endif
again:
	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(new_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed.
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	unsigned long flags;
	int retval;

	rcu_read_lock();

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

out_unlock:
	rcu_read_unlock();

	return retval;
}

/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, mask, retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}
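
/*
 * Example (editor's note, user-space view): pinning the calling thread to
 * CPU 2 via the glibc wrappers around these syscalls:
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set))
 *		perror("sched_setaffinity");
 *
 * Note that the raw sched_getaffinity() syscall returns the number of
 * bytes copied, which the glibc wrapper hides by returning 0 on success.
 */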

static void do_sched_yield(void)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = this_rq_lock_irq(&rf);

	schedstat_inc(rq->yld_count);
	current->sched_class->yield_task(rq);

	preempt_disable();
	rq_unlock_irq(rq, &rf);
	sched_preempt_enable_no_resched();

	schedule();
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	do_sched_yield();
	return 0;
}

#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
int __sched __cond_resched(void)
{
	if (should_resched(0)) {
		preempt_schedule_common();
		return 1;
	}
#ifndef CONFIG_PREEMPT_RCU
	rcu_all_qs();
#endif
	return 0;
}
EXPORT_SYMBOL(__cond_resched);
#endif

#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
EXPORT_STATIC_CALL(cond_resched);

DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
EXPORT_STATIC_CALL(might_resched);
#endif

/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
int __cond_resched_lock(spinlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held(lock);

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		if (resched)
			preempt_schedule_common();
		else
			cpu_relax();
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run; if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	do_sched_yield();
}
EXPORT_SYMBOL(yield);
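
/*
 * Example (editor's note): the event-wait that people try to open-code
 * with yield() should instead look like this, assuming a
 * wait_queue_head_t wq and a suitably serialized 'event' flag:
 *
 *	// waiter
 *	wait_event(wq, event);
 *
 *	// waker
 *	event = true;
 *	wake_up(&wq);
 */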

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	unsigned long flags;
	int yielded = 0;

	local_irq_save(flags);
	rq = this_rq();

again:
	p_rq = task_rq(p);
	/*
	 * If we're the only runnable task on the rq and target rq also
	 * has only one task, there's absolutely no point in yielding.
	 */
	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
		yielded = -ESRCH;
		goto out_irq;
	}

	double_rq_lock(rq, p_rq);
	if (task_rq(p) != p_rq) {
		double_rq_unlock(rq, p_rq);
		goto again;
	}

	if (!curr->sched_class->yield_to_task)
		goto out_unlock;

	if (curr->sched_class != p->sched_class)
		goto out_unlock;

	if (task_running(p_rq, p) || p->state)
		goto out_unlock;

	yielded = curr->sched_class->yield_to_task(rq, p);
	if (yielded) {
		schedstat_inc(rq->yld_count);
		/*
		 * Make p's CPU reschedule; pick_next_entity() takes care of
		 * fairness.
		 */
		if (preempt && rq != p_rq)
			resched_curr(p_rq);
	}

out_unlock:
	double_rq_unlock(rq, p_rq);
out_irq:
	local_irq_restore(flags);

	if (yielded > 0)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

int io_schedule_prepare(void)
{
	int old_iowait = current->in_iowait;

	current->in_iowait = 1;
	blk_schedule_flush_plug(current);

	return old_iowait;
}

void io_schedule_finish(int token)
{
	current->in_iowait = token;
}

/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 */
long __sched io_schedule_timeout(long timeout)
{
	int token;
	long ret;

	token = io_schedule_prepare();
	ret = schedule_timeout(timeout);
	io_schedule_finish(token);

	return ret;
}
EXPORT_SYMBOL(io_schedule_timeout);

void __sched io_schedule(void)
{
	int token;

	token = io_schedule_prepare();
	schedule();
	io_schedule_finish(token);
}
EXPORT_SYMBOL(io_schedule);
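
/*
 * Example (editor's illustrative sketch): wrapping an arbitrary sleep in
 * io_schedule_prepare()/io_schedule_finish() so it is accounted as iowait,
 * assuming <linux/completion.h>; example_done is hypothetical:
 */
static inline void example_wait_for_io(struct completion *example_done)
{
	int token = io_schedule_prepare();

	wait_for_completion(example_done);	/* sleeps with ->in_iowait set */
	io_schedule_finish(token);
}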

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_RT_PRIO-1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
	}
	return ret;
}

static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
{
	struct task_struct *p;
	unsigned int time_slice;
	struct rq_flags rf;
	struct rq *rq;
	int retval;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	rq = task_rq_lock(p, &rf);
	time_slice = 0;
	if (p->sched_class->get_rr_interval)
		time_slice = p->sched_class->get_rr_interval(rq, p);
	task_rq_unlock(rq, p, &rf);

	rcu_read_unlock();
	jiffies_to_timespec64(time_slice, t);
	return 0;

out_unlock:
	rcu_read_unlock();
	return retval;
}

/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * This syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
 * an error code.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct __kernel_timespec __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_timespec64(&t, interval);

	return retval;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
		struct old_timespec32 __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_old_timespec32(&t, interval);
	return retval;
}
#endif
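
/*
 * Example (editor's note, user-space view): querying the timeslice of the
 * calling thread via the glibc wrapper:
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
 *
 * For SCHED_RR this is the round-robin slice; a '0' result means an
 * infinite timeslice, and CFS tasks report a synthetic slice computed by
 * their class's get_rr_interval() hook.
 */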

void sched_show_task(struct task_struct *p)
{
	unsigned long free = 0;
	int ppid;

	if (!try_get_task_stack(p))
		return;

	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));

	if (p->state == TASK_RUNNING)
		pr_cont(" running task ");
#ifdef CONFIG_DEBUG_STACK_USAGE
	free = stack_not_used(p);
#endif
	ppid = 0;
	rcu_read_lock();
	if (pid_alive(p))
		ppid = task_pid_nr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
		free, task_pid_nr(p), ppid,
		(unsigned long)task_thread_info(p)->flags);

	print_worker_info(KERN_INFO, p);
	print_stop_info(KERN_INFO, p);
	show_stack(p, NULL, KERN_INFO);
	put_task_stack(p);
}
EXPORT_SYMBOL_GPL(sched_show_task);

static inline bool
state_filter_match(unsigned long state_filter, struct task_struct *p)
{
	/* no filter, everything matches */
	if (!state_filter)
		return true;

	/* filter, but doesn't match */
	if (!(p->state & state_filter))
		return false;

	/*
	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
	 * TASK_KILLABLE).
	 */
	if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
		return false;

	return true;
}

void show_state_filter(unsigned long state_filter)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Reset the NMI-timeout; listing all tasks on a slow
		 * console might take a lot of time.
		 * Also, reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		if (state_filter_match(state_filter, p))
			sched_show_task(p);
	}

#ifdef CONFIG_SCHED_DEBUG
	if (!state_filter)
		sysrq_sched_debug_show();
#endif
	rcu_read_unlock();
	/*
	 * Only show locks if all tasks are dumped:
	 */
	if (!state_filter)
		debug_show_all_locks();
}

/**
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: CPU the idle task belongs to
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
void init_idle(struct task_struct *idle, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	__sched_fork(0, idle);

	raw_spin_lock_irqsave(&idle->pi_lock, flags);
	raw_spin_lock(&rq->lock);

	idle->state = TASK_RUNNING;
	idle->se.exec_start = sched_clock();
	idle->flags |= PF_IDLE;

	scs_task_reset(idle);
	kasan_unpoison_task_stack(idle);

#ifdef CONFIG_SMP
	/*
	 * It's possible that init_idle() gets called multiple times on a task,
	 * in that case do_set_cpus_allowed() will not do the right thing.
	 *
	 * And since this is boot we can forgo the serialization.
	 */
	set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
#endif
	/*
	 * We have a chicken-and-egg problem: even though we are
	 * holding rq->lock, the CPU isn't yet set to this CPU so the
	 * lockdep check in task_group() will fail.
	 *
	 * Similar case to sched_fork(); alternatively we could
	 * use task_rq_lock() here and obtain the other rq->lock.
	 *
	 * Silence PROVE_RCU
	 */
	rcu_read_lock();
	__set_task_cpu(idle, cpu);
	rcu_read_unlock();

	rq->idle = idle;
	rcu_assign_pointer(rq->curr, idle);
	idle->on_rq = TASK_ON_RQ_QUEUED;
#ifdef CONFIG_SMP
	idle->on_cpu = 1;
#endif
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);

	/* Set the preempt count _outside_ the spinlocks! */
	init_idle_preempt_count(idle, cpu);

	/*
	 * The idle tasks have their own, simple scheduling class:
	 */
	idle->sched_class = &idle_sched_class;
	ftrace_graph_init_idle_task(idle, cpu);
	vtime_init_idle(idle, cpu);
#ifdef CONFIG_SMP
	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
#endif
}

#ifdef CONFIG_SMP

int cpuset_cpumask_can_shrink(const struct cpumask *cur,
			      const struct cpumask *trial)
{
	int ret = 1;

	if (!cpumask_weight(cur))
		return ret;

	ret = dl_cpuset_cpumask_can_shrink(cur, trial);

	return ret;
}

int task_can_attach(struct task_struct *p,
		    const struct cpumask *cs_cpus_allowed)
{
	int ret = 0;

	/*
	 * Kthreads which disallow setaffinity shouldn't be moved
	 * to a new cpuset; we don't want to change their CPU
	 * affinity and isolating such threads by their set of
	 * allowed nodes is unnecessary. Thus, cpusets are not
	 * applicable for such threads. This prevents checking for
	 * success of set_cpus_allowed_ptr() on all attached tasks
	 * before cpus_mask may be changed.
	 */
	if (p->flags & PF_NO_SETAFFINITY) {
		ret = -EINVAL;
		goto out;
	}

	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
					      cs_cpus_allowed))
		ret = dl_task_can_attach(p, cs_cpus_allowed);

out:
	return ret;
}

bool sched_smp_initialized __read_mostly;

#ifdef CONFIG_NUMA_BALANCING
/* Migrate current task p to target_cpu */
int migrate_task_to(struct task_struct *p, int target_cpu)
{
	struct migration_arg arg = { p, target_cpu };
	int curr_cpu = task_cpu(p);

	if (curr_cpu == target_cpu)
		return 0;

	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
		return -EINVAL;

	/* TODO: This is not properly updating schedstats */

	trace_sched_move_numa(p, curr_cpu, target_cpu);
	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
}

/*
 * Requeue a task on a given node and accurately track the number of NUMA
 * tasks on the runqueues.
 */
void sched_setnuma(struct task_struct *p, int nid)
{
	bool queued, running;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE);
	if (running)
		put_prev_task(rq, p);

	p->numa_preferred_nid = nid;

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);
	task_rq_unlock(rq, p, &rf);
}
#endif /* CONFIG_NUMA_BALANCING */
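
/*
 * Editor's note: sched_setnuma() above is another instance of the
 * canonical "change pattern" this file uses whenever a property that
 * affects a task's position in the runqueue structures is modified:
 *
 *	queued = task_on_rq_queued(p);
 *	running = task_current(rq, p);
 *	if (queued)
 *		dequeue_task(rq, p, DEQUEUE_SAVE);
 *	if (running)
 *		put_prev_task(rq, p);
 *
 *	// change the property, e.g. p->numa_preferred_nid
 *
 *	if (queued)
 *		enqueue_task(rq, p, ENQUEUE_RESTORE);
 *	if (running)
 *		set_next_task(rq, p);
 *
 * all under task_rq_lock(), so the task is taken out of the queues while
 * it changes and is requeued (and possibly resumed) afterwards. Compare
 * set_user_nice() and __sched_setscheduler() earlier in this file.
 */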

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Ensure that the idle task is using init_mm right before its CPU goes
 * offline.
 */
void idle_task_exit(void)
{
	struct mm_struct *mm = current->active_mm;

	BUG_ON(cpu_online(smp_processor_id()));
	BUG_ON(current != this_rq()->idle);

	if (mm != &init_mm) {
		switch_mm(mm, &init_mm, current);
		finish_arch_post_lock_switch();
	}

	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
}

static int __balance_push_cpu_stop(void *arg)
{
	struct task_struct *p = arg;
	struct rq *rq = this_rq();
	struct rq_flags rf;
	int cpu;

	raw_spin_lock_irq(&p->pi_lock);
	rq_lock(rq, &rf);

	update_rq_clock(rq);

	if (task_rq(p) == rq && task_on_rq_queued(p)) {
		cpu = select_fallback_rq(rq->cpu, p);
		rq = __migrate_task(rq, &rf, p, cpu);
	}

	rq_unlock(rq, &rf);
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	return 0;
}

static DEFINE_PER_CPU(struct cpu_stop_work, push_work);

/*
 * Ensure we only run per-cpu kthreads once the CPU goes !active.
 */
static void balance_push(struct rq *rq)
{
	struct task_struct *push_task = rq->curr;

	lockdep_assert_held(&rq->lock);
	SCHED_WARN_ON(rq->cpu != smp_processor_id());
	/*
	 * Ensure the thing is persistent until balance_push_set(.on = false);
	 */
	rq->balance_callback = &balance_push_callback;

	/*
	 * Both the CPU-hotplug and the stop task fall into this case and are
	 * required to complete the hotplug process.
	 *
	 * XXX: the idle task does not match kthread_is_per_cpu() due to
	 * histerical raisins.
	 */
	if (rq->idle == push_task ||
	    ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
	    is_migration_disabled(push_task)) {

		/*
		 * If this is the idle task on the outgoing CPU try to wake
		 * up the hotplug control thread which might wait for the
		 * last task to vanish. The rcuwait_active() check is
		 * accurate here because the waiter is pinned on this CPU
		 * and obviously can't be running in parallel.
		 *
		 * On RT kernels this also has to check whether there are
		 * pinned and scheduled out tasks on the runqueue. They
		 * need to leave the migrate disabled section first.
		 */
		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
		    rcuwait_active(&rq->hotplug_wait)) {
			raw_spin_unlock(&rq->lock);
			rcuwait_wake_up(&rq->hotplug_wait);
			raw_spin_lock(&rq->lock);
		}
		return;
	}

	get_task_struct(push_task);
	/*
	 * Temporarily drop rq->lock such that we can wake-up the stop task.
	 * Both preemption and IRQs are still disabled.
	 */
	raw_spin_unlock(&rq->lock);
	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
			    this_cpu_ptr(&push_work));
	/*
	 * At this point need_resched() is true and we'll take the loop in
	 * schedule(). The next pick is obviously going to be the stop task,
	 * which matches kthread_is_per_cpu() and will push this task away.
	 */
	raw_spin_lock(&rq->lock);
}

static void balance_push_set(int cpu, bool on)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	rq->balance_push = on;
	if (on) {
		WARN_ON_ONCE(rq->balance_callback);
		rq->balance_callback = &balance_push_callback;
	} else if (rq->balance_callback == &balance_push_callback) {
		rq->balance_callback = NULL;
	}
	rq_unlock_irqrestore(rq, &rf);
}

/*
 * Invoked from a CPU's hotplug control thread after the CPU has been marked
 * inactive. All tasks which are not per CPU kernel threads are either
 * pushed off this CPU now via balance_push() or placed on a different CPU
 * during wakeup. Wait until the CPU is quiescent.
 */
static void balance_hotplug_wait(void)
{
	struct rq *rq = this_rq();

	rcuwait_wait_event(&rq->hotplug_wait,
			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
			   TASK_UNINTERRUPTIBLE);
}

#else

static inline void balance_push(struct rq *rq)
{
}

static inline void balance_push_set(int cpu, bool on)
{
}

static inline void balance_hotplug_wait(void)
{
}

#endif /* CONFIG_HOTPLUG_CPU */

void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

		cpumask_set_cpu(rq->cpu, rq->rd->online);
		rq->online = 1;

		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}

void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		const struct sched_class *class;

		for_each_class(class) {
			if (class->rq_offline)
				class->rq_offline(rq);
		}

		cpumask_clear_cpu(rq->cpu, rq->rd->online);
		rq->online = 0;
	}
}

/*
 * Used to mark the begin/end of suspend/resume:
 */
static int num_cpus_frozen;

/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets because we
 * want to restore it back to its original state upon resume anyway.
 */
static void cpuset_cpu_active(void)
{
	if (cpuhp_tasks_frozen) {
		/*
		 * num_cpus_frozen tracks how many CPUs are involved in the
		 * suspend/resume sequence. As long as this is not the last
		 * online operation in the resume sequence, just build a
		 * single sched domain, ignoring cpusets.
		 */
		partition_sched_domains(1, NULL, NULL);
		if (--num_cpus_frozen)
			return;
		/*
		 * This is the last CPU online operation. So fall through and
		 * restore the original sched domains by considering the
		 * cpuset configurations.
		 */
		cpuset_force_rebuild();
	}
	cpuset_update_active_cpus();
}

static int cpuset_cpu_inactive(unsigned int cpu)
{
	if (!cpuhp_tasks_frozen) {
		if (dl_cpu_busy(cpu))
			return -EBUSY;
		cpuset_update_active_cpus();
	} else {
		num_cpus_frozen++;
		partition_sched_domains(1, NULL, NULL);
	}
	return 0;
}

int sched_cpu_activate(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	/*
	 * Make sure that when the hotplug state machine does a roll-back
	 * we clear balance_push. Ideally that would happen earlier...
	 */
	balance_push_set(cpu, false);

#ifdef CONFIG_SCHED_SMT
	/*
	 * When going up, increment the number of cores with SMT present.
	 */
	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
		static_branch_inc_cpuslocked(&sched_smt_present);
#endif
	set_cpu_active(cpu, true);

	if (sched_smp_initialized) {
		sched_domains_numa_masks_set(cpu);
		cpuset_cpu_active();
	}

	/*
	 * Put the rq online, if not already. This happens:
	 *
	 * 1) In the early boot process, because we build the real domains
	 *    after all CPUs have been brought up.
	 *
	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
	 *    domains.
	 */
	rq_lock_irqsave(rq, &rf);
	if (rq->rd) {
		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
		set_rq_online(rq);
	}
	rq_unlock_irqrestore(rq, &rf);

	return 0;
}

int sched_cpu_deactivate(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;
	int ret;

	/*
	 * Remove the CPU from nohz.idle_cpus_mask to prevent it from
	 * participating in load balancing when it is not active.
	 */
	nohz_balance_exit_idle(rq);

	set_cpu_active(cpu, false);

	/*
	 * From this point forward, this CPU will refuse to run any task that
	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
	 * push those tasks away until this gets cleared, see
	 * sched_cpu_dying().
	 */
	balance_push_set(cpu, true);

	/*
	 * We've cleared cpu_active_mask / set balance_push, wait for all
	 * preempt-disabled and RCU users of this state to go away such that
	 * all new such users will observe it.
	 *
	 * Specifically, we rely on ttwu to no longer target this CPU, see
	 * ttwu_queue_cond() and is_cpu_allowed().
	 *
	 * Do the synchronization before parking the smpboot threads to take
	 * care of the RCU boost case.
	 */
	synchronize_rcu();

	rq_lock_irqsave(rq, &rf);
	if (rq->rd) {
		update_rq_clock(rq);
		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
		set_rq_offline(rq);
	}
	rq_unlock_irqrestore(rq, &rf);

#ifdef CONFIG_SCHED_SMT
	/*
	 * When going down, decrement the number of cores with SMT present.
	 */
	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
		static_branch_dec_cpuslocked(&sched_smt_present);
#endif

	if (!sched_smp_initialized)
		return 0;

	ret = cpuset_cpu_inactive(cpu);
	if (ret) {
		balance_push_set(cpu, false);
		set_cpu_active(cpu, true);
		return ret;
	}
	sched_domains_numa_masks_clear(cpu);
	return 0;
}

static void sched_rq_cpu_starting(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	rq->calc_load_update = calc_load_update;
	update_max_interval();
}

int sched_cpu_starting(unsigned int cpu)
{
	sched_rq_cpu_starting(cpu);
	sched_tick_start(cpu);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Invoked immediately before the stopper thread is invoked to bring the
 * CPU down completely. At this point all per CPU kthreads except the
 * hotplug thread (current) and the stopper thread (inactive) have been
 * either parked or have been unbound from the outgoing CPU. Ensure that
 * any of those which might be on the way out are gone.
 *
 * If after this point a bound task is being woken on this CPU then the
 * responsible hotplug callback has failed to do its job.
 * sched_cpu_dying() will catch it with the appropriate fireworks.
 */
int sched_cpu_wait_empty(unsigned int cpu)
{
	balance_hotplug_wait();
	return 0;
}

/*
 * Since this CPU is going 'away' for a while, fold any nr_active delta we
 * might have. Called from the CPU stopper task after ensuring that the
 * stopper is the last running task on the CPU, so nr_active count is
 * stable. We need to take the teardown thread which is calling this into
 * account, so we hand in adjust = 1 to the load calculation.
 *
 * Also see the comment "Global load-average calculations".
 */
static void calc_load_migrate(struct rq *rq)
{
	long delta = calc_load_fold_active(rq, 1);

	if (delta)
		atomic_long_add(delta, &calc_load_tasks);
}

static void dump_rq_tasks(struct rq *rq, const char *loglvl)
{
	struct task_struct *g, *p;
	int cpu = cpu_of(rq);

	lockdep_assert_held(&rq->lock);

	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
	for_each_process_thread(g, p) {
		if (task_cpu(p) != cpu)
			continue;

		if (!task_on_rq_queued(p))
			continue;

		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
	}
}
7862 */ 7863 balance_push_set(cpu, false); 7864 7865 calc_load_migrate(rq); 7866 update_max_interval(); 7867 hrtick_clear(rq); 7868 return 0; 7869 } 7870 #endif 7871 7872 void __init sched_init_smp(void) 7873 { 7874 sched_init_numa(); 7875 7876 /* 7877 * There's no userspace yet to cause hotplug operations; hence all the 7878 * CPU masks are stable and all blatant races in the below code cannot 7879 * happen. 7880 */ 7881 mutex_lock(&sched_domains_mutex); 7882 sched_init_domains(cpu_active_mask); 7883 mutex_unlock(&sched_domains_mutex); 7884 7885 /* Move init over to a non-isolated CPU */ 7886 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) 7887 BUG(); 7888 sched_init_granularity(); 7889 7890 init_sched_rt_class(); 7891 init_sched_dl_class(); 7892 7893 sched_smp_initialized = true; 7894 } 7895 7896 static int __init migration_init(void) 7897 { 7898 sched_cpu_starting(smp_processor_id()); 7899 return 0; 7900 } 7901 early_initcall(migration_init); 7902 7903 #else 7904 void __init sched_init_smp(void) 7905 { 7906 sched_init_granularity(); 7907 } 7908 #endif /* CONFIG_SMP */ 7909 7910 int in_sched_functions(unsigned long addr) 7911 { 7912 return in_lock_functions(addr) || 7913 (addr >= (unsigned long)__sched_text_start 7914 && addr < (unsigned long)__sched_text_end); 7915 } 7916 7917 #ifdef CONFIG_CGROUP_SCHED 7918 /* 7919 * Default task group. 7920 * Every task in the system belongs to this group at bootup. 7921 */ 7922 struct task_group root_task_group; 7923 LIST_HEAD(task_groups); 7924 7925 /* Cacheline-aligned slab cache for task_group */ 7926 static struct kmem_cache *task_group_cache __read_mostly; 7927 #endif 7928 7929 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 7930 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask); 7931 7932 void __init sched_init(void) 7933 { 7934 unsigned long ptr = 0; 7935 int i; 7936 7937 /* Make sure the linker didn't screw up */ 7938 BUG_ON(&idle_sched_class + 1 != &fair_sched_class || 7939 &fair_sched_class + 1 != &rt_sched_class || 7940 &rt_sched_class + 1 != &dl_sched_class); 7941 #ifdef CONFIG_SMP 7942 BUG_ON(&dl_sched_class + 1 != &stop_sched_class); 7943 #endif 7944 7945 wait_bit_init(); 7946 7947 #ifdef CONFIG_FAIR_GROUP_SCHED 7948 ptr += 2 * nr_cpu_ids * sizeof(void **); 7949 #endif 7950 #ifdef CONFIG_RT_GROUP_SCHED 7951 ptr += 2 * nr_cpu_ids * sizeof(void **); 7952 #endif 7953 if (ptr) { 7954 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); 7955 7956 #ifdef CONFIG_FAIR_GROUP_SCHED 7957 root_task_group.se = (struct sched_entity **)ptr; 7958 ptr += nr_cpu_ids * sizeof(void **); 7959 7960 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 7961 ptr += nr_cpu_ids * sizeof(void **); 7962 7963 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 7964 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 7965 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7966 #ifdef CONFIG_RT_GROUP_SCHED 7967 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 7968 ptr += nr_cpu_ids * sizeof(void **); 7969 7970 root_task_group.rt_rq = (struct rt_rq **)ptr; 7971 ptr += nr_cpu_ids * sizeof(void **); 7972 7973 #endif /* CONFIG_RT_GROUP_SCHED */ 7974 } 7975 #ifdef CONFIG_CPUMASK_OFFSTACK 7976 for_each_possible_cpu(i) { 7977 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 7978 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 7979 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node( 7980 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 7981 } 7982 #endif /* CONFIG_CPUMASK_OFFSTACK */ 7983 7984 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(),
global_rt_runtime()); 7985 init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime()); 7986 7987 #ifdef CONFIG_SMP 7988 init_defrootdomain(); 7989 #endif 7990 7991 #ifdef CONFIG_RT_GROUP_SCHED 7992 init_rt_bandwidth(&root_task_group.rt_bandwidth, 7993 global_rt_period(), global_rt_runtime()); 7994 #endif /* CONFIG_RT_GROUP_SCHED */ 7995 7996 #ifdef CONFIG_CGROUP_SCHED 7997 task_group_cache = KMEM_CACHE(task_group, 0); 7998 7999 list_add(&root_task_group.list, &task_groups); 8000 INIT_LIST_HEAD(&root_task_group.children); 8001 INIT_LIST_HEAD(&root_task_group.siblings); 8002 autogroup_init(&init_task); 8003 #endif /* CONFIG_CGROUP_SCHED */ 8004 8005 for_each_possible_cpu(i) { 8006 struct rq *rq; 8007 8008 rq = cpu_rq(i); 8009 raw_spin_lock_init(&rq->lock); 8010 rq->nr_running = 0; 8011 rq->calc_load_active = 0; 8012 rq->calc_load_update = jiffies + LOAD_FREQ; 8013 init_cfs_rq(&rq->cfs); 8014 init_rt_rq(&rq->rt); 8015 init_dl_rq(&rq->dl); 8016 #ifdef CONFIG_FAIR_GROUP_SCHED 8017 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 8018 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 8019 /* 8020 * How much CPU bandwidth does root_task_group get? 8021 * 8022 * In case of task-groups formed through the cgroup filesystem, it 8023 * gets 100% of the CPU resources in the system. This overall 8024 * system CPU resource is divided among the tasks of 8025 * root_task_group and its child task-groups in a fair manner, 8026 * based on each entity's (task or task-group's) weight 8027 * (se->load.weight). 8028 * 8029 * In other words, if root_task_group has 10 tasks of weight 8030 * 1024 and two child groups A0 and A1 (of weight 1024 each), 8031 * then A0's share of the CPU resource is: 8032 * 8033 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 8034 * 8035 * We achieve this by letting root_task_group's tasks sit 8036 * directly in rq->cfs (i.e. root_task_group->se[] = NULL). 8037 */ 8038 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 8039 #endif /* CONFIG_FAIR_GROUP_SCHED */ 8040 8041 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 8042 #ifdef CONFIG_RT_GROUP_SCHED 8043 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 8044 #endif 8045 #ifdef CONFIG_SMP 8046 rq->sd = NULL; 8047 rq->rd = NULL; 8048 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 8049 rq->balance_callback = NULL; 8050 rq->active_balance = 0; 8051 rq->next_balance = jiffies; 8052 rq->push_cpu = 0; 8053 rq->cpu = i; 8054 rq->online = 0; 8055 rq->idle_stamp = 0; 8056 rq->avg_idle = 2*sysctl_sched_migration_cost; 8057 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 8058 8059 INIT_LIST_HEAD(&rq->cfs_tasks); 8060 8061 rq_attach_root(rq, &def_root_domain); 8062 #ifdef CONFIG_NO_HZ_COMMON 8063 rq->last_blocked_load_update_tick = jiffies; 8064 atomic_set(&rq->nohz_flags, 0); 8065 8066 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); 8067 #endif 8068 #ifdef CONFIG_HOTPLUG_CPU 8069 rcuwait_init(&rq->hotplug_wait); 8070 #endif 8071 #endif /* CONFIG_SMP */ 8072 hrtick_rq_init(rq); 8073 atomic_set(&rq->nr_iowait, 0); 8074 } 8075 8076 set_load_weight(&init_task, false); 8077 8078 /* 8079 * The boot idle thread does lazy MMU switching as well: 8080 */ 8081 mmgrab(&init_mm); 8082 enter_lazy_tlb(&init_mm, current); 8083 8084 /* 8085 * Make us the idle thread. Technically, schedule() should not be 8086 * called from this thread; however, somewhere below it might be, 8087 * but because we are the idle thread, we just pick up running again 8088 * when this runqueue becomes "idle".
8089 */ 8090 init_idle(current, smp_processor_id()); 8091 8092 calc_load_update = jiffies + LOAD_FREQ; 8093 8094 #ifdef CONFIG_SMP 8095 idle_thread_set_boot_cpu(); 8096 #endif 8097 init_sched_fair_class(); 8098 8099 init_schedstats(); 8100 8101 psi_init(); 8102 8103 init_uclamp(); 8104 8105 scheduler_running = 1; 8106 } 8107 8108 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 8109 static inline int preempt_count_equals(int preempt_offset) 8110 { 8111 int nested = preempt_count() + rcu_preempt_depth(); 8112 8113 return (nested == preempt_offset); 8114 } 8115 8116 void __might_sleep(const char *file, int line, int preempt_offset) 8117 { 8118 /* 8119 * Blocking primitives will set (and therefore destroy) current->state; 8120 * since we will exit with TASK_RUNNING, make sure we enter with it, 8121 * otherwise we would destroy state. 8122 */ 8123 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 8124 "do not call blocking ops when !TASK_RUNNING; " 8125 "state=%lx set at [<%p>] %pS\n", 8126 current->state, 8127 (void *)current->task_state_change, 8128 (void *)current->task_state_change); 8129 8130 ___might_sleep(file, line, preempt_offset); 8131 } 8132 EXPORT_SYMBOL(__might_sleep); 8133 8134 void ___might_sleep(const char *file, int line, int preempt_offset) 8135 { 8136 /* Ratelimiting timestamp: */ 8137 static unsigned long prev_jiffy; 8138 8139 unsigned long preempt_disable_ip; 8140 8141 /* WARN_ON_ONCE() by default, no rate limit required: */ 8142 rcu_sleep_check(); 8143 8144 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 8145 !is_idle_task(current) && !current->non_block_count) || 8146 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 8147 oops_in_progress) 8148 return; 8149 8150 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 8151 return; 8152 prev_jiffy = jiffies; 8153 8154 /* Save this before calling printk(), since that will clobber it: */ 8155 preempt_disable_ip = get_preempt_disable_ip(current); 8156 8157 printk(KERN_ERR 8158 "BUG: sleeping function called from invalid context at %s:%d\n", 8159 file, line); 8160 printk(KERN_ERR 8161 "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", 8162 in_atomic(), irqs_disabled(), current->non_block_count, 8163 current->pid, current->comm); 8164 8165 if (task_stack_end_corrupted(current)) 8166 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 8167 8168 debug_show_held_locks(current); 8169 if (irqs_disabled()) 8170 print_irqtrace_events(current); 8171 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 8172 && !preempt_count_equals(preempt_offset)) { 8173 pr_err("Preemption disabled at:"); 8174 print_ip_sym(KERN_ERR, preempt_disable_ip); 8175 } 8176 dump_stack(); 8177 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 8178 } 8179 EXPORT_SYMBOL(___might_sleep); 8180 8181 void __cant_sleep(const char *file, int line, int preempt_offset) 8182 { 8183 static unsigned long prev_jiffy; 8184 8185 if (irqs_disabled()) 8186 return; 8187 8188 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 8189 return; 8190 8191 if (preempt_count() > preempt_offset) 8192 return; 8193 8194 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 8195 return; 8196 prev_jiffy = jiffies; 8197 8198 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 8199 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 8200 in_atomic(), irqs_disabled(), 8201 current->pid, current->comm); 8202 8203 debug_show_held_locks(current); 8204 dump_stack(); 8205 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 8206
} 8207 EXPORT_SYMBOL_GPL(__cant_sleep); 8208 8209 #ifdef CONFIG_SMP 8210 void __cant_migrate(const char *file, int line) 8211 { 8212 static unsigned long prev_jiffy; 8213 8214 if (irqs_disabled()) 8215 return; 8216 8217 if (is_migration_disabled(current)) 8218 return; 8219 8220 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 8221 return; 8222 8223 if (preempt_count() > 0) 8224 return; 8225 8226 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 8227 return; 8228 prev_jiffy = jiffies; 8229 8230 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line); 8231 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n", 8232 in_atomic(), irqs_disabled(), is_migration_disabled(current), 8233 current->pid, current->comm); 8234 8235 debug_show_held_locks(current); 8236 dump_stack(); 8237 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 8238 } 8239 EXPORT_SYMBOL_GPL(__cant_migrate); 8240 #endif 8241 #endif 8242 8243 #ifdef CONFIG_MAGIC_SYSRQ 8244 void normalize_rt_tasks(void) 8245 { 8246 struct task_struct *g, *p; 8247 struct sched_attr attr = { 8248 .sched_policy = SCHED_NORMAL, 8249 }; 8250 8251 read_lock(&tasklist_lock); 8252 for_each_process_thread(g, p) { 8253 /* 8254 * Only normalize user tasks: 8255 */ 8256 if (p->flags & PF_KTHREAD) 8257 continue; 8258 8259 p->se.exec_start = 0; 8260 schedstat_set(p->se.statistics.wait_start, 0); 8261 schedstat_set(p->se.statistics.sleep_start, 0); 8262 schedstat_set(p->se.statistics.block_start, 0); 8263 8264 if (!dl_task(p) && !rt_task(p)) { 8265 /* 8266 * Renice negative nice level userspace 8267 * tasks back to 0: 8268 */ 8269 if (task_nice(p) < 0) 8270 set_user_nice(p, 0); 8271 continue; 8272 } 8273 8274 __sched_setscheduler(p, &attr, false, false); 8275 } 8276 read_unlock(&tasklist_lock); 8277 } 8278 8279 #endif /* CONFIG_MAGIC_SYSRQ */ 8280 8281 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 8282 /* 8283 * These functions are only useful for the IA64 MCA handling, or kdb. 8284 * 8285 * They can only be called when the whole system has been 8286 * stopped - every CPU needs to be quiescent, and no scheduling 8287 * activity can take place. Using them for anything else would 8288 * be a serious bug, and as a result, they aren't even visible 8289 * under any other configuration. 8290 */ 8291 8292 /** 8293 * curr_task - return the current task for a given CPU. 8294 * @cpu: the processor in question. 8295 * 8296 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 8297 * 8298 * Return: The current task for @cpu. 8299 */ 8300 struct task_struct *curr_task(int cpu) 8301 { 8302 return cpu_curr(cpu); 8303 } 8304 8305 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 8306 8307 #ifdef CONFIG_IA64 8308 /** 8309 * ia64_set_curr_task - set the current task for a given CPU. 8310 * @cpu: the processor in question. 8311 * @p: the task pointer to set. 8312 * 8313 * Description: This function must only be used when non-maskable interrupts 8314 * are serviced on a separate stack. It allows the architecture to switch the 8315 * notion of the current task on a CPU in a non-blocking manner. This function 8316 * must be called with all CPUs synchronized and interrupts disabled; the 8317 * caller must save the original value of the current task (see 8318 * curr_task() above) and restore that value before reenabling interrupts and 8319 * restarting the system. 8320 * 8321 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
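 *
 * A typical save/switch/restore sequence (an illustrative sketch only;
 * the names orig and new_task are hypothetical):
 *
 *	struct task_struct *orig = curr_task(cpu);
 *	ia64_set_curr_task(cpu, new_task);
 *	...	operate on behalf of new_task ...
 *	ia64_set_curr_task(cpu, orig);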
8322 */ 8323 void ia64_set_curr_task(int cpu, struct task_struct *p) 8324 { 8325 cpu_curr(cpu) = p; 8326 } 8327 8328 #endif 8329 8330 #ifdef CONFIG_CGROUP_SCHED 8331 /* task_group_lock serializes the addition/removal of task groups */ 8332 static DEFINE_SPINLOCK(task_group_lock); 8333 8334 static inline void alloc_uclamp_sched_group(struct task_group *tg, 8335 struct task_group *parent) 8336 { 8337 #ifdef CONFIG_UCLAMP_TASK_GROUP 8338 enum uclamp_id clamp_id; 8339 8340 for_each_clamp_id(clamp_id) { 8341 uclamp_se_set(&tg->uclamp_req[clamp_id], 8342 uclamp_none(clamp_id), false); 8343 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; 8344 } 8345 #endif 8346 } 8347 8348 static void sched_free_group(struct task_group *tg) 8349 { 8350 free_fair_sched_group(tg); 8351 free_rt_sched_group(tg); 8352 autogroup_free(tg); 8353 kmem_cache_free(task_group_cache, tg); 8354 } 8355 8356 /* Allocate runqueue etc. for a new task group */ 8357 struct task_group *sched_create_group(struct task_group *parent) 8358 { 8359 struct task_group *tg; 8360 8361 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 8362 if (!tg) 8363 return ERR_PTR(-ENOMEM); 8364 8365 if (!alloc_fair_sched_group(tg, parent)) 8366 goto err; 8367 8368 if (!alloc_rt_sched_group(tg, parent)) 8369 goto err; 8370 8371 alloc_uclamp_sched_group(tg, parent); 8372 8373 return tg; 8374 8375 err: 8376 sched_free_group(tg); 8377 return ERR_PTR(-ENOMEM); 8378 } 8379 8380 void sched_online_group(struct task_group *tg, struct task_group *parent) 8381 { 8382 unsigned long flags; 8383 8384 spin_lock_irqsave(&task_group_lock, flags); 8385 list_add_rcu(&tg->list, &task_groups); 8386 8387 /* Root should already exist: */ 8388 WARN_ON(!parent); 8389 8390 tg->parent = parent; 8391 INIT_LIST_HEAD(&tg->children); 8392 list_add_rcu(&tg->siblings, &parent->children); 8393 spin_unlock_irqrestore(&task_group_lock, flags); 8394 8395 online_fair_sched_group(tg); 8396 } 8397 8398 /* RCU callback to free various structures associated with a task group */ 8399 static void sched_free_group_rcu(struct rcu_head *rhp) 8400 { 8401 /* Now it should be safe to free those cfs_rqs: */ 8402 sched_free_group(container_of(rhp, struct task_group, rcu)); 8403 } 8404 8405 void sched_destroy_group(struct task_group *tg) 8406 { 8407 /* Wait for possible concurrent references to cfs_rqs to complete: */ 8408 call_rcu(&tg->rcu, sched_free_group_rcu); 8409 } 8410 8411 void sched_offline_group(struct task_group *tg) 8412 { 8413 unsigned long flags; 8414 8415 /* End participation in shares distribution: */ 8416 unregister_fair_sched_group(tg); 8417 8418 spin_lock_irqsave(&task_group_lock, flags); 8419 list_del_rcu(&tg->list); 8420 list_del_rcu(&tg->siblings); 8421 spin_unlock_irqrestore(&task_group_lock, flags); 8422 } 8423 8424 static void sched_change_group(struct task_struct *tsk, int type) 8425 { 8426 struct task_group *tg; 8427 8428 /* 8429 * All callers are synchronized by task_rq_lock(); we do not use RCU, 8430 * which would be pointless here. Thus, we pass "true" to task_css_check() 8431 * to prevent lockdep warnings. 8432 */ 8433 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 8434 struct task_group, css); 8435 tg = autogroup_task_group(tsk, tg); 8436 tsk->sched_task_group = tg; 8437 8438 #ifdef CONFIG_FAIR_GROUP_SCHED 8439 if (tsk->sched_class->task_change_group) 8440 tsk->sched_class->task_change_group(tsk, type); 8441 else 8442 #endif 8443 set_task_rq(tsk, task_cpu(tsk)); 8444 } 8445 8446 /* 8447 * Change task's runqueue when it moves between groups.
8448 * 8449 * The caller of this function should have put the task in its new group by 8450 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect 8451 * its new group. 8452 */ 8453 void sched_move_task(struct task_struct *tsk) 8454 { 8455 int queued, running, queue_flags = 8456 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 8457 struct rq_flags rf; 8458 struct rq *rq; 8459 8460 rq = task_rq_lock(tsk, &rf); 8461 update_rq_clock(rq); 8462 8463 running = task_current(rq, tsk); 8464 queued = task_on_rq_queued(tsk); 8465 8466 if (queued) 8467 dequeue_task(rq, tsk, queue_flags); 8468 if (running) 8469 put_prev_task(rq, tsk); 8470 8471 sched_change_group(tsk, TASK_MOVE_GROUP); 8472 8473 if (queued) 8474 enqueue_task(rq, tsk, queue_flags); 8475 if (running) { 8476 set_next_task(rq, tsk); 8477 /* 8478 * After changing group, the running task may have joined a 8479 * throttled one but it's still the running task. Trigger a 8480 * resched to make sure that task can still run. 8481 */ 8482 resched_curr(rq); 8483 } 8484 8485 task_rq_unlock(rq, tsk, &rf); 8486 } 8487 8488 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 8489 { 8490 return css ? container_of(css, struct task_group, css) : NULL; 8491 } 8492 8493 static struct cgroup_subsys_state * 8494 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 8495 { 8496 struct task_group *parent = css_tg(parent_css); 8497 struct task_group *tg; 8498 8499 if (!parent) { 8500 /* This is early initialization for the top cgroup */ 8501 return &root_task_group.css; 8502 } 8503 8504 tg = sched_create_group(parent); 8505 if (IS_ERR(tg)) 8506 return ERR_PTR(-ENOMEM); 8507 8508 return &tg->css; 8509 } 8510 8511 /* Expose task group only after completing cgroup initialization */ 8512 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 8513 { 8514 struct task_group *tg = css_tg(css); 8515 struct task_group *parent = css_tg(css->parent); 8516 8517 if (parent) 8518 sched_online_group(tg, parent); 8519 8520 #ifdef CONFIG_UCLAMP_TASK_GROUP 8521 /* Propagate the effective uclamp value for the new group */ 8522 cpu_util_update_eff(css); 8523 #endif 8524 8525 return 0; 8526 } 8527 8528 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 8529 { 8530 struct task_group *tg = css_tg(css); 8531 8532 sched_offline_group(tg); 8533 } 8534 8535 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 8536 { 8537 struct task_group *tg = css_tg(css); 8538 8539 /* 8540 * Relies on the RCU grace period between css_released() and this. 8541 */ 8542 sched_free_group(tg); 8543 } 8544 8545 /* 8546 * This is called before wake_up_new_task(), therefore we really only 8547 * have to set its group bits, all the other stuff does not apply. 8548 */ 8549 static void cpu_cgroup_fork(struct task_struct *task) 8550 { 8551 struct rq_flags rf; 8552 struct rq *rq; 8553 8554 rq = task_rq_lock(task, &rf); 8555 8556 update_rq_clock(rq); 8557 sched_change_group(task, TASK_SET_GROUP); 8558 8559 task_rq_unlock(rq, task, &rf); 8560 } 8561 8562 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 8563 { 8564 struct task_struct *task; 8565 struct cgroup_subsys_state *css; 8566 int ret = 0; 8567 8568 cgroup_taskset_for_each(task, css, tset) { 8569 #ifdef CONFIG_RT_GROUP_SCHED 8570 if (!sched_rt_can_attach(css_tg(css), task)) 8571 return -EINVAL; 8572 #endif 8573 /* 8574 * Serialize against wake_up_new_task() such that if it's 8575 * running, we're sure to observe its full state. 
8576 */ 8577 raw_spin_lock_irq(&task->pi_lock); 8578 /* 8579 * Avoid calling sched_move_task() before wake_up_new_task() 8580 * has happened. This would lead to problems with PELT, due to 8581 * move wanting to detach+attach while we're not attached yet. 8582 */ 8583 if (task->state == TASK_NEW) 8584 ret = -EINVAL; 8585 raw_spin_unlock_irq(&task->pi_lock); 8586 8587 if (ret) 8588 break; 8589 } 8590 return ret; 8591 } 8592 8593 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 8594 { 8595 struct task_struct *task; 8596 struct cgroup_subsys_state *css; 8597 8598 cgroup_taskset_for_each(task, css, tset) 8599 sched_move_task(task); 8600 } 8601 8602 #ifdef CONFIG_UCLAMP_TASK_GROUP 8603 static void cpu_util_update_eff(struct cgroup_subsys_state *css) 8604 { 8605 struct cgroup_subsys_state *top_css = css; 8606 struct uclamp_se *uc_parent = NULL; 8607 struct uclamp_se *uc_se = NULL; 8608 unsigned int eff[UCLAMP_CNT]; 8609 enum uclamp_id clamp_id; 8610 unsigned int clamps; 8611 8612 css_for_each_descendant_pre(css, top_css) { 8613 uc_parent = css_tg(css)->parent 8614 ? css_tg(css)->parent->uclamp : NULL; 8615 8616 for_each_clamp_id(clamp_id) { 8617 /* Assume effective clamps match requested clamps */ 8618 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; 8619 /* Cap effective clamps with parent's effective clamps */ 8620 if (uc_parent && 8621 eff[clamp_id] > uc_parent[clamp_id].value) { 8622 eff[clamp_id] = uc_parent[clamp_id].value; 8623 } 8624 } 8625 /* Ensure protection is always capped by limit */ 8626 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); 8627 8628 /* Propagate most restrictive effective clamps */ 8629 clamps = 0x0; 8630 uc_se = css_tg(css)->uclamp; 8631 for_each_clamp_id(clamp_id) { 8632 if (eff[clamp_id] == uc_se[clamp_id].value) 8633 continue; 8634 uc_se[clamp_id].value = eff[clamp_id]; 8635 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); 8636 clamps |= (0x1 << clamp_id); 8637 } 8638 if (!clamps) { 8639 css = css_rightmost_descendant(css); 8640 continue; 8641 } 8642 8643 /* Immediately update descendants' RUNNABLE tasks */ 8644 uclamp_update_active_tasks(css, clamps); 8645 } 8646 } 8647 8648 /* 8649 * Build the integer 10^N for a given exponent N by casting the literal "1eN" 8650 * C expression to integer. Since there is no way to convert a macro argument 8651 * (N) into a character constant, use two levels of macros.
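 *
 * For example, POW10(2) expands to _POW10(2) and then to
 * ((unsigned int)1e2) == 100, so UCLAMP_PERCENT_SCALE below works out to
 * 100 * POW10(2) == 10000. With that scale, writing "50" to a
 * cpu.uclamp.* file parses to percent == 5000, which
 * capacity_from_percent() maps to util == 512 (out of
 * SCHED_CAPACITY_SCALE, assuming the usual value of 1024).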
8652 */ 8653 #define _POW10(exp) ((unsigned int)1e##exp) 8654 #define POW10(exp) _POW10(exp) 8655 8656 struct uclamp_request { 8657 #define UCLAMP_PERCENT_SHIFT 2 8658 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) 8659 s64 percent; 8660 u64 util; 8661 int ret; 8662 }; 8663 8664 static inline struct uclamp_request 8665 capacity_from_percent(char *buf) 8666 { 8667 struct uclamp_request req = { 8668 .percent = UCLAMP_PERCENT_SCALE, 8669 .util = SCHED_CAPACITY_SCALE, 8670 .ret = 0, 8671 }; 8672 8673 buf = strim(buf); 8674 if (strcmp(buf, "max")) { 8675 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, 8676 &req.percent); 8677 if (req.ret) 8678 return req; 8679 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { 8680 req.ret = -ERANGE; 8681 return req; 8682 } 8683 8684 req.util = req.percent << SCHED_CAPACITY_SHIFT; 8685 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); 8686 } 8687 8688 return req; 8689 } 8690 8691 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, 8692 size_t nbytes, loff_t off, 8693 enum uclamp_id clamp_id) 8694 { 8695 struct uclamp_request req; 8696 struct task_group *tg; 8697 8698 req = capacity_from_percent(buf); 8699 if (req.ret) 8700 return req.ret; 8701 8702 static_branch_enable(&sched_uclamp_used); 8703 8704 mutex_lock(&uclamp_mutex); 8705 rcu_read_lock(); 8706 8707 tg = css_tg(of_css(of)); 8708 if (tg->uclamp_req[clamp_id].value != req.util) 8709 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); 8710 8711 /* 8712 * Because the conversion rounding is not recoverable, we keep track 8713 * of the exact requested value. 8714 */ 8715 tg->uclamp_pct[clamp_id] = req.percent; 8716 8717 /* Update effective clamps to track the most restrictive value */ 8718 cpu_util_update_eff(of_css(of)); 8719 8720 rcu_read_unlock(); 8721 mutex_unlock(&uclamp_mutex); 8722 8723 return nbytes; 8724 } 8725 8726 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, 8727 char *buf, size_t nbytes, 8728 loff_t off) 8729 { 8730 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); 8731 } 8732 8733 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of, 8734 char *buf, size_t nbytes, 8735 loff_t off) 8736 { 8737 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); 8738 } 8739 8740 static inline void cpu_uclamp_print(struct seq_file *sf, 8741 enum uclamp_id clamp_id) 8742 { 8743 struct task_group *tg; 8744 u64 util_clamp; 8745 u64 percent; 8746 u32 rem; 8747 8748 rcu_read_lock(); 8749 tg = css_tg(seq_css(sf)); 8750 util_clamp = tg->uclamp_req[clamp_id].value; 8751 rcu_read_unlock(); 8752 8753 if (util_clamp == SCHED_CAPACITY_SCALE) { 8754 seq_puts(sf, "max\n"); 8755 return; 8756 } 8757 8758 percent = tg->uclamp_pct[clamp_id]; 8759 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); 8760 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); 8761 } 8762 8763 static int cpu_uclamp_min_show(struct seq_file *sf, void *v) 8764 { 8765 cpu_uclamp_print(sf, UCLAMP_MIN); 8766 return 0; 8767 } 8768 8769 static int cpu_uclamp_max_show(struct seq_file *sf, void *v) 8770 { 8771 cpu_uclamp_print(sf, UCLAMP_MAX); 8772 return 0; 8773 } 8774 #endif /* CONFIG_UCLAMP_TASK_GROUP */ 8775 8776 #ifdef CONFIG_FAIR_GROUP_SCHED 8777 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 8778 struct cftype *cftype, u64 shareval) 8779 { 8780 if (shareval > scale_load_down(ULONG_MAX)) 8781 shareval = MAX_SHARES; 8782 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 8783 } 8784 8785 static
u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 8786 struct cftype *cft) 8787 { 8788 struct task_group *tg = css_tg(css); 8789 8790 return (u64) scale_load_down(tg->shares); 8791 } 8792 8793 #ifdef CONFIG_CFS_BANDWIDTH 8794 static DEFINE_MUTEX(cfs_constraints_mutex); 8795 8796 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 8797 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 8798 /* More than 203 days if BW_SHIFT equals 20. */ 8799 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; 8800 8801 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 8802 8803 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 8804 { 8805 int i, ret = 0, runtime_enabled, runtime_was_enabled; 8806 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8807 8808 if (tg == &root_task_group) 8809 return -EINVAL; 8810 8811 /* 8812 * Ensure we have some amount of bandwidth every period. This is to 8813 * prevent reaching a state of large arrears when throttled via 8814 * entity_tick() resulting in prolonged exit starvation. 8815 */ 8816 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 8817 return -EINVAL; 8818 8819 /* 8820 * Likewise, bound things on the other side by preventing insane quota 8821 * periods. This also allows us to normalize in computing quota 8822 * feasibility. 8823 */ 8824 if (period > max_cfs_quota_period) 8825 return -EINVAL; 8826 8827 /* 8828 * Bound quota to defend against overflow during the bandwidth shift. 8829 */ 8830 if (quota != RUNTIME_INF && quota > max_cfs_runtime) 8831 return -EINVAL; 8832 8833 /* 8834 * Prevent race between setting of cfs_rq->runtime_enabled and 8835 * unthrottle_offline_cfs_rqs(). 8836 */ 8837 get_online_cpus(); 8838 mutex_lock(&cfs_constraints_mutex); 8839 ret = __cfs_schedulable(tg, period, quota); 8840 if (ret) 8841 goto out_unlock; 8842 8843 runtime_enabled = quota != RUNTIME_INF; 8844 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 8845 /* 8846 * If we need to toggle cfs_bandwidth_used, off->on must occur 8847 * before making related changes, and on->off must occur afterwards. 8848 */ 8849 if (runtime_enabled && !runtime_was_enabled) 8850 cfs_bandwidth_usage_inc(); 8851 raw_spin_lock_irq(&cfs_b->lock); 8852 cfs_b->period = ns_to_ktime(period); 8853 cfs_b->quota = quota; 8854 8855 __refill_cfs_bandwidth_runtime(cfs_b); 8856 8857 /* Restart the period timer (if active) to handle new period expiry: */ 8858 if (runtime_enabled) 8859 start_cfs_bandwidth(cfs_b); 8860 8861 raw_spin_unlock_irq(&cfs_b->lock); 8862 8863 for_each_online_cpu(i) { 8864 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 8865 struct rq *rq = cfs_rq->rq; 8866 struct rq_flags rf; 8867 8868 rq_lock_irq(rq, &rf); 8869 cfs_rq->runtime_enabled = runtime_enabled; 8870 cfs_rq->runtime_remaining = 0; 8871 8872 if (cfs_rq->throttled) 8873 unthrottle_cfs_rq(cfs_rq); 8874 rq_unlock_irq(rq, &rf); 8875 } 8876 if (runtime_was_enabled && !runtime_enabled) 8877 cfs_bandwidth_usage_dec(); 8878 out_unlock: 8879 mutex_unlock(&cfs_constraints_mutex); 8880 put_online_cpus(); 8881 8882 return ret; 8883 } 8884 8885 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 8886 { 8887 u64 quota, period; 8888 8889 period = ktime_to_ns(tg->cfs_bandwidth.period); 8890 if (cfs_quota_us < 0) 8891 quota = RUNTIME_INF; 8892 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) 8893 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 8894 else 8895 return -EINVAL; 8896 8897 return tg_set_cfs_bandwidth(tg, period,
quota); 8898 } 8899 8900 static long tg_get_cfs_quota(struct task_group *tg) 8901 { 8902 u64 quota_us; 8903 8904 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 8905 return -1; 8906 8907 quota_us = tg->cfs_bandwidth.quota; 8908 do_div(quota_us, NSEC_PER_USEC); 8909 8910 return quota_us; 8911 } 8912 8913 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 8914 { 8915 u64 quota, period; 8916 8917 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 8918 return -EINVAL; 8919 8920 period = (u64)cfs_period_us * NSEC_PER_USEC; 8921 quota = tg->cfs_bandwidth.quota; 8922 8923 return tg_set_cfs_bandwidth(tg, period, quota); 8924 } 8925 8926 static long tg_get_cfs_period(struct task_group *tg) 8927 { 8928 u64 cfs_period_us; 8929 8930 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 8931 do_div(cfs_period_us, NSEC_PER_USEC); 8932 8933 return cfs_period_us; 8934 } 8935 8936 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 8937 struct cftype *cft) 8938 { 8939 return tg_get_cfs_quota(css_tg(css)); 8940 } 8941 8942 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 8943 struct cftype *cftype, s64 cfs_quota_us) 8944 { 8945 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 8946 } 8947 8948 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 8949 struct cftype *cft) 8950 { 8951 return tg_get_cfs_period(css_tg(css)); 8952 } 8953 8954 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 8955 struct cftype *cftype, u64 cfs_period_us) 8956 { 8957 return tg_set_cfs_period(css_tg(css), cfs_period_us); 8958 } 8959 8960 struct cfs_schedulable_data { 8961 struct task_group *tg; 8962 u64 period, quota; 8963 }; 8964 8965 /* 8966 * normalize group quota/period to be quota/max_period 8967 * note: units are usecs 8968 */ 8969 static u64 normalize_cfs_quota(struct task_group *tg, 8970 struct cfs_schedulable_data *d) 8971 { 8972 u64 quota, period; 8973 8974 if (tg == d->tg) { 8975 period = d->period; 8976 quota = d->quota; 8977 } else { 8978 period = tg_get_cfs_period(tg); 8979 quota = tg_get_cfs_quota(tg); 8980 } 8981 8982 /* note: these should typically be equivalent */ 8983 if (quota == RUNTIME_INF || quota == -1) 8984 return RUNTIME_INF; 8985 8986 return to_ratio(period, quota); 8987 } 8988 8989 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 8990 { 8991 struct cfs_schedulable_data *d = data; 8992 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8993 s64 quota = 0, parent_quota = -1; 8994 8995 if (!tg->parent) { 8996 quota = RUNTIME_INF; 8997 } else { 8998 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 8999 9000 quota = normalize_cfs_quota(tg, d); 9001 parent_quota = parent_b->hierarchical_quota; 9002 9003 /* 9004 * Ensure max(child_quota) <= parent_quota. On cgroup2, 9005 * always take the min. 
On cgroup1, only inherit when no 9006 * limit is set: 9007 */ 9008 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 9009 quota = min(quota, parent_quota); 9010 } else { 9011 if (quota == RUNTIME_INF) 9012 quota = parent_quota; 9013 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 9014 return -EINVAL; 9015 } 9016 } 9017 cfs_b->hierarchical_quota = quota; 9018 9019 return 0; 9020 } 9021 9022 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 9023 { 9024 int ret; 9025 struct cfs_schedulable_data data = { 9026 .tg = tg, 9027 .period = period, 9028 .quota = quota, 9029 }; 9030 9031 if (quota != RUNTIME_INF) { 9032 do_div(data.period, NSEC_PER_USEC); 9033 do_div(data.quota, NSEC_PER_USEC); 9034 } 9035 9036 rcu_read_lock(); 9037 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 9038 rcu_read_unlock(); 9039 9040 return ret; 9041 } 9042 9043 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 9044 { 9045 struct task_group *tg = css_tg(seq_css(sf)); 9046 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 9047 9048 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 9049 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 9050 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 9051 9052 if (schedstat_enabled() && tg != &root_task_group) { 9053 u64 ws = 0; 9054 int i; 9055 9056 for_each_possible_cpu(i) 9057 ws += schedstat_val(tg->se[i]->statistics.wait_sum); 9058 9059 seq_printf(sf, "wait_sum %llu\n", ws); 9060 } 9061 9062 return 0; 9063 } 9064 #endif /* CONFIG_CFS_BANDWIDTH */ 9065 #endif /* CONFIG_FAIR_GROUP_SCHED */ 9066 9067 #ifdef CONFIG_RT_GROUP_SCHED 9068 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 9069 struct cftype *cft, s64 val) 9070 { 9071 return sched_group_set_rt_runtime(css_tg(css), val); 9072 } 9073 9074 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 9075 struct cftype *cft) 9076 { 9077 return sched_group_rt_runtime(css_tg(css)); 9078 } 9079 9080 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 9081 struct cftype *cftype, u64 rt_period_us) 9082 { 9083 return sched_group_set_rt_period(css_tg(css), rt_period_us); 9084 } 9085 9086 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 9087 struct cftype *cft) 9088 { 9089 return sched_group_rt_period(css_tg(css)); 9090 } 9091 #endif /* CONFIG_RT_GROUP_SCHED */ 9092 9093 static struct cftype cpu_legacy_files[] = { 9094 #ifdef CONFIG_FAIR_GROUP_SCHED 9095 { 9096 .name = "shares", 9097 .read_u64 = cpu_shares_read_u64, 9098 .write_u64 = cpu_shares_write_u64, 9099 }, 9100 #endif 9101 #ifdef CONFIG_CFS_BANDWIDTH 9102 { 9103 .name = "cfs_quota_us", 9104 .read_s64 = cpu_cfs_quota_read_s64, 9105 .write_s64 = cpu_cfs_quota_write_s64, 9106 }, 9107 { 9108 .name = "cfs_period_us", 9109 .read_u64 = cpu_cfs_period_read_u64, 9110 .write_u64 = cpu_cfs_period_write_u64, 9111 }, 9112 { 9113 .name = "stat", 9114 .seq_show = cpu_cfs_stat_show, 9115 }, 9116 #endif 9117 #ifdef CONFIG_RT_GROUP_SCHED 9118 { 9119 .name = "rt_runtime_us", 9120 .read_s64 = cpu_rt_runtime_read, 9121 .write_s64 = cpu_rt_runtime_write, 9122 }, 9123 { 9124 .name = "rt_period_us", 9125 .read_u64 = cpu_rt_period_read_uint, 9126 .write_u64 = cpu_rt_period_write_uint, 9127 }, 9128 #endif 9129 #ifdef CONFIG_UCLAMP_TASK_GROUP 9130 { 9131 .name = "uclamp.min", 9132 .flags = CFTYPE_NOT_ON_ROOT, 9133 .seq_show = cpu_uclamp_min_show, 9134 .write = cpu_uclamp_min_write, 9135 }, 9136 { 9137 .name = "uclamp.max", 9138 .flags = CFTYPE_NOT_ON_ROOT, 9139 .seq_show 
= cpu_uclamp_max_show, 9140 .write = cpu_uclamp_max_write, 9141 }, 9142 #endif 9143 { } /* Terminate */ 9144 }; 9145 9146 static int cpu_extra_stat_show(struct seq_file *sf, 9147 struct cgroup_subsys_state *css) 9148 { 9149 #ifdef CONFIG_CFS_BANDWIDTH 9150 { 9151 struct task_group *tg = css_tg(css); 9152 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 9153 u64 throttled_usec; 9154 9155 throttled_usec = cfs_b->throttled_time; 9156 do_div(throttled_usec, NSEC_PER_USEC); 9157 9158 seq_printf(sf, "nr_periods %d\n" 9159 "nr_throttled %d\n" 9160 "throttled_usec %llu\n", 9161 cfs_b->nr_periods, cfs_b->nr_throttled, 9162 throttled_usec); 9163 } 9164 #endif 9165 return 0; 9166 } 9167 9168 #ifdef CONFIG_FAIR_GROUP_SCHED 9169 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 9170 struct cftype *cft) 9171 { 9172 struct task_group *tg = css_tg(css); 9173 u64 weight = scale_load_down(tg->shares); 9174 9175 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); 9176 } 9177 9178 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 9179 struct cftype *cft, u64 weight) 9180 { 9181 /* 9182 * cgroup weight knobs should use the common MIN, DFL and MAX 9183 * values which are 1, 100 and 10000 respectively. While it loses 9184 * a bit of range on both ends, it maps pretty well onto the shares 9185 * value used by the scheduler and the round-trip conversions preserve 9186 * the original value over the entire range. 9187 */ 9188 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) 9189 return -ERANGE; 9190 9191 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); 9192 9193 return sched_group_set_shares(css_tg(css), scale_load(weight)); 9194 } 9195 9196 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 9197 struct cftype *cft) 9198 { 9199 unsigned long weight = scale_load_down(css_tg(css)->shares); 9200 int last_delta = INT_MAX; 9201 int prio, delta; 9202 9203 /* find the closest nice value to the current weight */ 9204 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 9205 delta = abs(sched_prio_to_weight[prio] - weight); 9206 if (delta >= last_delta) 9207 break; 9208 last_delta = delta; 9209 } 9210 9211 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 9212 } 9213 9214 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 9215 struct cftype *cft, s64 nice) 9216 { 9217 unsigned long weight; 9218 int idx; 9219 9220 if (nice < MIN_NICE || nice > MAX_NICE) 9221 return -ERANGE; 9222 9223 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 9224 idx = array_index_nospec(idx, 40); 9225 weight = sched_prio_to_weight[idx]; 9226 9227 return sched_group_set_shares(css_tg(css), scale_load(weight)); 9228 } 9229 #endif 9230 9231 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 9232 long period, long quota) 9233 { 9234 if (quota < 0) 9235 seq_puts(sf, "max"); 9236 else 9237 seq_printf(sf, "%ld", quota); 9238 9239 seq_printf(sf, " %ld\n", period); 9240 } 9241 9242 /* caller should put the current value in *@periodp before calling */ 9243 static int __maybe_unused cpu_period_quota_parse(char *buf, 9244 u64 *periodp, u64 *quotap) 9245 { 9246 char tok[21]; /* U64_MAX */ 9247 9248 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 9249 return -EINVAL; 9250 9251 *periodp *= NSEC_PER_USEC; 9252 9253 if (sscanf(tok, "%llu", quotap)) 9254 *quotap *= NSEC_PER_USEC; 9255 else if (!strcmp(tok, "max")) 9256 *quotap = RUNTIME_INF; 9257 else 9258 return -EINVAL; 9259 9260 return 0; 9261 } 9262 9263 #ifdef CONFIG_CFS_BANDWIDTH 9264 static
int cpu_max_show(struct seq_file *sf, void *v) 9265 { 9266 struct task_group *tg = css_tg(seq_css(sf)); 9267 9268 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); 9269 return 0; 9270 } 9271 9272 static ssize_t cpu_max_write(struct kernfs_open_file *of, 9273 char *buf, size_t nbytes, loff_t off) 9274 { 9275 struct task_group *tg = css_tg(of_css(of)); 9276 u64 period = tg_get_cfs_period(tg); 9277 u64 quota; 9278 int ret; 9279 9280 ret = cpu_period_quota_parse(buf, &period, &quota); 9281 if (!ret) 9282 ret = tg_set_cfs_bandwidth(tg, period, quota); 9283 return ret ?: nbytes; 9284 } 9285 #endif 9286 9287 static struct cftype cpu_files[] = { 9288 #ifdef CONFIG_FAIR_GROUP_SCHED 9289 { 9290 .name = "weight", 9291 .flags = CFTYPE_NOT_ON_ROOT, 9292 .read_u64 = cpu_weight_read_u64, 9293 .write_u64 = cpu_weight_write_u64, 9294 }, 9295 { 9296 .name = "weight.nice", 9297 .flags = CFTYPE_NOT_ON_ROOT, 9298 .read_s64 = cpu_weight_nice_read_s64, 9299 .write_s64 = cpu_weight_nice_write_s64, 9300 }, 9301 #endif 9302 #ifdef CONFIG_CFS_BANDWIDTH 9303 { 9304 .name = "max", 9305 .flags = CFTYPE_NOT_ON_ROOT, 9306 .seq_show = cpu_max_show, 9307 .write = cpu_max_write, 9308 }, 9309 #endif 9310 #ifdef CONFIG_UCLAMP_TASK_GROUP 9311 { 9312 .name = "uclamp.min", 9313 .flags = CFTYPE_NOT_ON_ROOT, 9314 .seq_show = cpu_uclamp_min_show, 9315 .write = cpu_uclamp_min_write, 9316 }, 9317 { 9318 .name = "uclamp.max", 9319 .flags = CFTYPE_NOT_ON_ROOT, 9320 .seq_show = cpu_uclamp_max_show, 9321 .write = cpu_uclamp_max_write, 9322 }, 9323 #endif 9324 { } /* terminate */ 9325 }; 9326 9327 struct cgroup_subsys cpu_cgrp_subsys = { 9328 .css_alloc = cpu_cgroup_css_alloc, 9329 .css_online = cpu_cgroup_css_online, 9330 .css_released = cpu_cgroup_css_released, 9331 .css_free = cpu_cgroup_css_free, 9332 .css_extra_stat_show = cpu_extra_stat_show, 9333 .fork = cpu_cgroup_fork, 9334 .can_attach = cpu_cgroup_can_attach, 9335 .attach = cpu_cgroup_attach, 9336 .legacy_cftypes = cpu_legacy_files, 9337 .dfl_cftypes = cpu_files, 9338 .early_init = true, 9339 .threaded = true, 9340 }; 9341 9342 #endif /* CONFIG_CGROUP_SCHED */ 9343 9344 void dump_cpu_task(int cpu) 9345 { 9346 pr_info("Task dump for CPU %d:\n", cpu); 9347 sched_show_task(cpu_curr(cpu)); 9348 } 9349 9350 /* 9351 * Nice levels are multiplicative, with a gentle 10% change for every 9352 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 9353 * nice 1, it will get ~10% less CPU time than another CPU-bound task 9354 * that remained on nice 0. 9355 * 9356 * The "10% effect" is relative and cumulative: from _any_ nice level, 9357 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 9358 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 9359 * If a task goes up by ~10% and another task goes down by ~10% then 9360 * the relative distance between them is ~25%.) 9361 */ 9362 const int sched_prio_to_weight[40] = { 9363 /* -20 */ 88761, 71755, 56483, 46273, 36291, 9364 /* -15 */ 29154, 23254, 18705, 14949, 11916, 9365 /* -10 */ 9548, 7620, 6100, 4904, 3906, 9366 /* -5 */ 3121, 2501, 1991, 1586, 1277, 9367 /* 0 */ 1024, 820, 655, 526, 423, 9368 /* 5 */ 335, 272, 215, 172, 137, 9369 /* 10 */ 110, 87, 70, 56, 45, 9370 /* 15 */ 36, 29, 23, 18, 15, 9371 }; 9372 9373 /* 9374 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
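 *
 * For example, nice 0 has weight 1024, so its precalculated inverse is
 * 2^32 / 1024 == 4194304, the nice-0 entry in the table below.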
9375 * 9376 * In cases where the weight does not change often, we can use the 9377 * precalculated inverse to speed up arithmetic by turning divisions 9378 * into multiplications: 9379 */ 9380 const u32 sched_prio_to_wmult[40] = { 9381 /* -20 */ 48388, 59856, 76040, 92818, 118348, 9382 /* -15 */ 147320, 184698, 229616, 287308, 360437, 9383 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 9384 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 9385 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 9386 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 9387 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 9388 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 9389 }; 9390 9391 void call_trace_sched_update_nr_running(struct rq *rq, int count) 9392 { 9393 trace_sched_update_nr_running_tp(rq, count); 9394 } 9395
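/*
 * Illustrative sketch (not kernel API; the function name below is made
 * up): how the precalculated inverses above turn a division by a task's
 * weight into a multiply and a shift, as the comment on
 * sched_prio_to_wmult[] describes. See __calc_delta() in fair.c for the
 * real, overflow-safe version used by CFS.
 */
static inline u64 sched_example_div_by_weight(u64 delta, int prio_idx)
{
	/*
	 * sched_prio_to_wmult[i] == 2^32 / sched_prio_to_weight[i], so
	 * (delta * wmult) >> 32 approximates delta / weight. Assumes
	 * delta is small enough that the multiply cannot overflow u64.
	 */
	return (delta * sched_prio_to_wmult[prio_idx]) >> 32;
}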