// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel scheduler code and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"

#include <linux/nospec.h>

#include <linux/kcov.h>
#include <linux/scs.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>

#include "../workqueue_internal.h"
#include "../../fs/io-wq.h"
#include "../smpboot.h"

#include "pelt.h"
#include "smp.h"

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT
#endif

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;


/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task()/
 *    cpu_cgroup_fork():	p->sched_task_group
 *  - uclamp_update_active()	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
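 *
 * Illustrative usage sketch (not a verbatim caller from this file): acquire
 * with task_rq_lock() and release with the matching task_rq_unlock():
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	... p's scheduling state is stable here ...
 *	task_rq_unlock(rq, p, &rf);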
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
	/*
	 * In theory, the compiler should just see 0 here, and optimize out the
	 * call to sched_rt_avg_update. But I don't trust it...
	 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight mis-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;

	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _old, _val = *_ptr;			\
									\
		for (;;) {						\
			_old = cmpxchg(_ptr, _val, _val | _mask);	\
			if (_old == _val)				\
				break;					\
			_val = _old;					\
		}							\
	_old;								\
})

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
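 *
 * Returns true when the target task was not polling, i.e. the caller still
 * needs to send a reschedule IPI for TIF_NEED_RESCHED to be noticed.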
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending whether or not the @task is already
 * queued for wakeup.
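 *
 * Illustrative usage sketch (assumes a wake_q_head declared with the
 * DEFINE_WAKE_Q() helper; not a verbatim caller from this file):
 *
 *	DEFINE_WAKE_Q(head);
 *
 *	wake_q_add(&head, task);	// or wake_q_add_safe() if a ref is held
 *	...
 *	wake_up_q(&head);		// performs the actual wakeups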
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;

	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd),
			housekeeping_cpumask(HK_FLAG_TIMER)) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
	cpu = default_cpu;
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to enforce the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
	 * if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
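 *
 * Illustrative usage sketch (my_down_visitor is a hypothetical tg_visitor
 * that returns 0 to continue the walk and non-zero to abort it; tg_nop()
 * below can serve as a no-op @up callback):
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, my_down_visitor, tg_nop, data);
 *	rcu_read_unlock();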
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less than or equal to these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since we have actual users that make use of uclamp
 * functionality.
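 * Once enabled, the key currently stays enabled: there is no code path in
 * this file that disables it again.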
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return clamp_value / UCLAMP_BUCKET_DELTA;
}

static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;
	return SCHED_CAPACITY_SCALE;
}

static inline void uclamp_se_set(struct uclamp_se *uc_se,
				 unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->bucket_id = uclamp_bucket_id(value);
	uc_se->user_defined = user_defined;
}

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * top most bucket with tasks in.
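	 *
	 * For example, assuming the default UCLAMP_BUCKETS == 5,
	 * UCLAMP_BUCKET_DELTA is DIV_ROUND_CLOSEST(1024, 5) == 205, so a
	 * clamp value of 300 is refcounted in bucket_id 1 and this scan
	 * returns the value cached by the highest non-empty bucket.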
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
	unsigned int default_util_min;
	struct uclamp_se *uc_se;

	lockdep_assert_held(&p->pi_lock);

	uc_se = &p->uclamp_req[UCLAMP_MIN];

	/* Only sync if user didn't override the default */
	if (uc_se->user_defined)
		return;

	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
	uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	if (!rt_task(p))
		return;

	/* Protect updates to p->uclamp_* */
	rq = task_rq_lock(p, &rf);
	__uclamp_update_util_min_rt_default(p);
	task_rq_unlock(rq, p, &rf);
}

static void uclamp_sync_util_min_rt_default(void)
{
	struct task_struct *g, *p;

	/*
	 * copy_process()			sysctl_uclamp
	 *					  uclamp_min_rt = X;
	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
	 *   // link thread			  smp_mb__after_spinlock()
	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
	 *   sched_post_fork()			  for_each_process_thread()
	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
	 *
	 * Ensures that either sched_post_fork() will observe the new
	 * uclamp_min_rt or for_each_process_thread() will observe the new
	 * task.
	 */
	read_lock(&tasklist_lock);
	smp_mb__after_spinlock();
	read_unlock(&tasklist_lock);

	rcu_read_lock();
	for_each_process_thread(g, p)
		uclamp_update_util_min_rt_default(p);
	rcu_read_unlock();
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	struct uclamp_se uc_max;

	/*
	 * Tasks in autogroups or root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	uc_max = task_group(p)->uclamp[clamp_id];
	if (uc_req.value > uc_max.value || !uc_req.user_defined)
		return uc_max;
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space, track
 * within each bucket the maximum value for tasks refcounted in it.
 * This "local max aggregation" allows to track the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_held(&rq->lock);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > READ_ONCE(uc_rq->value))
		WRITE_ONCE(uc_rq->value, uc_se->value);
}

/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_held(&rq->lock);

	/*
	 * If sched_uclamp_used was enabled after task @p was enqueued,
	 * we could end up with an unbalanced call to uclamp_rq_dec_id().
	 *
	 * In this case the uc_se->active flag should be false since no uclamp
	 * accounting was performed at enqueue time and we can just return
	 * here.
	 *
	 * Need to be careful of the following enqueue/dequeue ordering
	 * problem too
	 *
	 *	enqueue(taskA)
	 *	// sched_uclamp_used gets enabled
	 *	enqueue(taskB)
	 *	dequeue(taskA)
	 *	// Must not decrement bucket->tasks here
	 *	dequeue(taskB)
	 *
	 * where we could end up with stale data in uc_se and
	 * bucket[uc_se->bucket_id].
	 *
	 * The following check here eliminates the possibility of such a race.
	 */
	if (unlikely(!uc_se->active))
		return;

	bucket = &uc_rq->bucket[uc_se->bucket_id];

	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;

	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = READ_ONCE(uc_rq->value);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fixup the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		WRITE_ONCE(uc_rq->value, bkt_clamp);
	}
}

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void
uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	if (p->uclamp[clamp_id].active) {
		uclamp_rq_dec_id(rq, p, clamp_id);
		uclamp_rq_inc_id(rq, p, clamp_id);
	}

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css,
			   unsigned int clamps)
{
	enum uclamp_id clamp_id;
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it))) {
		for_each_clamp_id(clamp_id) {
			if ((0x1 << clamp_id) & clamps)
				uclamp_update_active(p, clamp_id);
		}
	}
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	rcu_read_lock();
	cpu_util_update_eff(&root_task_group.css);
	rcu_read_unlock();
}
#else
static void uclamp_update_root_tg(void) { }
#endif

int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max, old_min_rt;
	int result;

	mutex_lock(&uclamp_mutex);
	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;
	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		goto done;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {

		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_update_root_tg();
	}
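	/*
	 * Note: static_branch_enable() can sleep (it takes jump label locks
	 * and patches kernel text), which is fine here since only
	 * uclamp_mutex is held, not any scheduler lock.
	 */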

	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_sync_util_min_rt_default();
	}

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */

	goto done;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
done:
	mutex_unlock(&uclamp_mutex);

	return result;
}

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT by default have a 100% boost value that could be modified
		 * at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);

	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

static void uclamp_fork(struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
	 * as the task is still at its early fork stages.
	 */
	for_each_clamp_id(clamp_id)
		p->uclamp[clamp_id].active = false;

	if (likely(!p->sched_reset_on_fork))
		return;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&p->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}
}

static void uclamp_post_fork(struct task_struct *p)
{
	uclamp_update_util_min_rt_default(p);
}

static void __init init_uclamp_rq(struct rq *rq)
{
	enum uclamp_id clamp_id;
	struct uclamp_rq *uc_rq = rq->uclamp;

	for_each_clamp_id(clamp_id) {
		uc_rq[clamp_id] = (struct uclamp_rq) {
			.value = uclamp_none(clamp_id)
		};
	}

	rq->uclamp_flags = 0;
}

static void __init init_uclamp(void)
{
	struct uclamp_se uc_max = {};
	enum uclamp_id clamp_id;
	int cpu;

	for_each_possible_cpu(cpu)
		init_uclamp_rq(cpu_rq(cpu));

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&init_task.uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}

	/* System defaults allow max clamp values for both indexes */
	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
	for_each_clamp_id(clamp_id) {
		uclamp_default[clamp_id] = uc_max;
#ifdef CONFIG_UCLAMP_TASK_GROUP
		root_task_group.uclamp_req[clamp_id] = uc_max;
		root_task_group.uclamp[clamp_id] = uc_max;
#endif
	}
}

#else /* CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void uclamp_post_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & ENQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & ENQUEUE_RESTORE)) {
		sched_info_queued(rq, p);
		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
	}

	uclamp_rq_inc(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & DEQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & DEQUEUE_SAVE)) {
		sched_info_dequeued(rq, p);
		psi_dequeue(p, flags & DEQUEUE_SLEEP);
	}

	uclamp_rq_dec(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	enqueue_task(rq, p, flags);

	p->on_rq = TASK_ON_RQ_QUEUED;
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;

	dequeue_task(rq, p, flags);
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->sched_class == rq->curr->sched_class)
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	else if (p->sched_class > rq->curr->sched_class)
		resched_curr(rq);

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq);
}

#ifdef CONFIG_SMP

static void
__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);

static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask,
				  u32 flags);

static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
{
	if (likely(!p->migration_disabled))
		return;

	if (p->cpus_ptr != &p->cpus_mask)
		return;

	/*
	 * Violates locking rules! see comment in __do_set_cpus_allowed().
	 */
	__do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE);
}

void migrate_disable(void)
{
	struct task_struct *p = current;

	if (p->migration_disabled) {
		p->migration_disabled++;
		return;
	}

	preempt_disable();
	this_rq()->nr_pinned++;
	p->migration_disabled = 1;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);

void migrate_enable(void)
{
	struct task_struct *p = current;

	if (p->migration_disabled > 1) {
		p->migration_disabled--;
		return;
	}

	/*
	 * Ensure stop_task runs either before or after this, and that
	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
	 */
	preempt_disable();
	if (p->cpus_ptr != &p->cpus_mask)
		__set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
	/*
	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
	 * regular cpus_mask, otherwise things that race (eg.
	 * select_fallback_rq) get confused.
	 */
	barrier();
	p->migration_disabled = 0;
	this_rq()->nr_pinned--;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);

static inline bool rq_has_pinned_tasks(struct rq *rq)
{
	return rq->nr_pinned;
}

/*
 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
 * __set_cpus_allowed_ptr() and select_fallback_rq().
 */
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
	/* When not in the task's cpumask, no point in looking further. */
	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
		return false;

	/* migrate_disabled() must be allowed to finish. */
	if (is_migration_disabled(p))
		return cpu_online(cpu);

	/* Non kernel threads are not allowed during either online or offline. */
	if (!(p->flags & PF_KTHREAD))
		return cpu_active(cpu);

	/* KTHREAD_IS_PER_CPU is always allowed. */
	if (kthread_is_per_cpu(p))
		return cpu_online(cpu);

	/* Regular kernel threads don't get to stay during offline. */
	if (cpu_rq(cpu)->balance_push)
		return false;

	/* But are allowed during online. */
	return cpu_online(cpu);
}

/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
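 *
 * Illustrative caller-side sketch (sched_exec()-style; the values are
 * placeholders, not a verbatim caller from this section):
 *
 *	struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };
 *
 *	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);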
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
				   struct task_struct *p, int new_cpu)
{
	lockdep_assert_held(&rq->lock);

	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
	set_task_cpu(p, new_cpu);
	rq_unlock(rq, rf);

	rq = cpu_rq(new_cpu);

	rq_lock(rq, rf);
	BUG_ON(task_cpu(p) != new_cpu);
	activate_task(rq, p, 0);
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct		*task;
	int				dest_cpu;
	struct set_affinity_pending	*pending;
};

struct set_affinity_pending {
	refcount_t		refs;
	struct completion	done;
	struct cpu_stop_work	stop_work;
	struct migration_arg	arg;
};

/*
 * Move (not current) task off this CPU, onto the destination CPU. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
				 struct task_struct *p, int dest_cpu)
{
	/* Affinity changed (again). */
	if (!is_cpu_allowed(p, dest_cpu))
		return rq;

	update_rq_clock(rq);
	rq = move_queued_task(rq, rf, p, dest_cpu);

	return rq;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct set_affinity_pending *pending;
	struct migration_arg *arg = data;
	struct task_struct *p = arg->task;
	int dest_cpu = arg->dest_cpu;
	struct rq *rq = this_rq();
	bool complete = false;
	struct rq_flags rf;

	/*
	 * The original target CPU might have gone down and we might
	 * be on another CPU but it doesn't matter.
	 */
	local_irq_save(rf.flags);
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_ptr
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	flush_smp_call_function_from_idle();

	raw_spin_lock(&p->pi_lock);
	rq_lock(rq, &rf);

	pending = p->migration_pending;
	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq) {
		if (is_migration_disabled(p))
			goto out;

		if (pending) {
			p->migration_pending = NULL;
			complete = true;
		}

		/* migrate_enable() -- we must not race against SCA */
		if (dest_cpu < 0) {
			/*
			 * When this was migrate_enable() but we no longer
			 * have a @pending, a concurrent SCA 'fixed' things
			 * and we should be valid again. Nothing to do.
1947 */ 1948 if (!pending) { 1949 WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)); 1950 goto out; 1951 } 1952 1953 dest_cpu = cpumask_any_distribute(&p->cpus_mask); 1954 } 1955 1956 if (task_on_rq_queued(p)) 1957 rq = __migrate_task(rq, &rf, p, dest_cpu); 1958 else 1959 p->wake_cpu = dest_cpu; 1960 1961 } else if (dest_cpu < 0 || pending) { 1962 /* 1963 * This happens when we get migrated between migrate_enable()'s 1964 * preempt_enable() and scheduling the stopper task. At that 1965 * point we're a regular task again and not current anymore. 1966 * 1967 * A !PREEMPT kernel has a giant hole here, which makes it far 1968 * more likely. 1969 */ 1970 1971 /* 1972 * The task moved before the stopper got to run. We're holding 1973 * ->pi_lock, so the allowed mask is stable - if it got 1974 * somewhere allowed, we're done. 1975 */ 1976 if (pending && cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { 1977 p->migration_pending = NULL; 1978 complete = true; 1979 goto out; 1980 } 1981 1982 /* 1983 * When this was migrate_enable() but we no longer have an 1984 * @pending, a concurrent SCA 'fixed' things and we should be 1985 * valid again. Nothing to do. 1986 */ 1987 if (!pending) { 1988 WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)); 1989 goto out; 1990 } 1991 1992 /* 1993 * When migrate_enable() hits a rq mis-match we can't reliably 1994 * determine is_migration_disabled() and so have to chase after 1995 * it. 1996 */ 1997 task_rq_unlock(rq, p, &rf); 1998 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, 1999 &pending->arg, &pending->stop_work); 2000 return 0; 2001 } 2002 out: 2003 task_rq_unlock(rq, p, &rf); 2004 2005 if (complete) 2006 complete_all(&pending->done); 2007 2008 /* For pending->{arg,stop_work} */ 2009 pending = arg->pending; 2010 if (pending && refcount_dec_and_test(&pending->refs)) 2011 wake_up_var(&pending->refs); 2012 2013 return 0; 2014 } 2015 2016 int push_cpu_stop(void *arg) 2017 { 2018 struct rq *lowest_rq = NULL, *rq = this_rq(); 2019 struct task_struct *p = arg; 2020 2021 raw_spin_lock_irq(&p->pi_lock); 2022 raw_spin_lock(&rq->lock); 2023 2024 if (task_rq(p) != rq) 2025 goto out_unlock; 2026 2027 if (is_migration_disabled(p)) { 2028 p->migration_flags |= MDF_PUSH; 2029 goto out_unlock; 2030 } 2031 2032 p->migration_flags &= ~MDF_PUSH; 2033 2034 if (p->sched_class->find_lock_rq) 2035 lowest_rq = p->sched_class->find_lock_rq(p, rq); 2036 2037 if (!lowest_rq) 2038 goto out_unlock; 2039 2040 // XXX validate p is still the highest prio task 2041 if (task_rq(p) == rq) { 2042 deactivate_task(rq, p, 0); 2043 set_task_cpu(p, lowest_rq->cpu); 2044 activate_task(lowest_rq, p, 0); 2045 resched_curr(lowest_rq); 2046 } 2047 2048 double_unlock_balance(rq, lowest_rq); 2049 2050 out_unlock: 2051 rq->push_busy = false; 2052 raw_spin_unlock(&rq->lock); 2053 raw_spin_unlock_irq(&p->pi_lock); 2054 2055 put_task_struct(p); 2056 return 0; 2057 } 2058 2059 /* 2060 * sched_class::set_cpus_allowed must do the below, but is not required to 2061 * actually call this function. 
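 *
 * A class providing its own ->set_cpus_allowed() will usually do its
 * class-specific bookkeeping first and then fall through to this helper.
 * A sketch, with a made-up class name:
 *
 *	static void foo_set_cpus_allowed(struct task_struct *p,
 *					 const struct cpumask *new_mask,
 *					 u32 flags)
 *	{
 *		... update class-internal accounting for the new mask ...
 *		set_cpus_allowed_common(p, new_mask, flags);
 *	}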
2062 */ 2063 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags) 2064 { 2065 if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { 2066 p->cpus_ptr = new_mask; 2067 return; 2068 } 2069 2070 cpumask_copy(&p->cpus_mask, new_mask); 2071 p->nr_cpus_allowed = cpumask_weight(new_mask); 2072 } 2073 2074 static void 2075 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags) 2076 { 2077 struct rq *rq = task_rq(p); 2078 bool queued, running; 2079 2080 /* 2081 * This here violates the locking rules for affinity, since we're only 2082 * supposed to change these variables while holding both rq->lock and 2083 * p->pi_lock. 2084 * 2085 * HOWEVER, it magically works, because ttwu() is the only code that 2086 * accesses these variables under p->pi_lock and only does so after 2087 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() 2088 * before finish_task(). 2089 * 2090 * XXX do further audits, this smells like something putrid. 2091 */ 2092 if (flags & SCA_MIGRATE_DISABLE) 2093 SCHED_WARN_ON(!p->on_cpu); 2094 else 2095 lockdep_assert_held(&p->pi_lock); 2096 2097 queued = task_on_rq_queued(p); 2098 running = task_current(rq, p); 2099 2100 if (queued) { 2101 /* 2102 * Because __kthread_bind() calls this on blocked tasks without 2103 * holding rq->lock. 2104 */ 2105 lockdep_assert_held(&rq->lock); 2106 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 2107 } 2108 if (running) 2109 put_prev_task(rq, p); 2110 2111 p->sched_class->set_cpus_allowed(p, new_mask, flags); 2112 2113 if (queued) 2114 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 2115 if (running) 2116 set_next_task(rq, p); 2117 } 2118 2119 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 2120 { 2121 __do_set_cpus_allowed(p, new_mask, 0); 2122 } 2123 2124 /* 2125 * This function is wildly self concurrent; here be dragons. 2126 * 2127 * 2128 * When given a valid mask, __set_cpus_allowed_ptr() must block until the 2129 * designated task is enqueued on an allowed CPU. If that task is currently 2130 * running, we have to kick it out using the CPU stopper. 2131 * 2132 * Migrate-Disable comes along and tramples all over our nice sandcastle. 2133 * Consider: 2134 * 2135 * Initial conditions: P0->cpus_mask = [0, 1] 2136 * 2137 * P0@CPU0 P1 2138 * 2139 * migrate_disable(); 2140 * <preempted> 2141 * set_cpus_allowed_ptr(P0, [1]); 2142 * 2143 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes 2144 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region). 2145 * This means we need the following scheme: 2146 * 2147 * P0@CPU0 P1 2148 * 2149 * migrate_disable(); 2150 * <preempted> 2151 * set_cpus_allowed_ptr(P0, [1]); 2152 * <blocks> 2153 * <resumes> 2154 * migrate_enable(); 2155 * __set_cpus_allowed_ptr(); 2156 * <wakes local stopper> 2157 * `--> <woken on migration completion> 2158 * 2159 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple 2160 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any 2161 * task p are serialized by p->pi_lock, which we can leverage: the one that 2162 * should come into effect at the end of the Migrate-Disable region is the last 2163 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask), 2164 * but we still need to properly signal those waiting tasks at the appropriate 2165 * moment. 2166 * 2167 * This is implemented using struct set_affinity_pending. 
The first 2168 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will 2169 * setup an instance of that struct and install it on the targeted task_struct. 2170 * Any and all further callers will reuse that instance. Those then wait for 2171 * a completion signaled at the tail of the CPU stopper callback (1), triggered 2172 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()). 2173 * 2174 * 2175 * (1) In the cases covered above. There is one more where the completion is 2176 * signaled within affine_move_task() itself: when a subsequent affinity request 2177 * cancels the need for an active migration. Consider: 2178 * 2179 * Initial conditions: P0->cpus_mask = [0, 1] 2180 * 2181 * P0@CPU0 P1 P2 2182 * 2183 * migrate_disable(); 2184 * <preempted> 2185 * set_cpus_allowed_ptr(P0, [1]); 2186 * <blocks> 2187 * set_cpus_allowed_ptr(P0, [0, 1]); 2188 * <signal completion> 2189 * <awakes> 2190 * 2191 * Note that the above is safe vs a concurrent migrate_enable(), as any 2192 * pending affinity completion is preceded by an uninstallation of 2193 * p->migration_pending done with p->pi_lock held. 2194 */ 2195 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, 2196 int dest_cpu, unsigned int flags) 2197 { 2198 struct set_affinity_pending my_pending = { }, *pending = NULL; 2199 struct migration_arg arg = { 2200 .task = p, 2201 .dest_cpu = dest_cpu, 2202 }; 2203 bool complete = false; 2204 2205 /* Can the task run on the task's current CPU? If so, we're done */ 2206 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { 2207 struct task_struct *push_task = NULL; 2208 2209 if ((flags & SCA_MIGRATE_ENABLE) && 2210 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { 2211 rq->push_busy = true; 2212 push_task = get_task_struct(p); 2213 } 2214 2215 pending = p->migration_pending; 2216 if (pending) { 2217 refcount_inc(&pending->refs); 2218 p->migration_pending = NULL; 2219 complete = true; 2220 } 2221 task_rq_unlock(rq, p, rf); 2222 2223 if (push_task) { 2224 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, 2225 p, &rq->push_work); 2226 } 2227 2228 if (complete) 2229 goto do_complete; 2230 2231 return 0; 2232 } 2233 2234 if (!(flags & SCA_MIGRATE_ENABLE)) { 2235 /* serialized by p->pi_lock */ 2236 if (!p->migration_pending) { 2237 /* Install the request */ 2238 refcount_set(&my_pending.refs, 1); 2239 init_completion(&my_pending.done); 2240 p->migration_pending = &my_pending; 2241 } else { 2242 pending = p->migration_pending; 2243 refcount_inc(&pending->refs); 2244 } 2245 } 2246 pending = p->migration_pending; 2247 /* 2248 * - !MIGRATE_ENABLE: 2249 * we'll have installed a pending if there wasn't one already. 2250 * 2251 * - MIGRATE_ENABLE: 2252 * we're here because the current CPU isn't matching anymore, 2253 * the only way that can happen is because of a concurrent 2254 * set_cpus_allowed_ptr() call, which should then still be 2255 * pending completion. 2256 * 2257 * Either way, we really should have a @pending here. 
2258 */ 2259 if (WARN_ON_ONCE(!pending)) { 2260 task_rq_unlock(rq, p, rf); 2261 return -EINVAL; 2262 } 2263 2264 if (flags & SCA_MIGRATE_ENABLE) { 2265 2266 refcount_inc(&pending->refs); /* pending->{arg,stop_work} */ 2267 p->migration_flags &= ~MDF_PUSH; 2268 task_rq_unlock(rq, p, rf); 2269 2270 pending->arg = (struct migration_arg) { 2271 .task = p, 2272 .dest_cpu = -1, 2273 .pending = pending, 2274 }; 2275 2276 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, 2277 &pending->arg, &pending->stop_work); 2278 2279 return 0; 2280 } 2281 2282 if (task_running(rq, p) || p->state == TASK_WAKING) { 2283 /* 2284 * Lessen races (and headaches) by delegating 2285 * is_migration_disabled(p) checks to the stopper, which will 2286 * run on the same CPU as said p. 2287 */ 2288 task_rq_unlock(rq, p, rf); 2289 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); 2290 2291 } else { 2292 2293 if (!is_migration_disabled(p)) { 2294 if (task_on_rq_queued(p)) 2295 rq = move_queued_task(rq, rf, p, dest_cpu); 2296 2297 p->migration_pending = NULL; 2298 complete = true; 2299 } 2300 task_rq_unlock(rq, p, rf); 2301 2302 do_complete: 2303 if (complete) 2304 complete_all(&pending->done); 2305 } 2306 2307 wait_for_completion(&pending->done); 2308 2309 if (refcount_dec_and_test(&pending->refs)) 2310 wake_up_var(&pending->refs); 2311 2312 /* 2313 * Block the original owner of &pending until all subsequent callers 2314 * have seen the completion and decremented the refcount 2315 */ 2316 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs)); 2317 2318 return 0; 2319 } 2320 2321 /* 2322 * Change a given task's CPU affinity. Migrate the thread to a 2323 * proper CPU and schedule it away if the CPU it's executing on 2324 * is removed from the allowed bitmask. 2325 * 2326 * NOTE: the caller must have a valid reference to the task, the 2327 * task must not exit() & deallocate itself prematurely. The 2328 * call is not atomic; no spinlocks may be held. 2329 */ 2330 static int __set_cpus_allowed_ptr(struct task_struct *p, 2331 const struct cpumask *new_mask, 2332 u32 flags) 2333 { 2334 const struct cpumask *cpu_valid_mask = cpu_active_mask; 2335 unsigned int dest_cpu; 2336 struct rq_flags rf; 2337 struct rq *rq; 2338 int ret = 0; 2339 2340 rq = task_rq_lock(p, &rf); 2341 update_rq_clock(rq); 2342 2343 if (p->flags & PF_KTHREAD || is_migration_disabled(p)) { 2344 /* 2345 * Kernel threads are allowed on online && !active CPUs. 2346 * 2347 * Specifically, migration_disabled() tasks must not fail the 2348 * cpumask_any_and_distribute() pick below, esp. so on 2349 * SCA_MIGRATE_ENABLE, otherwise we'll not call 2350 * set_cpus_allowed_common() and actually reset p->cpus_ptr. 2351 */ 2352 cpu_valid_mask = cpu_online_mask; 2353 } 2354 2355 /* 2356 * Must re-check here, to close a race against __kthread_bind(), 2357 * sched_setaffinity() is not guaranteed to observe the flag. 2358 */ 2359 if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { 2360 ret = -EINVAL; 2361 goto out; 2362 } 2363 2364 if (!(flags & SCA_MIGRATE_ENABLE)) { 2365 if (cpumask_equal(&p->cpus_mask, new_mask)) 2366 goto out; 2367 2368 if (WARN_ON_ONCE(p == current && 2369 is_migration_disabled(p) && 2370 !cpumask_test_cpu(task_cpu(p), new_mask))) { 2371 ret = -EBUSY; 2372 goto out; 2373 } 2374 } 2375 2376 /* 2377 * Picking a ~random cpu helps in cases where we are changing affinity 2378 * for groups of tasks (ie. cpuset), so that load balancing is not 2379 * immediately required to distribute the tasks within their new mask. 
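 *
 * E.g. when a cpuset moves a group of tasks from CPUs 0-3 to CPUs 4-7,
 * successive picks are expected to spread over 4, 5, 6, 7, ... rather
 * than dumping every task on the first allowed CPU (a sketch of the
 * intent; the exact pick order is not guaranteed).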
2380 */ 2381 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask); 2382 if (dest_cpu >= nr_cpu_ids) { 2383 ret = -EINVAL; 2384 goto out; 2385 } 2386 2387 __do_set_cpus_allowed(p, new_mask, flags); 2388 2389 if (p->flags & PF_KTHREAD) { 2390 /* 2391 * For kernel threads that do indeed end up on online && 2392 * !active we want to ensure they are strict per-CPU threads. 2393 */ 2394 WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) && 2395 !cpumask_intersects(new_mask, cpu_active_mask) && 2396 p->nr_cpus_allowed != 1); 2397 } 2398 2399 return affine_move_task(rq, p, &rf, dest_cpu, flags); 2400 2401 out: 2402 task_rq_unlock(rq, p, &rf); 2403 2404 return ret; 2405 } 2406 2407 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 2408 { 2409 return __set_cpus_allowed_ptr(p, new_mask, 0); 2410 } 2411 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 2412 2413 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 2414 { 2415 #ifdef CONFIG_SCHED_DEBUG 2416 /* 2417 * We should never call set_task_cpu() on a blocked task, 2418 * ttwu() will sort out the placement. 2419 */ 2420 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && 2421 !p->on_rq); 2422 2423 /* 2424 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, 2425 * because schedstat_wait_{start,end} rebase migrating task's wait_start 2426 * time relying on p->on_rq. 2427 */ 2428 WARN_ON_ONCE(p->state == TASK_RUNNING && 2429 p->sched_class == &fair_sched_class && 2430 (p->on_rq && !task_on_rq_migrating(p))); 2431 2432 #ifdef CONFIG_LOCKDEP 2433 /* 2434 * The caller should hold either p->pi_lock or rq->lock, when changing 2435 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 2436 * 2437 * sched_move_task() holds both and thus holding either pins the cgroup, 2438 * see task_group(). 2439 * 2440 * Furthermore, all task_rq users should acquire both locks, see 2441 * task_rq_lock(). 2442 */ 2443 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 2444 lockdep_is_held(&task_rq(p)->lock))); 2445 #endif 2446 /* 2447 * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 2448 */ 2449 WARN_ON_ONCE(!cpu_online(new_cpu)); 2450 2451 WARN_ON_ONCE(is_migration_disabled(p)); 2452 #endif 2453 2454 trace_sched_migrate_task(p, new_cpu); 2455 2456 if (task_cpu(p) != new_cpu) { 2457 if (p->sched_class->migrate_task_rq) 2458 p->sched_class->migrate_task_rq(p, new_cpu); 2459 p->se.nr_migrations++; 2460 rseq_migrate(p); 2461 perf_event_task_migrate(p); 2462 } 2463 2464 __set_task_cpu(p, new_cpu); 2465 } 2466 2467 #ifdef CONFIG_NUMA_BALANCING 2468 static void __migrate_swap_task(struct task_struct *p, int cpu) 2469 { 2470 if (task_on_rq_queued(p)) { 2471 struct rq *src_rq, *dst_rq; 2472 struct rq_flags srf, drf; 2473 2474 src_rq = task_rq(p); 2475 dst_rq = cpu_rq(cpu); 2476 2477 rq_pin_lock(src_rq, &srf); 2478 rq_pin_lock(dst_rq, &drf); 2479 2480 deactivate_task(src_rq, p, 0); 2481 set_task_cpu(p, cpu); 2482 activate_task(dst_rq, p, 0); 2483 check_preempt_curr(dst_rq, p, 0); 2484 2485 rq_unpin_lock(dst_rq, &drf); 2486 rq_unpin_lock(src_rq, &srf); 2487 2488 } else { 2489 /* 2490 * Task isn't running anymore; make it appear like we migrated 2491 * it before it went to sleep. This means on wakeup we make the 2492 * previous CPU our target instead of where it really is. 
2493 */ 2494 p->wake_cpu = cpu; 2495 } 2496 } 2497 2498 struct migration_swap_arg { 2499 struct task_struct *src_task, *dst_task; 2500 int src_cpu, dst_cpu; 2501 }; 2502 2503 static int migrate_swap_stop(void *data) 2504 { 2505 struct migration_swap_arg *arg = data; 2506 struct rq *src_rq, *dst_rq; 2507 int ret = -EAGAIN; 2508 2509 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) 2510 return -EAGAIN; 2511 2512 src_rq = cpu_rq(arg->src_cpu); 2513 dst_rq = cpu_rq(arg->dst_cpu); 2514 2515 double_raw_lock(&arg->src_task->pi_lock, 2516 &arg->dst_task->pi_lock); 2517 double_rq_lock(src_rq, dst_rq); 2518 2519 if (task_cpu(arg->dst_task) != arg->dst_cpu) 2520 goto unlock; 2521 2522 if (task_cpu(arg->src_task) != arg->src_cpu) 2523 goto unlock; 2524 2525 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) 2526 goto unlock; 2527 2528 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) 2529 goto unlock; 2530 2531 __migrate_swap_task(arg->src_task, arg->dst_cpu); 2532 __migrate_swap_task(arg->dst_task, arg->src_cpu); 2533 2534 ret = 0; 2535 2536 unlock: 2537 double_rq_unlock(src_rq, dst_rq); 2538 raw_spin_unlock(&arg->dst_task->pi_lock); 2539 raw_spin_unlock(&arg->src_task->pi_lock); 2540 2541 return ret; 2542 } 2543 2544 /* 2545 * Cross migrate two tasks 2546 */ 2547 int migrate_swap(struct task_struct *cur, struct task_struct *p, 2548 int target_cpu, int curr_cpu) 2549 { 2550 struct migration_swap_arg arg; 2551 int ret = -EINVAL; 2552 2553 arg = (struct migration_swap_arg){ 2554 .src_task = cur, 2555 .src_cpu = curr_cpu, 2556 .dst_task = p, 2557 .dst_cpu = target_cpu, 2558 }; 2559 2560 if (arg.src_cpu == arg.dst_cpu) 2561 goto out; 2562 2563 /* 2564 * These three tests are all lockless; this is OK since all of them 2565 * will be re-checked with proper locks held further down the line. 2566 */ 2567 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 2568 goto out; 2569 2570 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) 2571 goto out; 2572 2573 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) 2574 goto out; 2575 2576 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 2577 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 2578 2579 out: 2580 return ret; 2581 } 2582 #endif /* CONFIG_NUMA_BALANCING */ 2583 2584 /* 2585 * wait_task_inactive - wait for a thread to unschedule. 2586 * 2587 * If @match_state is nonzero, it's the @p->state value just checked and 2588 * not expected to change. If it changes, i.e. @p might have woken up, 2589 * then return zero. When we succeed in waiting for @p to be off its CPU, 2590 * we return a positive number (its total switch count). If a second call 2591 * a short while later returns the same number, the caller can be sure that 2592 * @p has remained unscheduled the whole time. 2593 * 2594 * The caller must ensure that the task *will* unschedule sometime soon, 2595 * else this function might spin for a *long* time. This function can't 2596 * be called with interrupts off, or it may introduce deadlock with 2597 * smp_call_function() if an IPI is sent by the same process we are 2598 * waiting to become inactive. 2599 */ 2600 unsigned long wait_task_inactive(struct task_struct *p, long match_state) 2601 { 2602 int running, queued; 2603 struct rq_flags rf; 2604 unsigned long ncsw; 2605 struct rq *rq; 2606 2607 for (;;) { 2608 /* 2609 * We do the initial early heuristics without holding 2610 * any task-queue locks at all. 
We'll only try to get 2611 * the runqueue lock when things look like they will 2612 * work out! 2613 */ 2614 rq = task_rq(p); 2615 2616 /* 2617 * If the task is actively running on another CPU 2618 * still, just relax and busy-wait without holding 2619 * any locks. 2620 * 2621 * NOTE! Since we don't hold any locks, it's not 2622 * even sure that "rq" stays as the right runqueue! 2623 * But we don't care, since "task_running()" will 2624 * return false if the runqueue has changed and p 2625 * is actually now running somewhere else! 2626 */ 2627 while (task_running(rq, p)) { 2628 if (match_state && unlikely(p->state != match_state)) 2629 return 0; 2630 cpu_relax(); 2631 } 2632 2633 /* 2634 * Ok, time to look more closely! We need the rq 2635 * lock now, to be *sure*. If we're wrong, we'll 2636 * just go back and repeat. 2637 */ 2638 rq = task_rq_lock(p, &rf); 2639 trace_sched_wait_task(p); 2640 running = task_running(rq, p); 2641 queued = task_on_rq_queued(p); 2642 ncsw = 0; 2643 if (!match_state || p->state == match_state) 2644 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 2645 task_rq_unlock(rq, p, &rf); 2646 2647 /* 2648 * If it changed from the expected state, bail out now. 2649 */ 2650 if (unlikely(!ncsw)) 2651 break; 2652 2653 /* 2654 * Was it really running after all now that we 2655 * checked with the proper locks actually held? 2656 * 2657 * Oops. Go back and try again.. 2658 */ 2659 if (unlikely(running)) { 2660 cpu_relax(); 2661 continue; 2662 } 2663 2664 /* 2665 * It's not enough that it's not actively running, 2666 * it must be off the runqueue _entirely_, and not 2667 * preempted! 2668 * 2669 * So if it was still runnable (but just not actively 2670 * running right now), it's preempted, and we should 2671 * yield - it could be a while. 2672 */ 2673 if (unlikely(queued)) { 2674 ktime_t to = NSEC_PER_SEC / HZ; 2675 2676 set_current_state(TASK_UNINTERRUPTIBLE); 2677 schedule_hrtimeout(&to, HRTIMER_MODE_REL); 2678 continue; 2679 } 2680 2681 /* 2682 * Ahh, all good. It wasn't running, and it wasn't 2683 * runnable, which means that it will never become 2684 * running in the future either. We're all done! 2685 */ 2686 break; 2687 } 2688 2689 return ncsw; 2690 } 2691 2692 /*** 2693 * kick_process - kick a running thread to enter/exit the kernel 2694 * @p: the to-be-kicked thread 2695 * 2696 * Cause a process which is running on another CPU to enter 2697 * kernel-mode, without any delay. (to get signals handled.) 2698 * 2699 * NOTE: this function doesn't have to take the runqueue lock, 2700 * because all it wants to ensure is that the remote task enters 2701 * the kernel. If the IPI races and the task has been migrated 2702 * to another CPU then no harm is done and the purpose has been 2703 * achieved as well. 2704 */ 2705 void kick_process(struct task_struct *p) 2706 { 2707 int cpu; 2708 2709 preempt_disable(); 2710 cpu = task_cpu(p); 2711 if ((cpu != smp_processor_id()) && task_curr(p)) 2712 smp_send_reschedule(cpu); 2713 preempt_enable(); 2714 } 2715 EXPORT_SYMBOL_GPL(kick_process); 2716 2717 /* 2718 * ->cpus_ptr is protected by both rq->lock and p->pi_lock 2719 * 2720 * A few notes on cpu_active vs cpu_online: 2721 * 2722 * - cpu_active must be a subset of cpu_online 2723 * 2724 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, 2725 * see __set_cpus_allowed_ptr(). At this point the newly online 2726 * CPU isn't yet part of the sched domains, and balancing will not 2727 * see it. 
2728 * 2729 * - on CPU-down we clear cpu_active() to mask the sched domains and 2730 * avoid the load balancer to place new tasks on the to be removed 2731 * CPU. Existing tasks will remain running there and will be taken 2732 * off. 2733 * 2734 * This means that fallback selection must not select !active CPUs. 2735 * And can assume that any active CPU must be online. Conversely 2736 * select_task_rq() below may allow selection of !active CPUs in order 2737 * to satisfy the above rules. 2738 */ 2739 static int select_fallback_rq(int cpu, struct task_struct *p) 2740 { 2741 int nid = cpu_to_node(cpu); 2742 const struct cpumask *nodemask = NULL; 2743 enum { cpuset, possible, fail } state = cpuset; 2744 int dest_cpu; 2745 2746 /* 2747 * If the node that the CPU is on has been offlined, cpu_to_node() 2748 * will return -1. There is no CPU on the node, and we should 2749 * select the CPU on the other node. 2750 */ 2751 if (nid != -1) { 2752 nodemask = cpumask_of_node(nid); 2753 2754 /* Look for allowed, online CPU in same node. */ 2755 for_each_cpu(dest_cpu, nodemask) { 2756 if (!cpu_active(dest_cpu)) 2757 continue; 2758 if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) 2759 return dest_cpu; 2760 } 2761 } 2762 2763 for (;;) { 2764 /* Any allowed, online CPU? */ 2765 for_each_cpu(dest_cpu, p->cpus_ptr) { 2766 if (!is_cpu_allowed(p, dest_cpu)) 2767 continue; 2768 2769 goto out; 2770 } 2771 2772 /* No more Mr. Nice Guy. */ 2773 switch (state) { 2774 case cpuset: 2775 if (IS_ENABLED(CONFIG_CPUSETS)) { 2776 cpuset_cpus_allowed_fallback(p); 2777 state = possible; 2778 break; 2779 } 2780 fallthrough; 2781 case possible: 2782 /* 2783 * XXX When called from select_task_rq() we only 2784 * hold p->pi_lock and again violate locking order. 2785 * 2786 * More yuck to audit. 2787 */ 2788 do_set_cpus_allowed(p, cpu_possible_mask); 2789 state = fail; 2790 break; 2791 2792 case fail: 2793 BUG(); 2794 break; 2795 } 2796 } 2797 2798 out: 2799 if (state != cpuset) { 2800 /* 2801 * Don't tell them about moving exiting tasks or 2802 * kernel threads (both mm NULL), since they never 2803 * leave kernel. 2804 */ 2805 if (p->mm && printk_ratelimit()) { 2806 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 2807 task_pid_nr(p), p->comm, cpu); 2808 } 2809 } 2810 2811 return dest_cpu; 2812 } 2813 2814 /* 2815 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 2816 */ 2817 static inline 2818 int select_task_rq(struct task_struct *p, int cpu, int wake_flags) 2819 { 2820 lockdep_assert_held(&p->pi_lock); 2821 2822 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) 2823 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags); 2824 else 2825 cpu = cpumask_any(p->cpus_ptr); 2826 2827 /* 2828 * In order not to call set_task_cpu() on a blocking task we need 2829 * to rely on ttwu() to place the task on a valid ->cpus_ptr 2830 * CPU. 2831 * 2832 * Since this is common to all placement strategies, this lives here. 
2833 * 2834 * [ this allows ->select_task() to simply return task_cpu(p) and 2835 * not worry about this generic constraint ] 2836 */ 2837 if (unlikely(!is_cpu_allowed(p, cpu))) 2838 cpu = select_fallback_rq(task_cpu(p), p); 2839 2840 return cpu; 2841 } 2842 2843 void sched_set_stop_task(int cpu, struct task_struct *stop) 2844 { 2845 static struct lock_class_key stop_pi_lock; 2846 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 2847 struct task_struct *old_stop = cpu_rq(cpu)->stop; 2848 2849 if (stop) { 2850 /* 2851 * Make it appear like a SCHED_FIFO task, its something 2852 * userspace knows about and won't get confused about. 2853 * 2854 * Also, it will make PI more or less work without too 2855 * much confusion -- but then, stop work should not 2856 * rely on PI working anyway. 2857 */ 2858 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 2859 2860 stop->sched_class = &stop_sched_class; 2861 2862 /* 2863 * The PI code calls rt_mutex_setprio() with ->pi_lock held to 2864 * adjust the effective priority of a task. As a result, 2865 * rt_mutex_setprio() can trigger (RT) balancing operations, 2866 * which can then trigger wakeups of the stop thread to push 2867 * around the current task. 2868 * 2869 * The stop task itself will never be part of the PI-chain, it 2870 * never blocks, therefore that ->pi_lock recursion is safe. 2871 * Tell lockdep about this by placing the stop->pi_lock in its 2872 * own class. 2873 */ 2874 lockdep_set_class(&stop->pi_lock, &stop_pi_lock); 2875 } 2876 2877 cpu_rq(cpu)->stop = stop; 2878 2879 if (old_stop) { 2880 /* 2881 * Reset it back to a normal scheduling class so that 2882 * it can die in pieces. 2883 */ 2884 old_stop->sched_class = &rt_sched_class; 2885 } 2886 } 2887 2888 #else /* CONFIG_SMP */ 2889 2890 static inline int __set_cpus_allowed_ptr(struct task_struct *p, 2891 const struct cpumask *new_mask, 2892 u32 flags) 2893 { 2894 return set_cpus_allowed_ptr(p, new_mask); 2895 } 2896 2897 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } 2898 2899 static inline bool rq_has_pinned_tasks(struct rq *rq) 2900 { 2901 return false; 2902 } 2903 2904 #endif /* !CONFIG_SMP */ 2905 2906 static void 2907 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 2908 { 2909 struct rq *rq; 2910 2911 if (!schedstat_enabled()) 2912 return; 2913 2914 rq = this_rq(); 2915 2916 #ifdef CONFIG_SMP 2917 if (cpu == rq->cpu) { 2918 __schedstat_inc(rq->ttwu_local); 2919 __schedstat_inc(p->se.statistics.nr_wakeups_local); 2920 } else { 2921 struct sched_domain *sd; 2922 2923 __schedstat_inc(p->se.statistics.nr_wakeups_remote); 2924 rcu_read_lock(); 2925 for_each_domain(rq->cpu, sd) { 2926 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 2927 __schedstat_inc(sd->ttwu_wake_remote); 2928 break; 2929 } 2930 } 2931 rcu_read_unlock(); 2932 } 2933 2934 if (wake_flags & WF_MIGRATED) 2935 __schedstat_inc(p->se.statistics.nr_wakeups_migrate); 2936 #endif /* CONFIG_SMP */ 2937 2938 __schedstat_inc(rq->ttwu_count); 2939 __schedstat_inc(p->se.statistics.nr_wakeups); 2940 2941 if (wake_flags & WF_SYNC) 2942 __schedstat_inc(p->se.statistics.nr_wakeups_sync); 2943 } 2944 2945 /* 2946 * Mark the task runnable and perform wakeup-preemption. 
2947 */ 2948 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, 2949 struct rq_flags *rf) 2950 { 2951 check_preempt_curr(rq, p, wake_flags); 2952 p->state = TASK_RUNNING; 2953 trace_sched_wakeup(p); 2954 2955 #ifdef CONFIG_SMP 2956 if (p->sched_class->task_woken) { 2957 /* 2958 * Our task @p is fully woken up and running; so it's safe to 2959 * drop the rq->lock, hereafter rq is only used for statistics. 2960 */ 2961 rq_unpin_lock(rq, rf); 2962 p->sched_class->task_woken(rq, p); 2963 rq_repin_lock(rq, rf); 2964 } 2965 2966 if (rq->idle_stamp) { 2967 u64 delta = rq_clock(rq) - rq->idle_stamp; 2968 u64 max = 2*rq->max_idle_balance_cost; 2969 2970 update_avg(&rq->avg_idle, delta); 2971 2972 if (rq->avg_idle > max) 2973 rq->avg_idle = max; 2974 2975 rq->idle_stamp = 0; 2976 } 2977 #endif 2978 } 2979 2980 static void 2981 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 2982 struct rq_flags *rf) 2983 { 2984 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 2985 2986 lockdep_assert_held(&rq->lock); 2987 2988 if (p->sched_contributes_to_load) 2989 rq->nr_uninterruptible--; 2990 2991 #ifdef CONFIG_SMP 2992 if (wake_flags & WF_MIGRATED) 2993 en_flags |= ENQUEUE_MIGRATED; 2994 else 2995 #endif 2996 if (p->in_iowait) { 2997 delayacct_blkio_end(p); 2998 atomic_dec(&task_rq(p)->nr_iowait); 2999 } 3000 3001 activate_task(rq, p, en_flags); 3002 ttwu_do_wakeup(rq, p, wake_flags, rf); 3003 } 3004 3005 /* 3006 * Consider @p being inside a wait loop: 3007 * 3008 * for (;;) { 3009 * set_current_state(TASK_UNINTERRUPTIBLE); 3010 * 3011 * if (CONDITION) 3012 * break; 3013 * 3014 * schedule(); 3015 * } 3016 * __set_current_state(TASK_RUNNING); 3017 * 3018 * between set_current_state() and schedule(). In this case @p is still 3019 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in 3020 * an atomic manner. 3021 * 3022 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq 3023 * then schedule() must still happen and p->state can be changed to 3024 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we 3025 * need to do a full wakeup with enqueue. 3026 * 3027 * Returns: %true when the wakeup is done, 3028 * %false otherwise. 3029 */ 3030 static int ttwu_runnable(struct task_struct *p, int wake_flags) 3031 { 3032 struct rq_flags rf; 3033 struct rq *rq; 3034 int ret = 0; 3035 3036 rq = __task_rq_lock(p, &rf); 3037 if (task_on_rq_queued(p)) { 3038 /* check_preempt_curr() may use rq clock */ 3039 update_rq_clock(rq); 3040 ttwu_do_wakeup(rq, p, wake_flags, &rf); 3041 ret = 1; 3042 } 3043 __task_rq_unlock(rq, &rf); 3044 3045 return ret; 3046 } 3047 3048 #ifdef CONFIG_SMP 3049 void sched_ttwu_pending(void *arg) 3050 { 3051 struct llist_node *llist = arg; 3052 struct rq *rq = this_rq(); 3053 struct task_struct *p, *t; 3054 struct rq_flags rf; 3055 3056 if (!llist) 3057 return; 3058 3059 /* 3060 * rq::ttwu_pending racy indication of out-standing wakeups. 3061 * Races such that false-negatives are possible, since they 3062 * are shorter lived that false-positives would be. 3063 */ 3064 WRITE_ONCE(rq->ttwu_pending, 0); 3065 3066 rq_lock_irqsave(rq, &rf); 3067 update_rq_clock(rq); 3068 3069 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { 3070 if (WARN_ON_ONCE(p->on_cpu)) 3071 smp_cond_load_acquire(&p->on_cpu, !VAL); 3072 3073 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) 3074 set_task_cpu(p, cpu_of(rq)); 3075 3076 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? 
WF_MIGRATED : 0, &rf); 3077 } 3078 3079 rq_unlock_irqrestore(rq, &rf); 3080 } 3081 3082 void send_call_function_single_ipi(int cpu) 3083 { 3084 struct rq *rq = cpu_rq(cpu); 3085 3086 if (!set_nr_if_polling(rq->idle)) 3087 arch_send_call_function_single_ipi(cpu); 3088 else 3089 trace_sched_wake_idle_without_ipi(cpu); 3090 } 3091 3092 /* 3093 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if 3094 * necessary. The wakee CPU on receipt of the IPI will queue the task 3095 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost 3096 * of the wakeup instead of the waker. 3097 */ 3098 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3099 { 3100 struct rq *rq = cpu_rq(cpu); 3101 3102 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 3103 3104 WRITE_ONCE(rq->ttwu_pending, 1); 3105 __smp_call_single_queue(cpu, &p->wake_entry.llist); 3106 } 3107 3108 void wake_up_if_idle(int cpu) 3109 { 3110 struct rq *rq = cpu_rq(cpu); 3111 struct rq_flags rf; 3112 3113 rcu_read_lock(); 3114 3115 if (!is_idle_task(rcu_dereference(rq->curr))) 3116 goto out; 3117 3118 if (set_nr_if_polling(rq->idle)) { 3119 trace_sched_wake_idle_without_ipi(cpu); 3120 } else { 3121 rq_lock_irqsave(rq, &rf); 3122 if (is_idle_task(rq->curr)) 3123 smp_send_reschedule(cpu); 3124 /* Else CPU is not idle, do nothing here: */ 3125 rq_unlock_irqrestore(rq, &rf); 3126 } 3127 3128 out: 3129 rcu_read_unlock(); 3130 } 3131 3132 bool cpus_share_cache(int this_cpu, int that_cpu) 3133 { 3134 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 3135 } 3136 3137 static inline bool ttwu_queue_cond(int cpu, int wake_flags) 3138 { 3139 /* 3140 * Do not complicate things with the async wake_list while the CPU is 3141 * in hotplug state. 3142 */ 3143 if (!cpu_active(cpu)) 3144 return false; 3145 3146 /* 3147 * If the CPU does not share cache, then queue the task on the 3148 * remote rqs wakelist to avoid accessing remote data. 3149 */ 3150 if (!cpus_share_cache(smp_processor_id(), cpu)) 3151 return true; 3152 3153 /* 3154 * If the task is descheduling and the only running task on the 3155 * CPU then use the wakelist to offload the task activation to 3156 * the soon-to-be-idle CPU as the current CPU is likely busy. 3157 * nr_running is checked to avoid unnecessary task stacking. 3158 */ 3159 if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1) 3160 return true; 3161 3162 return false; 3163 } 3164 3165 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3166 { 3167 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) { 3168 if (WARN_ON_ONCE(cpu == smp_processor_id())) 3169 return false; 3170 3171 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ 3172 __ttwu_queue_wakelist(p, cpu, wake_flags); 3173 return true; 3174 } 3175 3176 return false; 3177 } 3178 3179 #else /* !CONFIG_SMP */ 3180 3181 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3182 { 3183 return false; 3184 } 3185 3186 #endif /* CONFIG_SMP */ 3187 3188 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 3189 { 3190 struct rq *rq = cpu_rq(cpu); 3191 struct rq_flags rf; 3192 3193 if (ttwu_queue_wakelist(p, cpu, wake_flags)) 3194 return; 3195 3196 rq_lock(rq, &rf); 3197 update_rq_clock(rq); 3198 ttwu_do_activate(rq, p, wake_flags, &rf); 3199 rq_unlock(rq, &rf); 3200 } 3201 3202 /* 3203 * Notes on Program-Order guarantees on SMP systems. 
3204 * 3205 * MIGRATION 3206 * 3207 * The basic program-order guarantee on SMP systems is that when a task [t] 3208 * migrates, all its activity on its old CPU [c0] happens-before any subsequent 3209 * execution on its new CPU [c1]. 3210 * 3211 * For migration (of runnable tasks) this is provided by the following means: 3212 * 3213 * A) UNLOCK of the rq(c0)->lock scheduling out task t 3214 * B) migration for t is required to synchronize *both* rq(c0)->lock and 3215 * rq(c1)->lock (if not at the same time, then in that order). 3216 * C) LOCK of the rq(c1)->lock scheduling in task 3217 * 3218 * Release/acquire chaining guarantees that B happens after A and C after B. 3219 * Note: the CPU doing B need not be c0 or c1 3220 * 3221 * Example: 3222 * 3223 * CPU0 CPU1 CPU2 3224 * 3225 * LOCK rq(0)->lock 3226 * sched-out X 3227 * sched-in Y 3228 * UNLOCK rq(0)->lock 3229 * 3230 * LOCK rq(0)->lock // orders against CPU0 3231 * dequeue X 3232 * UNLOCK rq(0)->lock 3233 * 3234 * LOCK rq(1)->lock 3235 * enqueue X 3236 * UNLOCK rq(1)->lock 3237 * 3238 * LOCK rq(1)->lock // orders against CPU2 3239 * sched-out Z 3240 * sched-in X 3241 * UNLOCK rq(1)->lock 3242 * 3243 * 3244 * BLOCKING -- aka. SLEEP + WAKEUP 3245 * 3246 * For blocking we (obviously) need to provide the same guarantee as for 3247 * migration. However the means are completely different as there is no lock 3248 * chain to provide order. Instead we do: 3249 * 3250 * 1) smp_store_release(X->on_cpu, 0) -- finish_task() 3251 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up() 3252 * 3253 * Example: 3254 * 3255 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 3256 * 3257 * LOCK rq(0)->lock LOCK X->pi_lock 3258 * dequeue X 3259 * sched-out X 3260 * smp_store_release(X->on_cpu, 0); 3261 * 3262 * smp_cond_load_acquire(&X->on_cpu, !VAL); 3263 * X->state = WAKING 3264 * set_task_cpu(X,2) 3265 * 3266 * LOCK rq(2)->lock 3267 * enqueue X 3268 * X->state = RUNNING 3269 * UNLOCK rq(2)->lock 3270 * 3271 * LOCK rq(2)->lock // orders against CPU1 3272 * sched-out Z 3273 * sched-in X 3274 * UNLOCK rq(2)->lock 3275 * 3276 * UNLOCK X->pi_lock 3277 * UNLOCK rq(0)->lock 3278 * 3279 * 3280 * However, for wakeups there is a second guarantee we must provide, namely we 3281 * must ensure that CONDITION=1 done by the caller can not be reordered with 3282 * accesses to the task state; see try_to_wake_up() and set_current_state(). 3283 */ 3284 3285 /** 3286 * try_to_wake_up - wake up a thread 3287 * @p: the thread to be awakened 3288 * @state: the mask of task states that can be woken 3289 * @wake_flags: wake modifier flags (WF_*) 3290 * 3291 * Conceptually does: 3292 * 3293 * If (@state & @p->state) @p->state = TASK_RUNNING. 3294 * 3295 * If the task was not queued/runnable, also place it back on a runqueue. 3296 * 3297 * This function is atomic against schedule() which would dequeue the task. 3298 * 3299 * It issues a full memory barrier before accessing @p->state, see the comment 3300 * with set_current_state(). 3301 * 3302 * Uses p->pi_lock to serialize against concurrent wake-ups. 3303 * 3304 * Relies on p->pi_lock stabilizing: 3305 * - p->sched_class 3306 * - p->cpus_ptr 3307 * - p->sched_task_group 3308 * in order to do migration, see its use of select_task_rq()/set_task_cpu(). 3309 * 3310 * Tries really hard to only take one task_rq(p)->lock for performance. 
3311 * Takes rq->lock in:
3312 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
3313 * - ttwu_queue() -- new rq, for enqueue of the task;
3314 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
3315 *
3316 * As a consequence we race really badly with just about everything. See the
3317 * many memory barriers and their comments for details.
3318 *
3319 * Return: %true if @p->state changes (an actual wakeup was done),
3320 * %false otherwise.
3321 */
3322 static int
3323 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
3324 {
3325 unsigned long flags;
3326 int cpu, success = 0;
3327
3328 preempt_disable();
3329 if (p == current) {
3330 /*
3331 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
3332 * == smp_processor_id()'. Together this means we can special
3333 * case the whole 'p->on_rq && ttwu_runnable()' case below
3334 * without taking any locks.
3335 *
3336 * In particular:
3337 * - we rely on Program-Order guarantees for all the ordering,
3338 * - we're serialized against set_special_state() by virtue of
3339 * it disabling IRQs (this allows not taking ->pi_lock).
3340 */
3341 if (!(p->state & state))
3342 goto out;
3343
3344 success = 1;
3345 trace_sched_waking(p);
3346 p->state = TASK_RUNNING;
3347 trace_sched_wakeup(p);
3348 goto out;
3349 }
3350
3351 /*
3352 * If we are going to wake up a thread waiting for CONDITION we
3353 * need to ensure that CONDITION=1 done by the caller can not be
3354 * reordered with p->state check below. This pairs with smp_store_mb()
3355 * in set_current_state() that the waiting thread does.
3356 */
3357 raw_spin_lock_irqsave(&p->pi_lock, flags);
3358 smp_mb__after_spinlock();
3359 if (!(p->state & state))
3360 goto unlock;
3361
3362 trace_sched_waking(p);
3363
3364 /* We're going to change ->state: */
3365 success = 1;
3366
3367 /*
3368 * Ensure we load p->on_rq _after_ p->state, otherwise it would
3369 * be possible to, falsely, observe p->on_rq == 0 and get stuck
3370 * in smp_cond_load_acquire() below.
3371 *
3372 * sched_ttwu_pending() try_to_wake_up()
3373 * STORE p->on_rq = 1 LOAD p->state
3374 * UNLOCK rq->lock
3375 *
3376 * __schedule() (switch to task 'p')
3377 * LOCK rq->lock smp_rmb();
3378 * smp_mb__after_spinlock();
3379 * UNLOCK rq->lock
3380 *
3381 * [task p]
3382 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
3383 *
3384 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
3385 * __schedule(). See the comment for smp_mb__after_spinlock().
3386 *
3387 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
3388 */
3389 smp_rmb();
3390 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
3391 goto unlock;
3392
3393 #ifdef CONFIG_SMP
3394 /*
3395 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
3396 * possible to, falsely, observe p->on_cpu == 0.
3397 *
3398 * One must be running (->on_cpu == 1) in order to remove oneself
3399 * from the runqueue.
3400 *
3401 * __schedule() (switch to task 'p') try_to_wake_up()
3402 * STORE p->on_cpu = 1 LOAD p->on_rq
3403 * UNLOCK rq->lock
3404 *
3405 * __schedule() (put 'p' to sleep)
3406 * LOCK rq->lock smp_rmb();
3407 * smp_mb__after_spinlock();
3408 * STORE p->on_rq = 0 LOAD p->on_cpu
3409 *
3410 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
3411 * __schedule(). See the comment for smp_mb__after_spinlock().
3412 *
3413 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
3414 * schedule()'s deactivate_task() has 'happened' and p will no longer
3415 * care about its own p->state. See the comment in __schedule().
3416 */
3417 smp_acquire__after_ctrl_dep();
3418
3419 /*
3420 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
3421 * == 0), which means we need to do an enqueue, change p->state to
3422 * TASK_WAKING such that we can unlock p->pi_lock before doing the
3423 * enqueue, such as ttwu_queue_wakelist().
3424 */
3425 p->state = TASK_WAKING;
3426
3427 /*
3428 * If the owning (remote) CPU is still in the middle of schedule() with
3429 * this task as prev, consider queueing p on the remote CPU's wake_list
3430 * which potentially sends an IPI instead of spinning on p->on_cpu to
3431 * let the waker make forward progress. This is safe because IRQs are
3432 * disabled and the IPI will deliver after on_cpu is cleared.
3433 *
3434 * Ensure we load task_cpu(p) after p->on_cpu:
3435 *
3436 * set_task_cpu(p, cpu);
3437 * STORE p->cpu = @cpu
3438 * __schedule() (switch to task 'p')
3439 * LOCK rq->lock
3440 * smp_mb__after_spinlock() smp_cond_load_acquire(&p->on_cpu)
3441 * STORE p->on_cpu = 1 LOAD p->cpu
3442 *
3443 * to ensure we observe the correct CPU on which the task is currently
3444 * scheduling.
3445 */
3446 if (smp_load_acquire(&p->on_cpu) &&
3447 ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
3448 goto unlock;
3449
3450 /*
3451 * If the owning (remote) CPU is still in the middle of schedule() with
3452 * this task as prev, wait until it's done referencing the task.
3453 *
3454 * Pairs with the smp_store_release() in finish_task().
3455 *
3456 * This ensures that tasks getting woken will be fully ordered against
3457 * their previous state and preserve Program Order.
3458 */
3459 smp_cond_load_acquire(&p->on_cpu, !VAL);
3460
3461 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
3462 if (task_cpu(p) != cpu) {
3463 if (p->in_iowait) {
3464 delayacct_blkio_end(p);
3465 atomic_dec(&task_rq(p)->nr_iowait);
3466 }
3467
3468 wake_flags |= WF_MIGRATED;
3469 psi_ttwu_dequeue(p);
3470 set_task_cpu(p, cpu);
3471 }
3472 #else
3473 cpu = task_cpu(p);
3474 #endif /* CONFIG_SMP */
3475
3476 ttwu_queue(p, cpu, wake_flags);
3477 unlock:
3478 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3479 out:
3480 if (success)
3481 ttwu_stat(p, task_cpu(p), wake_flags);
3482 preempt_enable();
3483
3484 return success;
3485 }
3486
3487 /**
3488 * try_invoke_on_locked_down_task - Invoke a function on task in fixed state
3489 * @p: Process for which the function is to be invoked.
3490 * @func: Function to invoke.
3491 * @arg: Argument to function.
3492 *
3493 * If the specified task can be quickly locked into a definite state
3494 * (either sleeping or on a given runqueue), arrange to keep it in that
3495 * state while invoking @func(@arg). This function can use ->on_rq and
3496 * task_curr() to work out what the state is, if required. Given that
3497 * @func can be invoked with a runqueue lock held, it had better be quite
3498 * lightweight.
3499 *
3500 * Returns:
3501 * @false if the task slipped out from under the locks.
3502 * @true if the task was locked onto a runqueue or is sleeping.
3503 * However, @func can override this by returning @false.
3504 */ 3505 bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg) 3506 { 3507 bool ret = false; 3508 struct rq_flags rf; 3509 struct rq *rq; 3510 3511 lockdep_assert_irqs_enabled(); 3512 raw_spin_lock_irq(&p->pi_lock); 3513 if (p->on_rq) { 3514 rq = __task_rq_lock(p, &rf); 3515 if (task_rq(p) == rq) 3516 ret = func(p, arg); 3517 rq_unlock(rq, &rf); 3518 } else { 3519 switch (p->state) { 3520 case TASK_RUNNING: 3521 case TASK_WAKING: 3522 break; 3523 default: 3524 smp_rmb(); // See smp_rmb() comment in try_to_wake_up(). 3525 if (!p->on_rq) 3526 ret = func(p, arg); 3527 } 3528 } 3529 raw_spin_unlock_irq(&p->pi_lock); 3530 return ret; 3531 } 3532 3533 /** 3534 * wake_up_process - Wake up a specific process 3535 * @p: The process to be woken up. 3536 * 3537 * Attempt to wake up the nominated process and move it to the set of runnable 3538 * processes. 3539 * 3540 * Return: 1 if the process was woken up, 0 if it was already running. 3541 * 3542 * This function executes a full memory barrier before accessing the task state. 3543 */ 3544 int wake_up_process(struct task_struct *p) 3545 { 3546 return try_to_wake_up(p, TASK_NORMAL, 0); 3547 } 3548 EXPORT_SYMBOL(wake_up_process); 3549 3550 int wake_up_state(struct task_struct *p, unsigned int state) 3551 { 3552 return try_to_wake_up(p, state, 0); 3553 } 3554 3555 /* 3556 * Perform scheduler related setup for a newly forked process p. 3557 * p is forked by current. 3558 * 3559 * __sched_fork() is basic setup used by init_idle() too: 3560 */ 3561 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 3562 { 3563 p->on_rq = 0; 3564 3565 p->se.on_rq = 0; 3566 p->se.exec_start = 0; 3567 p->se.sum_exec_runtime = 0; 3568 p->se.prev_sum_exec_runtime = 0; 3569 p->se.nr_migrations = 0; 3570 p->se.vruntime = 0; 3571 INIT_LIST_HEAD(&p->se.group_node); 3572 3573 #ifdef CONFIG_FAIR_GROUP_SCHED 3574 p->se.cfs_rq = NULL; 3575 #endif 3576 3577 #ifdef CONFIG_SCHEDSTATS 3578 /* Even if schedstat is disabled, there should not be garbage */ 3579 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 3580 #endif 3581 3582 RB_CLEAR_NODE(&p->dl.rb_node); 3583 init_dl_task_timer(&p->dl); 3584 init_dl_inactive_task_timer(&p->dl); 3585 __dl_clear_params(p); 3586 3587 INIT_LIST_HEAD(&p->rt.run_list); 3588 p->rt.timeout = 0; 3589 p->rt.time_slice = sched_rr_timeslice; 3590 p->rt.on_rq = 0; 3591 p->rt.on_list = 0; 3592 3593 #ifdef CONFIG_PREEMPT_NOTIFIERS 3594 INIT_HLIST_HEAD(&p->preempt_notifiers); 3595 #endif 3596 3597 #ifdef CONFIG_COMPACTION 3598 p->capture_control = NULL; 3599 #endif 3600 init_numa_balancing(clone_flags, p); 3601 #ifdef CONFIG_SMP 3602 p->wake_entry.u_flags = CSD_TYPE_TTWU; 3603 p->migration_pending = NULL; 3604 #endif 3605 } 3606 3607 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 3608 3609 #ifdef CONFIG_NUMA_BALANCING 3610 3611 void set_numabalancing_state(bool enabled) 3612 { 3613 if (enabled) 3614 static_branch_enable(&sched_numa_balancing); 3615 else 3616 static_branch_disable(&sched_numa_balancing); 3617 } 3618 3619 #ifdef CONFIG_PROC_SYSCTL 3620 int sysctl_numa_balancing(struct ctl_table *table, int write, 3621 void *buffer, size_t *lenp, loff_t *ppos) 3622 { 3623 struct ctl_table t; 3624 int err; 3625 int state = static_branch_likely(&sched_numa_balancing); 3626 3627 if (write && !capable(CAP_SYS_ADMIN)) 3628 return -EPERM; 3629 3630 t = *table; 3631 t.data = &state; 3632 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 3633 if (err < 0) 3634 return err; 
3635 if (write) 3636 set_numabalancing_state(state); 3637 return err; 3638 } 3639 #endif 3640 #endif 3641 3642 #ifdef CONFIG_SCHEDSTATS 3643 3644 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 3645 static bool __initdata __sched_schedstats = false; 3646 3647 static void set_schedstats(bool enabled) 3648 { 3649 if (enabled) 3650 static_branch_enable(&sched_schedstats); 3651 else 3652 static_branch_disable(&sched_schedstats); 3653 } 3654 3655 void force_schedstat_enabled(void) 3656 { 3657 if (!schedstat_enabled()) { 3658 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 3659 static_branch_enable(&sched_schedstats); 3660 } 3661 } 3662 3663 static int __init setup_schedstats(char *str) 3664 { 3665 int ret = 0; 3666 if (!str) 3667 goto out; 3668 3669 /* 3670 * This code is called before jump labels have been set up, so we can't 3671 * change the static branch directly just yet. Instead set a temporary 3672 * variable so init_schedstats() can do it later. 3673 */ 3674 if (!strcmp(str, "enable")) { 3675 __sched_schedstats = true; 3676 ret = 1; 3677 } else if (!strcmp(str, "disable")) { 3678 __sched_schedstats = false; 3679 ret = 1; 3680 } 3681 out: 3682 if (!ret) 3683 pr_warn("Unable to parse schedstats=\n"); 3684 3685 return ret; 3686 } 3687 __setup("schedstats=", setup_schedstats); 3688 3689 static void __init init_schedstats(void) 3690 { 3691 set_schedstats(__sched_schedstats); 3692 } 3693 3694 #ifdef CONFIG_PROC_SYSCTL 3695 int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, 3696 size_t *lenp, loff_t *ppos) 3697 { 3698 struct ctl_table t; 3699 int err; 3700 int state = static_branch_likely(&sched_schedstats); 3701 3702 if (write && !capable(CAP_SYS_ADMIN)) 3703 return -EPERM; 3704 3705 t = *table; 3706 t.data = &state; 3707 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 3708 if (err < 0) 3709 return err; 3710 if (write) 3711 set_schedstats(state); 3712 return err; 3713 } 3714 #endif /* CONFIG_PROC_SYSCTL */ 3715 #else /* !CONFIG_SCHEDSTATS */ 3716 static inline void init_schedstats(void) {} 3717 #endif /* CONFIG_SCHEDSTATS */ 3718 3719 /* 3720 * fork()/clone()-time setup: 3721 */ 3722 int sched_fork(unsigned long clone_flags, struct task_struct *p) 3723 { 3724 unsigned long flags; 3725 3726 __sched_fork(clone_flags, p); 3727 /* 3728 * We mark the process as NEW here. This guarantees that 3729 * nobody will actually run it, and a signal or other external 3730 * event cannot wake it up and insert it on the runqueue either. 3731 */ 3732 p->state = TASK_NEW; 3733 3734 /* 3735 * Make sure we do not leak PI boosting priority to the child. 3736 */ 3737 p->prio = current->normal_prio; 3738 3739 uclamp_fork(p); 3740 3741 /* 3742 * Revert to default priority/policy on fork if requested. 3743 */ 3744 if (unlikely(p->sched_reset_on_fork)) { 3745 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 3746 p->policy = SCHED_NORMAL; 3747 p->static_prio = NICE_TO_PRIO(0); 3748 p->rt_priority = 0; 3749 } else if (PRIO_TO_NICE(p->static_prio) < 0) 3750 p->static_prio = NICE_TO_PRIO(0); 3751 3752 p->prio = p->normal_prio = __normal_prio(p); 3753 set_load_weight(p, false); 3754 3755 /* 3756 * We don't need the reset flag anymore after the fork. 
It has 3757 * fulfilled its duty: 3758 */ 3759 p->sched_reset_on_fork = 0; 3760 } 3761 3762 if (dl_prio(p->prio)) 3763 return -EAGAIN; 3764 else if (rt_prio(p->prio)) 3765 p->sched_class = &rt_sched_class; 3766 else 3767 p->sched_class = &fair_sched_class; 3768 3769 init_entity_runnable_average(&p->se); 3770 3771 /* 3772 * The child is not yet in the pid-hash so no cgroup attach races, 3773 * and the cgroup is pinned to this child due to cgroup_fork() 3774 * is ran before sched_fork(). 3775 * 3776 * Silence PROVE_RCU. 3777 */ 3778 raw_spin_lock_irqsave(&p->pi_lock, flags); 3779 rseq_migrate(p); 3780 /* 3781 * We're setting the CPU for the first time, we don't migrate, 3782 * so use __set_task_cpu(). 3783 */ 3784 __set_task_cpu(p, smp_processor_id()); 3785 if (p->sched_class->task_fork) 3786 p->sched_class->task_fork(p); 3787 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3788 3789 #ifdef CONFIG_SCHED_INFO 3790 if (likely(sched_info_on())) 3791 memset(&p->sched_info, 0, sizeof(p->sched_info)); 3792 #endif 3793 #if defined(CONFIG_SMP) 3794 p->on_cpu = 0; 3795 #endif 3796 init_task_preempt_count(p); 3797 #ifdef CONFIG_SMP 3798 plist_node_init(&p->pushable_tasks, MAX_PRIO); 3799 RB_CLEAR_NODE(&p->pushable_dl_tasks); 3800 #endif 3801 return 0; 3802 } 3803 3804 void sched_post_fork(struct task_struct *p) 3805 { 3806 uclamp_post_fork(p); 3807 } 3808 3809 unsigned long to_ratio(u64 period, u64 runtime) 3810 { 3811 if (runtime == RUNTIME_INF) 3812 return BW_UNIT; 3813 3814 /* 3815 * Doing this here saves a lot of checks in all 3816 * the calling paths, and returning zero seems 3817 * safe for them anyway. 3818 */ 3819 if (period == 0) 3820 return 0; 3821 3822 return div64_u64(runtime << BW_SHIFT, period); 3823 } 3824 3825 /* 3826 * wake_up_new_task - wake up a newly created task for the first time. 3827 * 3828 * This function will do some initial scheduler statistics housekeeping 3829 * that must be done for every newly created context, then puts the task 3830 * on the runqueue and wakes it. 3831 */ 3832 void wake_up_new_task(struct task_struct *p) 3833 { 3834 struct rq_flags rf; 3835 struct rq *rq; 3836 3837 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 3838 p->state = TASK_RUNNING; 3839 #ifdef CONFIG_SMP 3840 /* 3841 * Fork balancing, do it here and not earlier because: 3842 * - cpus_ptr can change in the fork path 3843 * - any previously selected CPU might disappear through hotplug 3844 * 3845 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 3846 * as we're not fully set-up yet. 3847 */ 3848 p->recent_used_cpu = task_cpu(p); 3849 rseq_migrate(p); 3850 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK)); 3851 #endif 3852 rq = __task_rq_lock(p, &rf); 3853 update_rq_clock(rq); 3854 post_init_entity_util_avg(p); 3855 3856 activate_task(rq, p, ENQUEUE_NOCLOCK); 3857 trace_sched_wakeup_new(p); 3858 check_preempt_curr(rq, p, WF_FORK); 3859 #ifdef CONFIG_SMP 3860 if (p->sched_class->task_woken) { 3861 /* 3862 * Nothing relies on rq->lock after this, so it's fine to 3863 * drop it. 
3864 */
3865 rq_unpin_lock(rq, &rf);
3866 p->sched_class->task_woken(rq, p);
3867 rq_repin_lock(rq, &rf);
3868 }
3869 #endif
3870 task_rq_unlock(rq, p, &rf);
3871 }
3872
3873 #ifdef CONFIG_PREEMPT_NOTIFIERS
3874
3875 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
3876
3877 void preempt_notifier_inc(void)
3878 {
3879 static_branch_inc(&preempt_notifier_key);
3880 }
3881 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
3882
3883 void preempt_notifier_dec(void)
3884 {
3885 static_branch_dec(&preempt_notifier_key);
3886 }
3887 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
3888
3889 /**
3890 * preempt_notifier_register - tell me when current is being preempted & rescheduled
3891 * @notifier: notifier struct to register
3892 */
3893 void preempt_notifier_register(struct preempt_notifier *notifier)
3894 {
3895 if (!static_branch_unlikely(&preempt_notifier_key))
3896 WARN(1, "registering preempt_notifier while notifiers disabled\n");
3897
3898 hlist_add_head(&notifier->link, &current->preempt_notifiers);
3899 }
3900 EXPORT_SYMBOL_GPL(preempt_notifier_register);
3901
3902 /**
3903 * preempt_notifier_unregister - no longer interested in preemption notifications
3904 * @notifier: notifier struct to unregister
3905 *
3906 * This is *not* safe to call from within a preemption notifier.
3907 */
3908 void preempt_notifier_unregister(struct preempt_notifier *notifier)
3909 {
3910 hlist_del(&notifier->link);
3911 }
3912 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
3913
3914 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
3915 {
3916 struct preempt_notifier *notifier;
3917
3918 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
3919 notifier->ops->sched_in(notifier, raw_smp_processor_id());
3920 }
3921
3922 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
3923 {
3924 if (static_branch_unlikely(&preempt_notifier_key))
3925 __fire_sched_in_preempt_notifiers(curr);
3926 }
3927
3928 static void
3929 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
3930 struct task_struct *next)
3931 {
3932 struct preempt_notifier *notifier;
3933
3934 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
3935 notifier->ops->sched_out(notifier, next);
3936 }
3937
3938 static __always_inline void
3939 fire_sched_out_preempt_notifiers(struct task_struct *curr,
3940 struct task_struct *next)
3941 {
3942 if (static_branch_unlikely(&preempt_notifier_key))
3943 __fire_sched_out_preempt_notifiers(curr, next);
3944 }
3945
3946 #else /* !CONFIG_PREEMPT_NOTIFIERS */
3947
3948 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
3949 {
3950 }
3951
3952 static inline void
3953 fire_sched_out_preempt_notifiers(struct task_struct *curr,
3954 struct task_struct *next)
3955 {
3956 }
3957
3958 #endif /* CONFIG_PREEMPT_NOTIFIERS */
3959
3960 static inline void prepare_task(struct task_struct *next)
3961 {
3962 #ifdef CONFIG_SMP
3963 /*
3964 * Claim the task as running, we do this before switching to it
3965 * such that any running task will have this set.
3966 *
3967 * See the ttwu() WF_ON_CPU case and its ordering comment.
3968 */
3969 WRITE_ONCE(next->on_cpu, 1);
3970 #endif
3971 }
3972
3973 static inline void finish_task(struct task_struct *prev)
3974 {
3975 #ifdef CONFIG_SMP
3976 /*
3977 * This must be the very last reference to @prev from this CPU. After
3978 * p->on_cpu is cleared, the task can be moved to a different CPU. We
3979 * must ensure this doesn't happen until the switch is completely
3980 * finished.
3981 * 3982 * In particular, the load of prev->state in finish_task_switch() must 3983 * happen before this. 3984 * 3985 * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 3986 */ 3987 smp_store_release(&prev->on_cpu, 0); 3988 #endif 3989 } 3990 3991 #ifdef CONFIG_SMP 3992 3993 static void do_balance_callbacks(struct rq *rq, struct callback_head *head) 3994 { 3995 void (*func)(struct rq *rq); 3996 struct callback_head *next; 3997 3998 lockdep_assert_held(&rq->lock); 3999 4000 while (head) { 4001 func = (void (*)(struct rq *))head->func; 4002 next = head->next; 4003 head->next = NULL; 4004 head = next; 4005 4006 func(rq); 4007 } 4008 } 4009 4010 static void balance_push(struct rq *rq); 4011 4012 struct callback_head balance_push_callback = { 4013 .next = NULL, 4014 .func = (void (*)(struct callback_head *))balance_push, 4015 }; 4016 4017 static inline struct callback_head *splice_balance_callbacks(struct rq *rq) 4018 { 4019 struct callback_head *head = rq->balance_callback; 4020 4021 lockdep_assert_held(&rq->lock); 4022 if (head) 4023 rq->balance_callback = NULL; 4024 4025 return head; 4026 } 4027 4028 static void __balance_callbacks(struct rq *rq) 4029 { 4030 do_balance_callbacks(rq, splice_balance_callbacks(rq)); 4031 } 4032 4033 static inline void balance_callbacks(struct rq *rq, struct callback_head *head) 4034 { 4035 unsigned long flags; 4036 4037 if (unlikely(head)) { 4038 raw_spin_lock_irqsave(&rq->lock, flags); 4039 do_balance_callbacks(rq, head); 4040 raw_spin_unlock_irqrestore(&rq->lock, flags); 4041 } 4042 } 4043 4044 #else 4045 4046 static inline void __balance_callbacks(struct rq *rq) 4047 { 4048 } 4049 4050 static inline struct callback_head *splice_balance_callbacks(struct rq *rq) 4051 { 4052 return NULL; 4053 } 4054 4055 static inline void balance_callbacks(struct rq *rq, struct callback_head *head) 4056 { 4057 } 4058 4059 #endif 4060 4061 static inline void 4062 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) 4063 { 4064 /* 4065 * Since the runqueue lock will be released by the next 4066 * task (which is an invalid locking op but in the case 4067 * of the scheduler it's an obvious special-case), so we 4068 * do an early lockdep release here: 4069 */ 4070 rq_unpin_lock(rq, rf); 4071 spin_release(&rq->lock.dep_map, _THIS_IP_); 4072 #ifdef CONFIG_DEBUG_SPINLOCK 4073 /* this is a valid case when another task releases the spinlock */ 4074 rq->lock.owner = next; 4075 #endif 4076 } 4077 4078 static inline void finish_lock_switch(struct rq *rq) 4079 { 4080 /* 4081 * If we are tracking spinlock dependencies then we have to 4082 * fix up the runqueue lock - which gets 'carried over' from 4083 * prev into current: 4084 */ 4085 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 4086 __balance_callbacks(rq); 4087 raw_spin_unlock_irq(&rq->lock); 4088 } 4089 4090 /* 4091 * NOP if the arch has not defined these: 4092 */ 4093 4094 #ifndef prepare_arch_switch 4095 # define prepare_arch_switch(next) do { } while (0) 4096 #endif 4097 4098 #ifndef finish_arch_post_lock_switch 4099 # define finish_arch_post_lock_switch() do { } while (0) 4100 #endif 4101 4102 static inline void kmap_local_sched_out(void) 4103 { 4104 #ifdef CONFIG_KMAP_LOCAL 4105 if (unlikely(current->kmap_ctrl.idx)) 4106 __kmap_local_sched_out(); 4107 #endif 4108 } 4109 4110 static inline void kmap_local_sched_in(void) 4111 { 4112 #ifdef CONFIG_KMAP_LOCAL 4113 if (unlikely(current->kmap_ctrl.idx)) 4114 __kmap_local_sched_in(); 4115 #endif 4116 } 4117 4118 /** 4119 * prepare_task_switch 
- prepare to switch tasks 4120 * @rq: the runqueue preparing to switch 4121 * @prev: the current task that is being switched out 4122 * @next: the task we are going to switch to. 4123 * 4124 * This is called with the rq lock held and interrupts off. It must 4125 * be paired with a subsequent finish_task_switch after the context 4126 * switch. 4127 * 4128 * prepare_task_switch sets up locking and calls architecture specific 4129 * hooks. 4130 */ 4131 static inline void 4132 prepare_task_switch(struct rq *rq, struct task_struct *prev, 4133 struct task_struct *next) 4134 { 4135 kcov_prepare_switch(prev); 4136 sched_info_switch(rq, prev, next); 4137 perf_event_task_sched_out(prev, next); 4138 rseq_preempt(prev); 4139 fire_sched_out_preempt_notifiers(prev, next); 4140 kmap_local_sched_out(); 4141 prepare_task(next); 4142 prepare_arch_switch(next); 4143 } 4144 4145 /** 4146 * finish_task_switch - clean up after a task-switch 4147 * @prev: the thread we just switched away from. 4148 * 4149 * finish_task_switch must be called after the context switch, paired 4150 * with a prepare_task_switch call before the context switch. 4151 * finish_task_switch will reconcile locking set up by prepare_task_switch, 4152 * and do any other architecture-specific cleanup actions. 4153 * 4154 * Note that we may have delayed dropping an mm in context_switch(). If 4155 * so, we finish that here outside of the runqueue lock. (Doing it 4156 * with the lock held can cause deadlocks; see schedule() for 4157 * details.) 4158 * 4159 * The context switch has flipped the stack from under us and restored the 4160 * local variables which were saved when this task called schedule() in the 4161 * past. prev == current is still correct but we need to recalculate this_rq 4162 * because prev may have moved to another CPU. 4163 */ 4164 static struct rq *finish_task_switch(struct task_struct *prev) 4165 __releases(rq->lock) 4166 { 4167 struct rq *rq = this_rq(); 4168 struct mm_struct *mm = rq->prev_mm; 4169 long prev_state; 4170 4171 /* 4172 * The previous task will have left us with a preempt_count of 2 4173 * because it left us after: 4174 * 4175 * schedule() 4176 * preempt_disable(); // 1 4177 * __schedule() 4178 * raw_spin_lock_irq(&rq->lock) // 2 4179 * 4180 * Also, see FORK_PREEMPT_COUNT. 4181 */ 4182 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, 4183 "corrupted preempt_count: %s/%d/0x%x\n", 4184 current->comm, current->pid, preempt_count())) 4185 preempt_count_set(FORK_PREEMPT_COUNT); 4186 4187 rq->prev_mm = NULL; 4188 4189 /* 4190 * A task struct has one reference for the use as "current". 4191 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 4192 * schedule one last time. The schedule call will never return, and 4193 * the scheduled task must drop that reference. 4194 * 4195 * We must observe prev->state before clearing prev->on_cpu (in 4196 * finish_task), otherwise a concurrent wakeup can get prev 4197 * running on another CPU and we could race with its RUNNING -> DEAD 4198 * transition, resulting in a double drop. 4199 */ 4200 prev_state = prev->state; 4201 vtime_task_switch(prev); 4202 perf_event_task_sched_in(prev, current); 4203 finish_task(prev); 4204 finish_lock_switch(rq); 4205 finish_arch_post_lock_switch(); 4206 kcov_finish_switch(current); 4207 /* 4208 * kmap_local_sched_out() is invoked with rq::lock held and 4209 * interrupts disabled. There is no requirement for that, but the 4210 * sched out code does not have an interrupt enabled section.
4211 * Restoring the maps on sched in does not require interrupts being 4212 * disabled either. 4213 */ 4214 kmap_local_sched_in(); 4215 4216 fire_sched_in_preempt_notifiers(current); 4217 /* 4218 * When switching through a kernel thread, the loop in 4219 * membarrier_{private,global}_expedited() may have observed that 4220 * kernel thread and not issued an IPI. It is therefore possible to 4221 * schedule between user->kernel->user threads without passing though 4222 * switch_mm(). Membarrier requires a barrier after storing to 4223 * rq->curr, before returning to userspace, so provide them here: 4224 * 4225 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly 4226 * provided by mmdrop(), 4227 * - a sync_core for SYNC_CORE. 4228 */ 4229 if (mm) { 4230 membarrier_mm_sync_core_before_usermode(mm); 4231 mmdrop(mm); 4232 } 4233 if (unlikely(prev_state == TASK_DEAD)) { 4234 if (prev->sched_class->task_dead) 4235 prev->sched_class->task_dead(prev); 4236 4237 /* 4238 * Remove function-return probe instances associated with this 4239 * task and put them back on the free list. 4240 */ 4241 kprobe_flush_task(prev); 4242 4243 /* Task is done with its stack. */ 4244 put_task_stack(prev); 4245 4246 put_task_struct_rcu_user(prev); 4247 } 4248 4249 tick_nohz_task_switch(); 4250 return rq; 4251 } 4252 4253 /** 4254 * schedule_tail - first thing a freshly forked thread must call. 4255 * @prev: the thread we just switched away from. 4256 */ 4257 asmlinkage __visible void schedule_tail(struct task_struct *prev) 4258 __releases(rq->lock) 4259 { 4260 struct rq *rq; 4261 4262 /* 4263 * New tasks start with FORK_PREEMPT_COUNT, see there and 4264 * finish_task_switch() for details. 4265 * 4266 * finish_task_switch() will drop rq->lock() and lower preempt_count 4267 * and the preempt_enable() will end up enabling preemption (on 4268 * PREEMPT_COUNT kernels). 4269 */ 4270 4271 rq = finish_task_switch(prev); 4272 preempt_enable(); 4273 4274 if (current->set_child_tid) 4275 put_user(task_pid_vnr(current), current->set_child_tid); 4276 4277 calculate_sigpending(); 4278 } 4279 4280 /* 4281 * context_switch - switch to the new MM and the new thread's register state. 4282 */ 4283 static __always_inline struct rq * 4284 context_switch(struct rq *rq, struct task_struct *prev, 4285 struct task_struct *next, struct rq_flags *rf) 4286 { 4287 prepare_task_switch(rq, prev, next); 4288 4289 /* 4290 * For paravirt, this is coupled with an exit in switch_to to 4291 * combine the page table reload and the switch backend into 4292 * one hypercall. 4293 */ 4294 arch_start_context_switch(prev); 4295 4296 /* 4297 * kernel -> kernel lazy + transfer active 4298 * user -> kernel lazy + mmgrab() active 4299 * 4300 * kernel -> user switch + mmdrop() active 4301 * user -> user switch 4302 */ 4303 if (!next->mm) { // to kernel 4304 enter_lazy_tlb(prev->active_mm, next); 4305 4306 next->active_mm = prev->active_mm; 4307 if (prev->mm) // from user 4308 mmgrab(prev->active_mm); 4309 else 4310 prev->active_mm = NULL; 4311 } else { // to user 4312 membarrier_switch_mm(rq, prev->active_mm, next->mm); 4313 /* 4314 * sys_membarrier() requires an smp_mb() between setting 4315 * rq->curr / membarrier_switch_mm() and returning to userspace. 4316 * 4317 * The below provides this either through switch_mm(), or in 4318 * case 'prev->active_mm == next->mm' through 4319 * finish_task_switch()'s mmdrop(). 
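 *
 * Note that the mmgrab() in the kernel-bound branch above pairs with the
 * mmdrop() issued from finish_task_switch() on the next kernel -> user
 * switch: a kernel thread only ever borrows ->active_mm, it does not keep
 * a reference of its own beyond that grab.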
4320 */ 4321 switch_mm_irqs_off(prev->active_mm, next->mm, next); 4322 4323 if (!prev->mm) { // from kernel 4324 /* will mmdrop() in finish_task_switch(). */ 4325 rq->prev_mm = prev->active_mm; 4326 prev->active_mm = NULL; 4327 } 4328 } 4329 4330 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 4331 4332 prepare_lock_switch(rq, next, rf); 4333 4334 /* Here we just switch the register state and the stack. */ 4335 switch_to(prev, next, prev); 4336 barrier(); 4337 4338 return finish_task_switch(prev); 4339 } 4340 4341 /* 4342 * nr_running and nr_context_switches: 4343 * 4344 * externally visible scheduler statistics: current number of runnable 4345 * threads, total number of context switches performed since bootup. 4346 */ 4347 unsigned long nr_running(void) 4348 { 4349 unsigned long i, sum = 0; 4350 4351 for_each_online_cpu(i) 4352 sum += cpu_rq(i)->nr_running; 4353 4354 return sum; 4355 } 4356 4357 /* 4358 * Check if only the current task is running on the CPU. 4359 * 4360 * Caution: this function does not check that the caller has disabled 4361 * preemption, thus the result might have a time-of-check-to-time-of-use 4362 * race. The caller is responsible to use it correctly, for example: 4363 * 4364 * - from a non-preemptible section (of course) 4365 * 4366 * - from a thread that is bound to a single CPU 4367 * 4368 * - in a loop with very short iterations (e.g. a polling loop) 4369 */ 4370 bool single_task_running(void) 4371 { 4372 return raw_rq()->nr_running == 1; 4373 } 4374 EXPORT_SYMBOL(single_task_running); 4375 4376 unsigned long long nr_context_switches(void) 4377 { 4378 int i; 4379 unsigned long long sum = 0; 4380 4381 for_each_possible_cpu(i) 4382 sum += cpu_rq(i)->nr_switches; 4383 4384 return sum; 4385 } 4386 4387 /* 4388 * Consumers of these two interfaces, like for example the cpuidle menu 4389 * governor, are using nonsensical data. Preferring shallow idle state selection 4390 * for a CPU that has IO-wait which might not even end up running the task when 4391 * it does become runnable. 4392 */ 4393 4394 unsigned long nr_iowait_cpu(int cpu) 4395 { 4396 return atomic_read(&cpu_rq(cpu)->nr_iowait); 4397 } 4398 4399 /* 4400 * IO-wait accounting, and how it's mostly bollocks (on SMP). 4401 * 4402 * The idea behind IO-wait account is to account the idle time that we could 4403 * have spend running if it were not for IO. That is, if we were to improve the 4404 * storage performance, we'd have a proportional reduction in IO-wait time. 4405 * 4406 * This all works nicely on UP, where, when a task blocks on IO, we account 4407 * idle time as IO-wait, because if the storage were faster, it could've been 4408 * running and we'd not be idle. 4409 * 4410 * This has been extended to SMP, by doing the same for each CPU. This however 4411 * is broken. 4412 * 4413 * Imagine for instance the case where two tasks block on one CPU, only the one 4414 * CPU will have IO-wait accounted, while the other has regular idle. Even 4415 * though, if the storage were faster, both could've ran at the same time, 4416 * utilising both CPUs. 4417 * 4418 * This means, that when looking globally, the current IO-wait accounting on 4419 * SMP is a lower bound, by reason of under accounting. 4420 * 4421 * Worse, since the numbers are provided per CPU, they are sometimes 4422 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly 4423 * associated with any one particular CPU, it can wake to another CPU than it 4424 * blocked on. This means the per CPU IO-wait number is meaningless. 
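 *
 * As a concrete illustration: if two tasks last ran on CPU0 and both
 * block on IO there, nr_iowait_cpu(0) reads 2 while nr_iowait_cpu(1)
 * reads 0, even though a faster disk could have kept both CPUs busy,
 * and either task may subsequently be woken on CPU1 anyway.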
4425 * 4426 * Task CPU affinities can make all that even more 'interesting'. 4427 */ 4428 4429 unsigned long nr_iowait(void) 4430 { 4431 unsigned long i, sum = 0; 4432 4433 for_each_possible_cpu(i) 4434 sum += nr_iowait_cpu(i); 4435 4436 return sum; 4437 } 4438 4439 #ifdef CONFIG_SMP 4440 4441 /* 4442 * sched_exec - execve() is a valuable balancing opportunity, because at 4443 * this point the task has the smallest effective memory and cache footprint. 4444 */ 4445 void sched_exec(void) 4446 { 4447 struct task_struct *p = current; 4448 unsigned long flags; 4449 int dest_cpu; 4450 4451 raw_spin_lock_irqsave(&p->pi_lock, flags); 4452 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); 4453 if (dest_cpu == smp_processor_id()) 4454 goto unlock; 4455 4456 if (likely(cpu_active(dest_cpu))) { 4457 struct migration_arg arg = { p, dest_cpu }; 4458 4459 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4460 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 4461 return; 4462 } 4463 unlock: 4464 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4465 } 4466 4467 #endif 4468 4469 DEFINE_PER_CPU(struct kernel_stat, kstat); 4470 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 4471 4472 EXPORT_PER_CPU_SYMBOL(kstat); 4473 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 4474 4475 /* 4476 * The function fair_sched_class.update_curr accesses the struct curr 4477 * and its field curr->exec_start; when called from task_sched_runtime(), 4478 * we observe a high rate of cache misses in practice. 4479 * Prefetching this data results in improved performance. 4480 */ 4481 static inline void prefetch_curr_exec_start(struct task_struct *p) 4482 { 4483 #ifdef CONFIG_FAIR_GROUP_SCHED 4484 struct sched_entity *curr = (&p->se)->cfs_rq->curr; 4485 #else 4486 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; 4487 #endif 4488 prefetch(curr); 4489 prefetch(&curr->exec_start); 4490 } 4491 4492 /* 4493 * Return accounted runtime for the task. 4494 * In case the task is currently running, return the runtime plus current's 4495 * pending runtime that have not been accounted yet. 4496 */ 4497 unsigned long long task_sched_runtime(struct task_struct *p) 4498 { 4499 struct rq_flags rf; 4500 struct rq *rq; 4501 u64 ns; 4502 4503 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 4504 /* 4505 * 64-bit doesn't need locks to atomically read a 64-bit value. 4506 * So we have a optimization chance when the task's delta_exec is 0. 4507 * Reading ->on_cpu is racy, but this is ok. 4508 * 4509 * If we race with it leaving CPU, we'll take a lock. So we're correct. 4510 * If we race with it entering CPU, unaccounted time is 0. This is 4511 * indistinguishable from the read occurring a few cycles earlier. 4512 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 4513 * been accounted, so we're correct here as well. 4514 */ 4515 if (!p->on_cpu || !task_on_rq_queued(p)) 4516 return p->se.sum_exec_runtime; 4517 #endif 4518 4519 rq = task_rq_lock(p, &rf); 4520 /* 4521 * Must be ->curr _and_ ->on_rq. If dequeued, we would 4522 * project cycles that may never be accounted to this 4523 * thread, breaking clock_gettime(). 4524 */ 4525 if (task_current(rq, p) && task_on_rq_queued(p)) { 4526 prefetch_curr_exec_start(p); 4527 update_rq_clock(rq); 4528 p->sched_class->update_curr(rq); 4529 } 4530 ns = p->se.sum_exec_runtime; 4531 task_rq_unlock(rq, p, &rf); 4532 4533 return ns; 4534 } 4535 4536 /* 4537 * This function gets called by the timer code, with HZ frequency. 4538 * We call it with interrupts disabled. 
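 *
 * Under the rq lock it updates the runqueue clock and the thermal
 * pressure average, runs the current class's task_tick() (where
 * time-slice and preemption decisions for the running task are made)
 * and folds the load-average and PSI samples; after dropping the lock
 * it runs the perf tick and, on SMP, kicks trigger_load_balance().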
4539 */ 4540 void scheduler_tick(void) 4541 { 4542 int cpu = smp_processor_id(); 4543 struct rq *rq = cpu_rq(cpu); 4544 struct task_struct *curr = rq->curr; 4545 struct rq_flags rf; 4546 unsigned long thermal_pressure; 4547 4548 arch_scale_freq_tick(); 4549 sched_clock_tick(); 4550 4551 rq_lock(rq, &rf); 4552 4553 update_rq_clock(rq); 4554 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); 4555 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); 4556 curr->sched_class->task_tick(rq, curr, 0); 4557 calc_global_load_tick(rq); 4558 psi_task_tick(rq); 4559 4560 rq_unlock(rq, &rf); 4561 4562 perf_event_task_tick(); 4563 4564 #ifdef CONFIG_SMP 4565 rq->idle_balance = idle_cpu(cpu); 4566 trigger_load_balance(rq); 4567 #endif 4568 } 4569 4570 #ifdef CONFIG_NO_HZ_FULL 4571 4572 struct tick_work { 4573 int cpu; 4574 atomic_t state; 4575 struct delayed_work work; 4576 }; 4577 /* Values for ->state, see diagram below. */ 4578 #define TICK_SCHED_REMOTE_OFFLINE 0 4579 #define TICK_SCHED_REMOTE_OFFLINING 1 4580 #define TICK_SCHED_REMOTE_RUNNING 2 4581 4582 /* 4583 * State diagram for ->state: 4584 * 4585 * 4586 * TICK_SCHED_REMOTE_OFFLINE 4587 * | ^ 4588 * | | 4589 * | | sched_tick_remote() 4590 * | | 4591 * | | 4592 * +--TICK_SCHED_REMOTE_OFFLINING 4593 * | ^ 4594 * | | 4595 * sched_tick_start() | | sched_tick_stop() 4596 * | | 4597 * V | 4598 * TICK_SCHED_REMOTE_RUNNING 4599 * 4600 * 4601 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() 4602 * and sched_tick_start() are happy to leave the state in RUNNING. 4603 */ 4604 4605 static struct tick_work __percpu *tick_work_cpu; 4606 4607 static void sched_tick_remote(struct work_struct *work) 4608 { 4609 struct delayed_work *dwork = to_delayed_work(work); 4610 struct tick_work *twork = container_of(dwork, struct tick_work, work); 4611 int cpu = twork->cpu; 4612 struct rq *rq = cpu_rq(cpu); 4613 struct task_struct *curr; 4614 struct rq_flags rf; 4615 u64 delta; 4616 int os; 4617 4618 /* 4619 * Handle the tick only if it appears the remote CPU is running in full 4620 * dynticks mode. The check is racy by nature, but missing a tick or 4621 * having one too much is no big deal because the scheduler tick updates 4622 * statistics and checks timeslices in a time-independent way, regardless 4623 * of when exactly it is running. 4624 */ 4625 if (!tick_nohz_tick_stopped_cpu(cpu)) 4626 goto out_requeue; 4627 4628 rq_lock_irq(rq, &rf); 4629 curr = rq->curr; 4630 if (cpu_is_offline(cpu)) 4631 goto out_unlock; 4632 4633 update_rq_clock(rq); 4634 4635 if (!is_idle_task(curr)) { 4636 /* 4637 * Make sure the next tick runs within a reasonable 4638 * amount of time. 4639 */ 4640 delta = rq_clock_task(rq) - curr->se.exec_start; 4641 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); 4642 } 4643 curr->sched_class->task_tick(rq, curr, 0); 4644 4645 calc_load_nohz_remote(rq); 4646 out_unlock: 4647 rq_unlock_irq(rq, &rf); 4648 out_requeue: 4649 4650 /* 4651 * Run the remote tick once per second (1Hz). This arbitrary 4652 * frequency is large enough to avoid overload but short enough 4653 * to keep scheduler internal stats reasonably up to date. But 4654 * first update state to reflect hotplug activity if required. 
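 *
 * atomic_fetch_add_unless() below decrements ->state only if it is not
 * RUNNING: an OFFLINING CPU thus falls back to OFFLINE and the work is
 * not rearmed, while a RUNNING CPU is left untouched and the tick is
 * queued again one second from now.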
4655 */ 4656 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); 4657 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); 4658 if (os == TICK_SCHED_REMOTE_RUNNING) 4659 queue_delayed_work(system_unbound_wq, dwork, HZ); 4660 } 4661 4662 static void sched_tick_start(int cpu) 4663 { 4664 int os; 4665 struct tick_work *twork; 4666 4667 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 4668 return; 4669 4670 WARN_ON_ONCE(!tick_work_cpu); 4671 4672 twork = per_cpu_ptr(tick_work_cpu, cpu); 4673 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); 4674 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); 4675 if (os == TICK_SCHED_REMOTE_OFFLINE) { 4676 twork->cpu = cpu; 4677 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); 4678 queue_delayed_work(system_unbound_wq, &twork->work, HZ); 4679 } 4680 } 4681 4682 #ifdef CONFIG_HOTPLUG_CPU 4683 static void sched_tick_stop(int cpu) 4684 { 4685 struct tick_work *twork; 4686 int os; 4687 4688 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 4689 return; 4690 4691 WARN_ON_ONCE(!tick_work_cpu); 4692 4693 twork = per_cpu_ptr(tick_work_cpu, cpu); 4694 /* There cannot be competing actions, but don't rely on stop-machine. */ 4695 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); 4696 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); 4697 /* Don't cancel, as this would mess up the state machine. */ 4698 } 4699 #endif /* CONFIG_HOTPLUG_CPU */ 4700 4701 int __init sched_tick_offload_init(void) 4702 { 4703 tick_work_cpu = alloc_percpu(struct tick_work); 4704 BUG_ON(!tick_work_cpu); 4705 return 0; 4706 } 4707 4708 #else /* !CONFIG_NO_HZ_FULL */ 4709 static inline void sched_tick_start(int cpu) { } 4710 static inline void sched_tick_stop(int cpu) { } 4711 #endif 4712 4713 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ 4714 defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 4715 /* 4716 * If the value passed in is equal to the current preempt count 4717 * then we just disabled preemption. Start timing the latency. 4718 */ 4719 static inline void preempt_latency_start(int val) 4720 { 4721 if (preempt_count() == val) { 4722 unsigned long ip = get_lock_parent_ip(); 4723 #ifdef CONFIG_DEBUG_PREEMPT 4724 current->preempt_disable_ip = ip; 4725 #endif 4726 trace_preempt_off(CALLER_ADDR0, ip); 4727 } 4728 } 4729 4730 void preempt_count_add(int val) 4731 { 4732 #ifdef CONFIG_DEBUG_PREEMPT 4733 /* 4734 * Underflow? 4735 */ 4736 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 4737 return; 4738 #endif 4739 __preempt_count_add(val); 4740 #ifdef CONFIG_DEBUG_PREEMPT 4741 /* 4742 * Spinlock count overflowing soon? 4743 */ 4744 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 4745 PREEMPT_MASK - 10); 4746 #endif 4747 preempt_latency_start(val); 4748 } 4749 EXPORT_SYMBOL(preempt_count_add); 4750 NOKPROBE_SYMBOL(preempt_count_add); 4751 4752 /* 4753 * If the value passed in equals to the current preempt count 4754 * then we just enabled preemption. Stop timing the latency. 4755 */ 4756 static inline void preempt_latency_stop(int val) 4757 { 4758 if (preempt_count() == val) 4759 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 4760 } 4761 4762 void preempt_count_sub(int val) 4763 { 4764 #ifdef CONFIG_DEBUG_PREEMPT 4765 /* 4766 * Underflow? 4767 */ 4768 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 4769 return; 4770 /* 4771 * Is the spinlock portion underflowing? 
4772 */ 4773 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 4774 !(preempt_count() & PREEMPT_MASK))) 4775 return; 4776 #endif 4777 4778 preempt_latency_stop(val); 4779 __preempt_count_sub(val); 4780 } 4781 EXPORT_SYMBOL(preempt_count_sub); 4782 NOKPROBE_SYMBOL(preempt_count_sub); 4783 4784 #else 4785 static inline void preempt_latency_start(int val) { } 4786 static inline void preempt_latency_stop(int val) { } 4787 #endif 4788 4789 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 4790 { 4791 #ifdef CONFIG_DEBUG_PREEMPT 4792 return p->preempt_disable_ip; 4793 #else 4794 return 0; 4795 #endif 4796 } 4797 4798 /* 4799 * Print scheduling while atomic bug: 4800 */ 4801 static noinline void __schedule_bug(struct task_struct *prev) 4802 { 4803 /* Save this before calling printk(), since that will clobber it */ 4804 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 4805 4806 if (oops_in_progress) 4807 return; 4808 4809 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 4810 prev->comm, prev->pid, preempt_count()); 4811 4812 debug_show_held_locks(prev); 4813 print_modules(); 4814 if (irqs_disabled()) 4815 print_irqtrace_events(prev); 4816 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 4817 && in_atomic_preempt_off()) { 4818 pr_err("Preemption disabled at:"); 4819 print_ip_sym(KERN_ERR, preempt_disable_ip); 4820 } 4821 if (panic_on_warn) 4822 panic("scheduling while atomic\n"); 4823 4824 dump_stack(); 4825 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 4826 } 4827 4828 /* 4829 * Various schedule()-time debugging checks and statistics: 4830 */ 4831 static inline void schedule_debug(struct task_struct *prev, bool preempt) 4832 { 4833 #ifdef CONFIG_SCHED_STACK_END_CHECK 4834 if (task_stack_end_corrupted(prev)) 4835 panic("corrupted stack end detected inside scheduler\n"); 4836 4837 if (task_scs_end_corrupted(prev)) 4838 panic("corrupted shadow stack detected inside scheduler\n"); 4839 #endif 4840 4841 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 4842 if (!preempt && prev->state && prev->non_block_count) { 4843 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", 4844 prev->comm, prev->pid, prev->non_block_count); 4845 dump_stack(); 4846 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 4847 } 4848 #endif 4849 4850 if (unlikely(in_atomic_preempt_off())) { 4851 __schedule_bug(prev); 4852 preempt_count_set(PREEMPT_DISABLED); 4853 } 4854 rcu_sleep_check(); 4855 SCHED_WARN_ON(ct_state() == CONTEXT_USER); 4856 4857 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 4858 4859 schedstat_inc(this_rq()->sched_count); 4860 } 4861 4862 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, 4863 struct rq_flags *rf) 4864 { 4865 #ifdef CONFIG_SMP 4866 const struct sched_class *class; 4867 /* 4868 * We must do the balancing pass before put_prev_task(), such 4869 * that when we release the rq->lock the task is in the same 4870 * state as before we took rq->lock. 4871 * 4872 * We can terminate the balance pass as soon as we know there is 4873 * a runnable task of @class priority or higher. 
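 *
 * For example, if prev is an RT task the walk below starts at
 * rt_sched_class; once its ->balance() reports runnable work at that
 * priority or above, the fair class is never consulted.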
4874 */ 4875 for_class_range(class, prev->sched_class, &idle_sched_class) { 4876 if (class->balance(rq, prev, rf)) 4877 break; 4878 } 4879 #endif 4880 4881 put_prev_task(rq, prev); 4882 } 4883 4884 /* 4885 * Pick up the highest-prio task: 4886 */ 4887 static inline struct task_struct * 4888 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 4889 { 4890 const struct sched_class *class; 4891 struct task_struct *p; 4892 4893 /* 4894 * Optimization: we know that if all tasks are in the fair class we can 4895 * call that function directly, but only if the @prev task wasn't of a 4896 * higher scheduling class, because otherwise those lose the 4897 * opportunity to pull in more work from other CPUs. 4898 */ 4899 if (likely(prev->sched_class <= &fair_sched_class && 4900 rq->nr_running == rq->cfs.h_nr_running)) { 4901 4902 p = pick_next_task_fair(rq, prev, rf); 4903 if (unlikely(p == RETRY_TASK)) 4904 goto restart; 4905 4906 /* Assumes fair_sched_class->next == idle_sched_class */ 4907 if (!p) { 4908 put_prev_task(rq, prev); 4909 p = pick_next_task_idle(rq); 4910 } 4911 4912 return p; 4913 } 4914 4915 restart: 4916 put_prev_task_balance(rq, prev, rf); 4917 4918 for_each_class(class) { 4919 p = class->pick_next_task(rq); 4920 if (p) 4921 return p; 4922 } 4923 4924 /* The idle class should always have a runnable task: */ 4925 BUG(); 4926 } 4927 4928 /* 4929 * __schedule() is the main scheduler function. 4930 * 4931 * The main means of driving the scheduler and thus entering this function are: 4932 * 4933 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 4934 * 4935 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 4936 * paths. For example, see arch/x86/entry_64.S. 4937 * 4938 * To drive preemption between tasks, the scheduler sets the flag in timer 4939 * interrupt handler scheduler_tick(). 4940 * 4941 * 3. Wakeups don't really cause entry into schedule(). They add a 4942 * task to the run-queue and that's it. 4943 * 4944 * Now, if the new task added to the run-queue preempts the current 4945 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 4946 * called on the nearest possible occasion: 4947 * 4948 * - If the kernel is preemptible (CONFIG_PREEMPTION=y): 4949 * 4950 * - in syscall or exception context, at the next outmost 4951 * preempt_enable(). (this might be as soon as the wake_up()'s 4952 * spin_unlock()!) 4953 * 4954 * - in IRQ context, return from interrupt-handler to 4955 * preemptible context 4956 * 4957 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) 4958 * then at the next: 4959 * 4960 * - cond_resched() call 4961 * - explicit schedule() call 4962 * - return from syscall or exception to user-space 4963 * - return from interrupt-handler to user-space 4964 * 4965 * WARNING: must be called with preemption disabled! 
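 *
 * As an illustration of the explicit-blocking case (1) above, the
 * canonical wait loop that ends up in here looks roughly like this,
 * where 'my_condition' is a placeholder for whatever the task waits on:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);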
4966 */ 4967 static void __sched notrace __schedule(bool preempt) 4968 { 4969 struct task_struct *prev, *next; 4970 unsigned long *switch_count; 4971 unsigned long prev_state; 4972 struct rq_flags rf; 4973 struct rq *rq; 4974 int cpu; 4975 4976 cpu = smp_processor_id(); 4977 rq = cpu_rq(cpu); 4978 prev = rq->curr; 4979 4980 schedule_debug(prev, preempt); 4981 4982 if (sched_feat(HRTICK)) 4983 hrtick_clear(rq); 4984 4985 local_irq_disable(); 4986 rcu_note_context_switch(preempt); 4987 4988 /* 4989 * Make sure that signal_pending_state()->signal_pending() below 4990 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 4991 * done by the caller to avoid the race with signal_wake_up(): 4992 * 4993 * __set_current_state(@state) signal_wake_up() 4994 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) 4995 * wake_up_state(p, state) 4996 * LOCK rq->lock LOCK p->pi_state 4997 * smp_mb__after_spinlock() smp_mb__after_spinlock() 4998 * if (signal_pending_state()) if (p->state & @state) 4999 * 5000 * Also, the membarrier system call requires a full memory barrier 5001 * after coming from user-space, before storing to rq->curr. 5002 */ 5003 rq_lock(rq, &rf); 5004 smp_mb__after_spinlock(); 5005 5006 /* Promote REQ to ACT */ 5007 rq->clock_update_flags <<= 1; 5008 update_rq_clock(rq); 5009 5010 switch_count = &prev->nivcsw; 5011 5012 /* 5013 * We must load prev->state once (task_struct::state is volatile), such 5014 * that: 5015 * 5016 * - we form a control dependency vs deactivate_task() below. 5017 * - ptrace_{,un}freeze_traced() can change ->state underneath us. 5018 */ 5019 prev_state = prev->state; 5020 if (!preempt && prev_state) { 5021 if (signal_pending_state(prev_state, prev)) { 5022 prev->state = TASK_RUNNING; 5023 } else { 5024 prev->sched_contributes_to_load = 5025 (prev_state & TASK_UNINTERRUPTIBLE) && 5026 !(prev_state & TASK_NOLOAD) && 5027 !(prev->flags & PF_FROZEN); 5028 5029 if (prev->sched_contributes_to_load) 5030 rq->nr_uninterruptible++; 5031 5032 /* 5033 * __schedule() ttwu() 5034 * prev_state = prev->state; if (p->on_rq && ...) 5035 * if (prev_state) goto out; 5036 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); 5037 * p->state = TASK_WAKING 5038 * 5039 * Where __schedule() and ttwu() have matching control dependencies. 5040 * 5041 * After this, schedule() must not care about p->state any more. 5042 */ 5043 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 5044 5045 if (prev->in_iowait) { 5046 atomic_inc(&rq->nr_iowait); 5047 delayacct_blkio_start(); 5048 } 5049 } 5050 switch_count = &prev->nvcsw; 5051 } 5052 5053 next = pick_next_task(rq, prev, &rf); 5054 clear_tsk_need_resched(prev); 5055 clear_preempt_need_resched(); 5056 5057 if (likely(prev != next)) { 5058 rq->nr_switches++; 5059 /* 5060 * RCU users of rcu_dereference(rq->curr) may not see 5061 * changes to task_struct made by pick_next_task(). 5062 */ 5063 RCU_INIT_POINTER(rq->curr, next); 5064 /* 5065 * The membarrier system call requires each architecture 5066 * to have a full memory barrier after updating 5067 * rq->curr, before returning to user-space. 5068 * 5069 * Here are the schemes providing that barrier on the 5070 * various architectures: 5071 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. 5072 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 
5073 * - finish_lock_switch() for weakly-ordered 5074 * architectures where spin_unlock is a full barrier, 5075 * - switch_to() for arm64 (weakly-ordered, spin_unlock 5076 * is a RELEASE barrier), 5077 */ 5078 ++*switch_count; 5079 5080 migrate_disable_switch(rq, prev); 5081 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); 5082 5083 trace_sched_switch(preempt, prev, next); 5084 5085 /* Also unlocks the rq: */ 5086 rq = context_switch(rq, prev, next, &rf); 5087 } else { 5088 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 5089 5090 rq_unpin_lock(rq, &rf); 5091 __balance_callbacks(rq); 5092 raw_spin_unlock_irq(&rq->lock); 5093 } 5094 } 5095 5096 void __noreturn do_task_dead(void) 5097 { 5098 /* Causes final put_task_struct in finish_task_switch(): */ 5099 set_special_state(TASK_DEAD); 5100 5101 /* Tell freezer to ignore us: */ 5102 current->flags |= PF_NOFREEZE; 5103 5104 __schedule(false); 5105 BUG(); 5106 5107 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 5108 for (;;) 5109 cpu_relax(); 5110 } 5111 5112 static inline void sched_submit_work(struct task_struct *tsk) 5113 { 5114 unsigned int task_flags; 5115 5116 if (!tsk->state) 5117 return; 5118 5119 task_flags = tsk->flags; 5120 /* 5121 * If a worker went to sleep, notify and ask workqueue whether 5122 * it wants to wake up a task to maintain concurrency. 5123 * As this function is called inside the schedule() context, 5124 * we disable preemption to avoid it calling schedule() again 5125 * in the possible wakeup of a kworker and because wq_worker_sleeping() 5126 * requires it. 5127 */ 5128 if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 5129 preempt_disable(); 5130 if (task_flags & PF_WQ_WORKER) 5131 wq_worker_sleeping(tsk); 5132 else 5133 io_wq_worker_sleeping(tsk); 5134 preempt_enable_no_resched(); 5135 } 5136 5137 if (tsk_is_pi_blocked(tsk)) 5138 return; 5139 5140 /* 5141 * If we are going to sleep and we have plugged IO queued, 5142 * make sure to submit it to avoid deadlocks. 5143 */ 5144 if (blk_needs_flush_plug(tsk)) 5145 blk_schedule_flush_plug(tsk); 5146 } 5147 5148 static void sched_update_worker(struct task_struct *tsk) 5149 { 5150 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 5151 if (tsk->flags & PF_WQ_WORKER) 5152 wq_worker_running(tsk); 5153 else 5154 io_wq_worker_running(tsk); 5155 } 5156 } 5157 5158 asmlinkage __visible void __sched schedule(void) 5159 { 5160 struct task_struct *tsk = current; 5161 5162 sched_submit_work(tsk); 5163 do { 5164 preempt_disable(); 5165 __schedule(false); 5166 sched_preempt_enable_no_resched(); 5167 } while (need_resched()); 5168 sched_update_worker(tsk); 5169 } 5170 EXPORT_SYMBOL(schedule); 5171 5172 /* 5173 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted 5174 * state (have scheduled out non-voluntarily) by making sure that all 5175 * tasks have either left the run queue or have gone into user space. 5176 * As idle tasks do not do either, they must not ever be preempted 5177 * (schedule out non-voluntarily). 5178 * 5179 * schedule_idle() is similar to schedule_preempt_disable() except that it 5180 * never enables preemption because it does not call sched_submit_work(). 5181 */ 5182 void __sched schedule_idle(void) 5183 { 5184 /* 5185 * As this skips calling sched_submit_work(), which the idle task does 5186 * regardless because that function is a nop when the task is in a 5187 * TASK_RUNNING state, make sure this isn't used someplace that the 5188 * current task can be in any other state. 
Note, idle is always in the 5189 * TASK_RUNNING state. 5190 */ 5191 WARN_ON_ONCE(current->state); 5192 do { 5193 __schedule(false); 5194 } while (need_resched()); 5195 } 5196 5197 #if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) 5198 asmlinkage __visible void __sched schedule_user(void) 5199 { 5200 /* 5201 * If we come here after a random call to set_need_resched(), 5202 * or we have been woken up remotely but the IPI has not yet arrived, 5203 * we haven't yet exited the RCU idle mode. Do it here manually until 5204 * we find a better solution. 5205 * 5206 * NB: There are buggy callers of this function. Ideally we 5207 * should warn if prev_state != CONTEXT_USER, but that will trigger 5208 * too frequently to make sense yet. 5209 */ 5210 enum ctx_state prev_state = exception_enter(); 5211 schedule(); 5212 exception_exit(prev_state); 5213 } 5214 #endif 5215 5216 /** 5217 * schedule_preempt_disabled - called with preemption disabled 5218 * 5219 * Returns with preemption disabled. Note: preempt_count must be 1 5220 */ 5221 void __sched schedule_preempt_disabled(void) 5222 { 5223 sched_preempt_enable_no_resched(); 5224 schedule(); 5225 preempt_disable(); 5226 } 5227 5228 static void __sched notrace preempt_schedule_common(void) 5229 { 5230 do { 5231 /* 5232 * Because the function tracer can trace preempt_count_sub() 5233 * and it also uses preempt_enable/disable_notrace(), if 5234 * NEED_RESCHED is set, the preempt_enable_notrace() called 5235 * by the function tracer will call this function again and 5236 * cause infinite recursion. 5237 * 5238 * Preemption must be disabled here before the function 5239 * tracer can trace. Break up preempt_disable() into two 5240 * calls. One to disable preemption without fear of being 5241 * traced. The other to still record the preemption latency, 5242 * which can also be traced by the function tracer. 5243 */ 5244 preempt_disable_notrace(); 5245 preempt_latency_start(1); 5246 __schedule(true); 5247 preempt_latency_stop(1); 5248 preempt_enable_no_resched_notrace(); 5249 5250 /* 5251 * Check again in case we missed a preemption opportunity 5252 * between schedule and now. 5253 */ 5254 } while (need_resched()); 5255 } 5256 5257 #ifdef CONFIG_PREEMPTION 5258 /* 5259 * This is the entry point to schedule() from in-kernel preemption 5260 * off of preempt_enable. 5261 */ 5262 asmlinkage __visible void __sched notrace preempt_schedule(void) 5263 { 5264 /* 5265 * If there is a non-zero preempt_count or interrupts are disabled, 5266 * we do not want to preempt the current task. Just return.. 5267 */ 5268 if (likely(!preemptible())) 5269 return; 5270 5271 preempt_schedule_common(); 5272 } 5273 NOKPROBE_SYMBOL(preempt_schedule); 5274 EXPORT_SYMBOL(preempt_schedule); 5275 5276 /** 5277 * preempt_schedule_notrace - preempt_schedule called by tracing 5278 * 5279 * The tracing infrastructure uses preempt_enable_notrace to prevent 5280 * recursion and tracing preempt enabling caused by the tracing 5281 * infrastructure itself. But as tracing can happen in areas coming 5282 * from userspace or just about to enter userspace, a preempt enable 5283 * can occur before user_exit() is called. This will cause the scheduler 5284 * to be called when the system is still in usermode. 5285 * 5286 * To prevent this, the preempt_enable_notrace will use this function 5287 * instead of preempt_schedule() to exit user context if needed before 5288 * calling the scheduler. 
5289 */ 5290 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 5291 { 5292 enum ctx_state prev_ctx; 5293 5294 if (likely(!preemptible())) 5295 return; 5296 5297 do { 5298 /* 5299 * Because the function tracer can trace preempt_count_sub() 5300 * and it also uses preempt_enable/disable_notrace(), if 5301 * NEED_RESCHED is set, the preempt_enable_notrace() called 5302 * by the function tracer will call this function again and 5303 * cause infinite recursion. 5304 * 5305 * Preemption must be disabled here before the function 5306 * tracer can trace. Break up preempt_disable() into two 5307 * calls. One to disable preemption without fear of being 5308 * traced. The other to still record the preemption latency, 5309 * which can also be traced by the function tracer. 5310 */ 5311 preempt_disable_notrace(); 5312 preempt_latency_start(1); 5313 /* 5314 * Needs preempt disabled in case user_exit() is traced 5315 * and the tracer calls preempt_enable_notrace() causing 5316 * an infinite recursion. 5317 */ 5318 prev_ctx = exception_enter(); 5319 __schedule(true); 5320 exception_exit(prev_ctx); 5321 5322 preempt_latency_stop(1); 5323 preempt_enable_no_resched_notrace(); 5324 } while (need_resched()); 5325 } 5326 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 5327 5328 #endif /* CONFIG_PREEMPTION */ 5329 5330 /* 5331 * This is the entry point to schedule() from kernel preemption 5332 * off of irq context. 5333 * Note, that this is called and return with irqs disabled. This will 5334 * protect us against recursive calling from irq. 5335 */ 5336 asmlinkage __visible void __sched preempt_schedule_irq(void) 5337 { 5338 enum ctx_state prev_state; 5339 5340 /* Catch callers which need to be fixed */ 5341 BUG_ON(preempt_count() || !irqs_disabled()); 5342 5343 prev_state = exception_enter(); 5344 5345 do { 5346 preempt_disable(); 5347 local_irq_enable(); 5348 __schedule(true); 5349 local_irq_disable(); 5350 sched_preempt_enable_no_resched(); 5351 } while (need_resched()); 5352 5353 exception_exit(prev_state); 5354 } 5355 5356 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 5357 void *key) 5358 { 5359 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC); 5360 return try_to_wake_up(curr->private, mode, wake_flags); 5361 } 5362 EXPORT_SYMBOL(default_wake_function); 5363 5364 #ifdef CONFIG_RT_MUTEXES 5365 5366 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) 5367 { 5368 if (pi_task) 5369 prio = min(prio, pi_task->prio); 5370 5371 return prio; 5372 } 5373 5374 static inline int rt_effective_prio(struct task_struct *p, int prio) 5375 { 5376 struct task_struct *pi_task = rt_mutex_get_top_task(p); 5377 5378 return __rt_effective_prio(pi_task, prio); 5379 } 5380 5381 /* 5382 * rt_mutex_setprio - set the current priority of a task 5383 * @p: task to boost 5384 * @pi_task: donor task 5385 * 5386 * This function changes the 'effective' priority of a task. It does 5387 * not touch ->normal_prio like __setscheduler(). 5388 * 5389 * Used by the rt_mutex code to implement priority inheritance 5390 * logic. Call site only calls if the priority of the task changed. 
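 *
 * Conceptually the rt_mutex code drives this as a boost/deboost pair,
 * roughly ('top_waiter' being shorthand for the highest-priority waiter,
 * not a real symbol):
 *
 *	rt_mutex_setprio(owner, top_waiter);	boost towards the waiter's prio
 *	...
 *	rt_mutex_setprio(owner, NULL);		deboost back to ->normal_prio
 *
 * so a SCHED_NORMAL owner blocked on by a SCHED_FIFO waiter temporarily
 * runs in rt_sched_class at the waiter's priority.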
5391 */ 5392 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) 5393 { 5394 int prio, oldprio, queued, running, queue_flag = 5395 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 5396 const struct sched_class *prev_class; 5397 struct rq_flags rf; 5398 struct rq *rq; 5399 5400 /* XXX used to be waiter->prio, not waiter->task->prio */ 5401 prio = __rt_effective_prio(pi_task, p->normal_prio); 5402 5403 /* 5404 * If nothing changed; bail early. 5405 */ 5406 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) 5407 return; 5408 5409 rq = __task_rq_lock(p, &rf); 5410 update_rq_clock(rq); 5411 /* 5412 * Set under pi_lock && rq->lock, such that the value can be used under 5413 * either lock. 5414 * 5415 * Note that there is loads of tricky to make this pointer cache work 5416 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to 5417 * ensure a task is de-boosted (pi_task is set to NULL) before the 5418 * task is allowed to run again (and can exit). This ensures the pointer 5419 * points to a blocked task -- which guarantees the task is present. 5420 */ 5421 p->pi_top_task = pi_task; 5422 5423 /* 5424 * For FIFO/RR we only need to set prio, if that matches we're done. 5425 */ 5426 if (prio == p->prio && !dl_prio(prio)) 5427 goto out_unlock; 5428 5429 /* 5430 * Idle task boosting is a nono in general. There is one 5431 * exception, when PREEMPT_RT and NOHZ is active: 5432 * 5433 * The idle task calls get_next_timer_interrupt() and holds 5434 * the timer wheel base->lock on the CPU and another CPU wants 5435 * to access the timer (probably to cancel it). We can safely 5436 * ignore the boosting request, as the idle CPU runs this code 5437 * with interrupts disabled and will complete the lock 5438 * protected section without being interrupted. So there is no 5439 * real need to boost. 5440 */ 5441 if (unlikely(p == rq->idle)) { 5442 WARN_ON(p != rq->curr); 5443 WARN_ON(p->pi_blocked_on); 5444 goto out_unlock; 5445 } 5446 5447 trace_sched_pi_setprio(p, pi_task); 5448 oldprio = p->prio; 5449 5450 if (oldprio == prio) 5451 queue_flag &= ~DEQUEUE_MOVE; 5452 5453 prev_class = p->sched_class; 5454 queued = task_on_rq_queued(p); 5455 running = task_current(rq, p); 5456 if (queued) 5457 dequeue_task(rq, p, queue_flag); 5458 if (running) 5459 put_prev_task(rq, p); 5460 5461 /* 5462 * Boosting condition are: 5463 * 1. -rt task is running and holds mutex A 5464 * --> -dl task blocks on mutex A 5465 * 5466 * 2. 
-dl task is running and holds mutex A 5467 * --> -dl task blocks on mutex A and could preempt the 5468 * running task 5469 */ 5470 if (dl_prio(prio)) { 5471 if (!dl_prio(p->normal_prio) || 5472 (pi_task && dl_prio(pi_task->prio) && 5473 dl_entity_preempt(&pi_task->dl, &p->dl))) { 5474 p->dl.pi_se = pi_task->dl.pi_se; 5475 queue_flag |= ENQUEUE_REPLENISH; 5476 } else { 5477 p->dl.pi_se = &p->dl; 5478 } 5479 p->sched_class = &dl_sched_class; 5480 } else if (rt_prio(prio)) { 5481 if (dl_prio(oldprio)) 5482 p->dl.pi_se = &p->dl; 5483 if (oldprio < prio) 5484 queue_flag |= ENQUEUE_HEAD; 5485 p->sched_class = &rt_sched_class; 5486 } else { 5487 if (dl_prio(oldprio)) 5488 p->dl.pi_se = &p->dl; 5489 if (rt_prio(oldprio)) 5490 p->rt.timeout = 0; 5491 p->sched_class = &fair_sched_class; 5492 } 5493 5494 p->prio = prio; 5495 5496 if (queued) 5497 enqueue_task(rq, p, queue_flag); 5498 if (running) 5499 set_next_task(rq, p); 5500 5501 check_class_changed(rq, p, prev_class, oldprio); 5502 out_unlock: 5503 /* Avoid rq from going away on us: */ 5504 preempt_disable(); 5505 5506 rq_unpin_lock(rq, &rf); 5507 __balance_callbacks(rq); 5508 raw_spin_unlock(&rq->lock); 5509 5510 preempt_enable(); 5511 } 5512 #else 5513 static inline int rt_effective_prio(struct task_struct *p, int prio) 5514 { 5515 return prio; 5516 } 5517 #endif 5518 5519 void set_user_nice(struct task_struct *p, long nice) 5520 { 5521 bool queued, running; 5522 int old_prio; 5523 struct rq_flags rf; 5524 struct rq *rq; 5525 5526 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 5527 return; 5528 /* 5529 * We have to be careful, if called from sys_setpriority(), 5530 * the task might be in the middle of scheduling on another CPU. 5531 */ 5532 rq = task_rq_lock(p, &rf); 5533 update_rq_clock(rq); 5534 5535 /* 5536 * The RT priorities are set via sched_setscheduler(), but we still 5537 * allow the 'normal' nice value to be set - but as expected 5538 * it won't have any effect on scheduling until the task is 5539 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 5540 */ 5541 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 5542 p->static_prio = NICE_TO_PRIO(nice); 5543 goto out_unlock; 5544 } 5545 queued = task_on_rq_queued(p); 5546 running = task_current(rq, p); 5547 if (queued) 5548 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 5549 if (running) 5550 put_prev_task(rq, p); 5551 5552 p->static_prio = NICE_TO_PRIO(nice); 5553 set_load_weight(p, true); 5554 old_prio = p->prio; 5555 p->prio = effective_prio(p); 5556 5557 if (queued) 5558 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 5559 if (running) 5560 set_next_task(rq, p); 5561 5562 /* 5563 * If the task increased its priority or is running and 5564 * lowered its priority, then reschedule its CPU: 5565 */ 5566 p->sched_class->prio_changed(rq, p, old_prio); 5567 5568 out_unlock: 5569 task_rq_unlock(rq, p, &rf); 5570 } 5571 EXPORT_SYMBOL(set_user_nice); 5572 5573 /* 5574 * can_nice - check if a task can reduce its nice value 5575 * @p: task 5576 * @nice: nice value 5577 */ 5578 int can_nice(const struct task_struct *p, const int nice) 5579 { 5580 /* Convert nice value [19,-20] to rlimit style value [1,40]: */ 5581 int nice_rlim = nice_to_rlimit(nice); 5582 5583 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 5584 capable(CAP_SYS_NICE)); 5585 } 5586 5587 #ifdef __ARCH_WANT_SYS_NICE 5588 5589 /* 5590 * sys_nice - change the priority of the current process. 
5591 * @increment: priority increment 5592 * 5593 * sys_setpriority is a more generic, but much slower function that 5594 * does similar things. 5595 */ 5596 SYSCALL_DEFINE1(nice, int, increment) 5597 { 5598 long nice, retval; 5599 5600 /* 5601 * Setpriority might change our priority at the same moment. 5602 * We don't have to worry. Conceptually one call occurs first 5603 * and we have a single winner. 5604 */ 5605 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 5606 nice = task_nice(current) + increment; 5607 5608 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 5609 if (increment < 0 && !can_nice(current, nice)) 5610 return -EPERM; 5611 5612 retval = security_task_setnice(current, nice); 5613 if (retval) 5614 return retval; 5615 5616 set_user_nice(current, nice); 5617 return 0; 5618 } 5619 5620 #endif 5621 5622 /** 5623 * task_prio - return the priority value of a given task. 5624 * @p: the task in question. 5625 * 5626 * Return: The priority value as seen by users in /proc. 5627 * RT tasks are offset by -200. Normal tasks are centered 5628 * around 0, value goes from -16 to +15. 5629 */ 5630 int task_prio(const struct task_struct *p) 5631 { 5632 return p->prio - MAX_RT_PRIO; 5633 } 5634 5635 /** 5636 * idle_cpu - is a given CPU idle currently? 5637 * @cpu: the processor in question. 5638 * 5639 * Return: 1 if the CPU is currently idle. 0 otherwise. 5640 */ 5641 int idle_cpu(int cpu) 5642 { 5643 struct rq *rq = cpu_rq(cpu); 5644 5645 if (rq->curr != rq->idle) 5646 return 0; 5647 5648 if (rq->nr_running) 5649 return 0; 5650 5651 #ifdef CONFIG_SMP 5652 if (rq->ttwu_pending) 5653 return 0; 5654 #endif 5655 5656 return 1; 5657 } 5658 5659 /** 5660 * available_idle_cpu - is a given CPU idle for enqueuing work. 5661 * @cpu: the CPU in question. 5662 * 5663 * Return: 1 if the CPU is currently idle. 0 otherwise. 5664 */ 5665 int available_idle_cpu(int cpu) 5666 { 5667 if (!idle_cpu(cpu)) 5668 return 0; 5669 5670 if (vcpu_is_preempted(cpu)) 5671 return 0; 5672 5673 return 1; 5674 } 5675 5676 /** 5677 * idle_task - return the idle task for a given CPU. 5678 * @cpu: the processor in question. 5679 * 5680 * Return: The idle task for the CPU @cpu. 5681 */ 5682 struct task_struct *idle_task(int cpu) 5683 { 5684 return cpu_rq(cpu)->idle; 5685 } 5686 5687 /** 5688 * find_process_by_pid - find a process with a matching PID value. 5689 * @pid: the pid in question. 5690 * 5691 * The task of @pid, if found. %NULL otherwise. 5692 */ 5693 static struct task_struct *find_process_by_pid(pid_t pid) 5694 { 5695 return pid ? find_task_by_vpid(pid) : current; 5696 } 5697 5698 /* 5699 * sched_setparam() passes in -1 for its policy, to let the functions 5700 * it calls know not to change it. 5701 */ 5702 #define SETPARAM_POLICY -1 5703 5704 static void __setscheduler_params(struct task_struct *p, 5705 const struct sched_attr *attr) 5706 { 5707 int policy = attr->sched_policy; 5708 5709 if (policy == SETPARAM_POLICY) 5710 policy = p->policy; 5711 5712 p->policy = policy; 5713 5714 if (dl_policy(policy)) 5715 __setparam_dl(p, attr); 5716 else if (fair_policy(policy)) 5717 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 5718 5719 /* 5720 * __sched_setscheduler() ensures attr->sched_priority == 0 when 5721 * !rt_policy. Always setting this ensures that things like 5722 * getparam()/getattr() don't report silly values for !rt tasks. 
5723 */ 5724 p->rt_priority = attr->sched_priority; 5725 p->normal_prio = normal_prio(p); 5726 set_load_weight(p, true); 5727 } 5728 5729 /* Actually do priority change: must hold pi & rq lock. */ 5730 static void __setscheduler(struct rq *rq, struct task_struct *p, 5731 const struct sched_attr *attr, bool keep_boost) 5732 { 5733 /* 5734 * If params can't change scheduling class changes aren't allowed 5735 * either. 5736 */ 5737 if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS) 5738 return; 5739 5740 __setscheduler_params(p, attr); 5741 5742 /* 5743 * Keep a potential priority boosting if called from 5744 * sched_setscheduler(). 5745 */ 5746 p->prio = normal_prio(p); 5747 if (keep_boost) 5748 p->prio = rt_effective_prio(p, p->prio); 5749 5750 if (dl_prio(p->prio)) 5751 p->sched_class = &dl_sched_class; 5752 else if (rt_prio(p->prio)) 5753 p->sched_class = &rt_sched_class; 5754 else 5755 p->sched_class = &fair_sched_class; 5756 } 5757 5758 /* 5759 * Check the target process has a UID that matches the current process's: 5760 */ 5761 static bool check_same_owner(struct task_struct *p) 5762 { 5763 const struct cred *cred = current_cred(), *pcred; 5764 bool match; 5765 5766 rcu_read_lock(); 5767 pcred = __task_cred(p); 5768 match = (uid_eq(cred->euid, pcred->euid) || 5769 uid_eq(cred->euid, pcred->uid)); 5770 rcu_read_unlock(); 5771 return match; 5772 } 5773 5774 static int __sched_setscheduler(struct task_struct *p, 5775 const struct sched_attr *attr, 5776 bool user, bool pi) 5777 { 5778 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : 5779 MAX_RT_PRIO - 1 - attr->sched_priority; 5780 int retval, oldprio, oldpolicy = -1, queued, running; 5781 int new_effective_prio, policy = attr->sched_policy; 5782 const struct sched_class *prev_class; 5783 struct callback_head *head; 5784 struct rq_flags rf; 5785 int reset_on_fork; 5786 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 5787 struct rq *rq; 5788 5789 /* The pi code expects interrupts enabled */ 5790 BUG_ON(pi && in_interrupt()); 5791 recheck: 5792 /* Double check policy once rq lock held: */ 5793 if (policy < 0) { 5794 reset_on_fork = p->sched_reset_on_fork; 5795 policy = oldpolicy = p->policy; 5796 } else { 5797 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 5798 5799 if (!valid_policy(policy)) 5800 return -EINVAL; 5801 } 5802 5803 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) 5804 return -EINVAL; 5805 5806 /* 5807 * Valid priorities for SCHED_FIFO and SCHED_RR are 5808 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 5809 * SCHED_BATCH and SCHED_IDLE is 0. 
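 *
 * For instance, { .sched_policy = SCHED_FIFO, .sched_priority = 0 } and
 * { .sched_policy = SCHED_NORMAL, .sched_priority = 10 } are both
 * rejected by the rt_policy() cross-check below: an RT policy must come
 * with a non-zero priority and a non-RT policy must not.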
5810 */ 5811 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || 5812 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) 5813 return -EINVAL; 5814 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 5815 (rt_policy(policy) != (attr->sched_priority != 0))) 5816 return -EINVAL; 5817 5818 /* 5819 * Allow unprivileged RT tasks to decrease priority: 5820 */ 5821 if (user && !capable(CAP_SYS_NICE)) { 5822 if (fair_policy(policy)) { 5823 if (attr->sched_nice < task_nice(p) && 5824 !can_nice(p, attr->sched_nice)) 5825 return -EPERM; 5826 } 5827 5828 if (rt_policy(policy)) { 5829 unsigned long rlim_rtprio = 5830 task_rlimit(p, RLIMIT_RTPRIO); 5831 5832 /* Can't set/change the rt policy: */ 5833 if (policy != p->policy && !rlim_rtprio) 5834 return -EPERM; 5835 5836 /* Can't increase priority: */ 5837 if (attr->sched_priority > p->rt_priority && 5838 attr->sched_priority > rlim_rtprio) 5839 return -EPERM; 5840 } 5841 5842 /* 5843 * Can't set/change SCHED_DEADLINE policy at all for now 5844 * (safest behavior); in the future we would like to allow 5845 * unprivileged DL tasks to increase their relative deadline 5846 * or reduce their runtime (both ways reducing utilization) 5847 */ 5848 if (dl_policy(policy)) 5849 return -EPERM; 5850 5851 /* 5852 * Treat SCHED_IDLE as nice 20. Only allow a switch to 5853 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 5854 */ 5855 if (task_has_idle_policy(p) && !idle_policy(policy)) { 5856 if (!can_nice(p, task_nice(p))) 5857 return -EPERM; 5858 } 5859 5860 /* Can't change other user's priorities: */ 5861 if (!check_same_owner(p)) 5862 return -EPERM; 5863 5864 /* Normal users shall not reset the sched_reset_on_fork flag: */ 5865 if (p->sched_reset_on_fork && !reset_on_fork) 5866 return -EPERM; 5867 } 5868 5869 if (user) { 5870 if (attr->sched_flags & SCHED_FLAG_SUGOV) 5871 return -EINVAL; 5872 5873 retval = security_task_setscheduler(p); 5874 if (retval) 5875 return retval; 5876 } 5877 5878 /* Update task specific "requested" clamps */ 5879 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { 5880 retval = uclamp_validate(p, attr); 5881 if (retval) 5882 return retval; 5883 } 5884 5885 if (pi) 5886 cpuset_read_lock(); 5887 5888 /* 5889 * Make sure no PI-waiters arrive (or leave) while we are 5890 * changing the priority of the task: 5891 * 5892 * To be able to change p->policy safely, the appropriate 5893 * runqueue lock must be held. 5894 */ 5895 rq = task_rq_lock(p, &rf); 5896 update_rq_clock(rq); 5897 5898 /* 5899 * Changing the policy of the stop threads its a very bad idea: 5900 */ 5901 if (p == rq->stop) { 5902 retval = -EINVAL; 5903 goto unlock; 5904 } 5905 5906 /* 5907 * If not changing anything there's no need to proceed further, 5908 * but store a possible modification of reset_on_fork. 5909 */ 5910 if (unlikely(policy == p->policy)) { 5911 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 5912 goto change; 5913 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 5914 goto change; 5915 if (dl_policy(policy) && dl_param_changed(p, attr)) 5916 goto change; 5917 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) 5918 goto change; 5919 5920 p->sched_reset_on_fork = reset_on_fork; 5921 retval = 0; 5922 goto unlock; 5923 } 5924 change: 5925 5926 if (user) { 5927 #ifdef CONFIG_RT_GROUP_SCHED 5928 /* 5929 * Do not allow realtime tasks into groups that have no runtime 5930 * assigned. 
5931 */ 5932 if (rt_bandwidth_enabled() && rt_policy(policy) && 5933 task_group(p)->rt_bandwidth.rt_runtime == 0 && 5934 !task_group_is_autogroup(task_group(p))) { 5935 retval = -EPERM; 5936 goto unlock; 5937 } 5938 #endif 5939 #ifdef CONFIG_SMP 5940 if (dl_bandwidth_enabled() && dl_policy(policy) && 5941 !(attr->sched_flags & SCHED_FLAG_SUGOV)) { 5942 cpumask_t *span = rq->rd->span; 5943 5944 /* 5945 * Don't allow tasks with an affinity mask smaller than 5946 * the entire root_domain to become SCHED_DEADLINE. We 5947 * will also fail if there's no bandwidth available. 5948 */ 5949 if (!cpumask_subset(span, p->cpus_ptr) || 5950 rq->rd->dl_bw.bw == 0) { 5951 retval = -EPERM; 5952 goto unlock; 5953 } 5954 } 5955 #endif 5956 } 5957 5958 /* Re-check policy now with rq lock held: */ 5959 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 5960 policy = oldpolicy = -1; 5961 task_rq_unlock(rq, p, &rf); 5962 if (pi) 5963 cpuset_read_unlock(); 5964 goto recheck; 5965 } 5966 5967 /* 5968 * If setscheduling to SCHED_DEADLINE (or changing the parameters 5969 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 5970 * is available. 5971 */ 5972 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { 5973 retval = -EBUSY; 5974 goto unlock; 5975 } 5976 5977 p->sched_reset_on_fork = reset_on_fork; 5978 oldprio = p->prio; 5979 5980 if (pi) { 5981 /* 5982 * Take priority boosted tasks into account. If the new 5983 * effective priority is unchanged, we just store the new 5984 * normal parameters and do not touch the scheduler class and 5985 * the runqueue. This will be done when the task deboost 5986 * itself. 5987 */ 5988 new_effective_prio = rt_effective_prio(p, newprio); 5989 if (new_effective_prio == oldprio) 5990 queue_flags &= ~DEQUEUE_MOVE; 5991 } 5992 5993 queued = task_on_rq_queued(p); 5994 running = task_current(rq, p); 5995 if (queued) 5996 dequeue_task(rq, p, queue_flags); 5997 if (running) 5998 put_prev_task(rq, p); 5999 6000 prev_class = p->sched_class; 6001 6002 __setscheduler(rq, p, attr, pi); 6003 __setscheduler_uclamp(p, attr); 6004 6005 if (queued) { 6006 /* 6007 * We enqueue to tail when the priority of a task is 6008 * increased (user space view). 6009 */ 6010 if (oldprio < p->prio) 6011 queue_flags |= ENQUEUE_HEAD; 6012 6013 enqueue_task(rq, p, queue_flags); 6014 } 6015 if (running) 6016 set_next_task(rq, p); 6017 6018 check_class_changed(rq, p, prev_class, oldprio); 6019 6020 /* Avoid rq from going away on us: */ 6021 preempt_disable(); 6022 head = splice_balance_callbacks(rq); 6023 task_rq_unlock(rq, p, &rf); 6024 6025 if (pi) { 6026 cpuset_read_unlock(); 6027 rt_mutex_adjust_pi(p); 6028 } 6029 6030 /* Run balance callbacks after we've adjusted the PI chain: */ 6031 balance_callbacks(rq, head); 6032 preempt_enable(); 6033 6034 return 0; 6035 6036 unlock: 6037 task_rq_unlock(rq, p, &rf); 6038 if (pi) 6039 cpuset_read_unlock(); 6040 return retval; 6041 } 6042 6043 static int _sched_setscheduler(struct task_struct *p, int policy, 6044 const struct sched_param *param, bool check) 6045 { 6046 struct sched_attr attr = { 6047 .sched_policy = policy, 6048 .sched_priority = param->sched_priority, 6049 .sched_nice = PRIO_TO_NICE(p->static_prio), 6050 }; 6051 6052 /* Fixup the legacy SCHED_RESET_ON_FORK hack. 
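 *
 * Legacy ABI: callers may OR SCHED_RESET_ON_FORK into the policy word itself
 * (e.g. SCHED_FIFO | SCHED_RESET_ON_FORK); translate that into the
 * corresponding sched_attr flag and strip it from the policy below.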
*/ 6053 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 6054 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 6055 policy &= ~SCHED_RESET_ON_FORK; 6056 attr.sched_policy = policy; 6057 } 6058 6059 return __sched_setscheduler(p, &attr, check, true); 6060 } 6061 /** 6062 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 6063 * @p: the task in question. 6064 * @policy: new policy. 6065 * @param: structure containing the new RT priority. 6066 * 6067 * Use sched_set_fifo(), read its comment. 6068 * 6069 * Return: 0 on success. An error code otherwise. 6070 * 6071 * NOTE that the task may be already dead. 6072 */ 6073 int sched_setscheduler(struct task_struct *p, int policy, 6074 const struct sched_param *param) 6075 { 6076 return _sched_setscheduler(p, policy, param, true); 6077 } 6078 6079 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 6080 { 6081 return __sched_setscheduler(p, attr, true, true); 6082 } 6083 6084 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) 6085 { 6086 return __sched_setscheduler(p, attr, false, true); 6087 } 6088 6089 /** 6090 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 6091 * @p: the task in question. 6092 * @policy: new policy. 6093 * @param: structure containing the new RT priority. 6094 * 6095 * Just like sched_setscheduler, only don't bother checking if the 6096 * current context has permission. For example, this is needed in 6097 * stop_machine(): we create temporary high priority worker threads, 6098 * but our caller might not have that capability. 6099 * 6100 * Return: 0 on success. An error code otherwise. 6101 */ 6102 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 6103 const struct sched_param *param) 6104 { 6105 return _sched_setscheduler(p, policy, param, false); 6106 } 6107 6108 /* 6109 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally 6110 * incapable of resource management, which is the one thing an OS really should 6111 * be doing. 6112 * 6113 * This is of course the reason it is limited to privileged users only. 6114 * 6115 * Worse still; it is fundamentally impossible to compose static priority 6116 * workloads. You cannot take two correctly working static prio workloads 6117 * and smash them together and still expect them to work. 6118 * 6119 * For this reason 'all' FIFO tasks the kernel creates are basically at: 6120 * 6121 * MAX_RT_PRIO / 2 6122 * 6123 * The administrator _MUST_ configure the system, the kernel simply doesn't 6124 * know enough information to make a sensible choice. 6125 */ 6126 void sched_set_fifo(struct task_struct *p) 6127 { 6128 struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; 6129 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 6130 } 6131 EXPORT_SYMBOL_GPL(sched_set_fifo); 6132 6133 /* 6134 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. 
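 *
 * A sketch of typical usage from a driver that spawns a kthread (the thread
 * function and name below are made up for illustration):
 *
 *	tsk = kthread_run(my_thread_fn, NULL, "my-worker");
 *	if (!IS_ERR(tsk))
 *		sched_set_fifo_low(tsk);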
6135 */ 6136 void sched_set_fifo_low(struct task_struct *p) 6137 { 6138 struct sched_param sp = { .sched_priority = 1 }; 6139 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 6140 } 6141 EXPORT_SYMBOL_GPL(sched_set_fifo_low); 6142 6143 void sched_set_normal(struct task_struct *p, int nice) 6144 { 6145 struct sched_attr attr = { 6146 .sched_policy = SCHED_NORMAL, 6147 .sched_nice = nice, 6148 }; 6149 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); 6150 } 6151 EXPORT_SYMBOL_GPL(sched_set_normal); 6152 6153 static int 6154 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 6155 { 6156 struct sched_param lparam; 6157 struct task_struct *p; 6158 int retval; 6159 6160 if (!param || pid < 0) 6161 return -EINVAL; 6162 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 6163 return -EFAULT; 6164 6165 rcu_read_lock(); 6166 retval = -ESRCH; 6167 p = find_process_by_pid(pid); 6168 if (likely(p)) 6169 get_task_struct(p); 6170 rcu_read_unlock(); 6171 6172 if (likely(p)) { 6173 retval = sched_setscheduler(p, policy, &lparam); 6174 put_task_struct(p); 6175 } 6176 6177 return retval; 6178 } 6179 6180 /* 6181 * Mimics kernel/events/core.c perf_copy_attr(). 6182 */ 6183 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) 6184 { 6185 u32 size; 6186 int ret; 6187 6188 /* Zero the full structure, so that a short copy will be nice: */ 6189 memset(attr, 0, sizeof(*attr)); 6190 6191 ret = get_user(size, &uattr->size); 6192 if (ret) 6193 return ret; 6194 6195 /* ABI compatibility quirk: */ 6196 if (!size) 6197 size = SCHED_ATTR_SIZE_VER0; 6198 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) 6199 goto err_size; 6200 6201 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); 6202 if (ret) { 6203 if (ret == -E2BIG) 6204 goto err_size; 6205 return ret; 6206 } 6207 6208 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 6209 size < SCHED_ATTR_SIZE_VER1) 6210 return -EINVAL; 6211 6212 /* 6213 * XXX: Do we want to be lenient like existing syscalls; or do we want 6214 * to be strict and return an error on out-of-bounds values? 6215 */ 6216 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 6217 6218 return 0; 6219 6220 err_size: 6221 put_user(sizeof(*attr), &uattr->size); 6222 return -E2BIG; 6223 } 6224 6225 /** 6226 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 6227 * @pid: the pid in question. 6228 * @policy: new policy. 6229 * @param: structure containing the new RT priority. 6230 * 6231 * Return: 0 on success. An error code otherwise. 6232 */ 6233 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) 6234 { 6235 if (policy < 0) 6236 return -EINVAL; 6237 6238 return do_sched_setscheduler(pid, policy, param); 6239 } 6240 6241 /** 6242 * sys_sched_setparam - set/change the RT priority of a thread 6243 * @pid: the pid in question. 6244 * @param: structure containing the new RT priority. 6245 * 6246 * Return: 0 on success. An error code otherwise. 6247 */ 6248 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 6249 { 6250 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 6251 } 6252 6253 /** 6254 * sys_sched_setattr - same as above, but with extended sched_attr 6255 * @pid: the pid in question. 6256 * @uattr: structure containing the extended parameters. 6257 * @flags: for future extension. 
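 *
 * Return: 0 on success. An error code otherwise.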
6258 */ 6259 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 6260 unsigned int, flags) 6261 { 6262 struct sched_attr attr; 6263 struct task_struct *p; 6264 int retval; 6265 6266 if (!uattr || pid < 0 || flags) 6267 return -EINVAL; 6268 6269 retval = sched_copy_attr(uattr, &attr); 6270 if (retval) 6271 return retval; 6272 6273 if ((int)attr.sched_policy < 0) 6274 return -EINVAL; 6275 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) 6276 attr.sched_policy = SETPARAM_POLICY; 6277 6278 rcu_read_lock(); 6279 retval = -ESRCH; 6280 p = find_process_by_pid(pid); 6281 if (likely(p)) 6282 get_task_struct(p); 6283 rcu_read_unlock(); 6284 6285 if (likely(p)) { 6286 retval = sched_setattr(p, &attr); 6287 put_task_struct(p); 6288 } 6289 6290 return retval; 6291 } 6292 6293 /** 6294 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 6295 * @pid: the pid in question. 6296 * 6297 * Return: On success, the policy of the thread. Otherwise, a negative error 6298 * code. 6299 */ 6300 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 6301 { 6302 struct task_struct *p; 6303 int retval; 6304 6305 if (pid < 0) 6306 return -EINVAL; 6307 6308 retval = -ESRCH; 6309 rcu_read_lock(); 6310 p = find_process_by_pid(pid); 6311 if (p) { 6312 retval = security_task_getscheduler(p); 6313 if (!retval) 6314 retval = p->policy 6315 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 6316 } 6317 rcu_read_unlock(); 6318 return retval; 6319 } 6320 6321 /** 6322 * sys_sched_getparam - get the RT priority of a thread 6323 * @pid: the pid in question. 6324 * @param: structure containing the RT priority. 6325 * 6326 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 6327 * code. 6328 */ 6329 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 6330 { 6331 struct sched_param lp = { .sched_priority = 0 }; 6332 struct task_struct *p; 6333 int retval; 6334 6335 if (!param || pid < 0) 6336 return -EINVAL; 6337 6338 rcu_read_lock(); 6339 p = find_process_by_pid(pid); 6340 retval = -ESRCH; 6341 if (!p) 6342 goto out_unlock; 6343 6344 retval = security_task_getscheduler(p); 6345 if (retval) 6346 goto out_unlock; 6347 6348 if (task_has_rt_policy(p)) 6349 lp.sched_priority = p->rt_priority; 6350 rcu_read_unlock(); 6351 6352 /* 6353 * This one might sleep, we cannot do it with a spinlock held ... 6354 */ 6355 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 6356 6357 return retval; 6358 6359 out_unlock: 6360 rcu_read_unlock(); 6361 return retval; 6362 } 6363 6364 /* 6365 * Copy the kernel size attribute structure (which might be larger 6366 * than what user-space knows about) to user-space. 6367 * 6368 * Note that all cases are valid: user-space buffer can be larger or 6369 * smaller than the kernel-space buffer. The usual case is that both 6370 * have the same size. 6371 */ 6372 static int 6373 sched_attr_copy_to_user(struct sched_attr __user *uattr, 6374 struct sched_attr *kattr, 6375 unsigned int usize) 6376 { 6377 unsigned int ksize = sizeof(*kattr); 6378 6379 if (!access_ok(uattr, usize)) 6380 return -EFAULT; 6381 6382 /* 6383 * sched_getattr() ABI forwards and backwards compatibility: 6384 * 6385 * If usize == ksize then we just copy everything to user-space and all is good. 6386 * 6387 * If usize < ksize then we only copy as much as user-space has space for, 6388 * this keeps ABI compatibility as well. We skip the rest. 
6389 * 6390 * If usize > ksize then user-space is using a newer version of the ABI, 6391 * which part the kernel doesn't know about. Just ignore it - tooling can 6392 * detect the kernel's knowledge of attributes from the attr->size value 6393 * which is set to ksize in this case. 6394 */ 6395 kattr->size = min(usize, ksize); 6396 6397 if (copy_to_user(uattr, kattr, kattr->size)) 6398 return -EFAULT; 6399 6400 return 0; 6401 } 6402 6403 /** 6404 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 6405 * @pid: the pid in question. 6406 * @uattr: structure containing the extended parameters. 6407 * @usize: sizeof(attr) for fwd/bwd comp. 6408 * @flags: for future extension. 6409 */ 6410 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 6411 unsigned int, usize, unsigned int, flags) 6412 { 6413 struct sched_attr kattr = { }; 6414 struct task_struct *p; 6415 int retval; 6416 6417 if (!uattr || pid < 0 || usize > PAGE_SIZE || 6418 usize < SCHED_ATTR_SIZE_VER0 || flags) 6419 return -EINVAL; 6420 6421 rcu_read_lock(); 6422 p = find_process_by_pid(pid); 6423 retval = -ESRCH; 6424 if (!p) 6425 goto out_unlock; 6426 6427 retval = security_task_getscheduler(p); 6428 if (retval) 6429 goto out_unlock; 6430 6431 kattr.sched_policy = p->policy; 6432 if (p->sched_reset_on_fork) 6433 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 6434 if (task_has_dl_policy(p)) 6435 __getparam_dl(p, &kattr); 6436 else if (task_has_rt_policy(p)) 6437 kattr.sched_priority = p->rt_priority; 6438 else 6439 kattr.sched_nice = task_nice(p); 6440 6441 #ifdef CONFIG_UCLAMP_TASK 6442 /* 6443 * This could race with another potential updater, but this is fine 6444 * because it'll correctly read the old or the new value. We don't need 6445 * to guarantee who wins the race as long as it doesn't return garbage. 6446 */ 6447 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 6448 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 6449 #endif 6450 6451 rcu_read_unlock(); 6452 6453 return sched_attr_copy_to_user(uattr, &kattr, usize); 6454 6455 out_unlock: 6456 rcu_read_unlock(); 6457 return retval; 6458 } 6459 6460 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 6461 { 6462 cpumask_var_t cpus_allowed, new_mask; 6463 struct task_struct *p; 6464 int retval; 6465 6466 rcu_read_lock(); 6467 6468 p = find_process_by_pid(pid); 6469 if (!p) { 6470 rcu_read_unlock(); 6471 return -ESRCH; 6472 } 6473 6474 /* Prevent p going away */ 6475 get_task_struct(p); 6476 rcu_read_unlock(); 6477 6478 if (p->flags & PF_NO_SETAFFINITY) { 6479 retval = -EINVAL; 6480 goto out_put_task; 6481 } 6482 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 6483 retval = -ENOMEM; 6484 goto out_put_task; 6485 } 6486 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 6487 retval = -ENOMEM; 6488 goto out_free_cpus_allowed; 6489 } 6490 retval = -EPERM; 6491 if (!check_same_owner(p)) { 6492 rcu_read_lock(); 6493 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 6494 rcu_read_unlock(); 6495 goto out_free_new_mask; 6496 } 6497 rcu_read_unlock(); 6498 } 6499 6500 retval = security_task_setscheduler(p); 6501 if (retval) 6502 goto out_free_new_mask; 6503 6504 6505 cpuset_cpus_allowed(p, cpus_allowed); 6506 cpumask_and(new_mask, in_mask, cpus_allowed); 6507 6508 /* 6509 * Since bandwidth control happens on root_domain basis, 6510 * if admission test is enabled, we only admit -deadline 6511 * tasks allowed to run on all the CPUs in the task's 6512 * root_domain. 
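 *
 * For instance (hypothetical topology): a SCHED_DEADLINE task whose
 * root_domain spans CPUs 0-3 cannot have its affinity reduced to CPUs 0-1;
 * the check below rejects that with -EBUSY.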
6513 */ 6514 #ifdef CONFIG_SMP 6515 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 6516 rcu_read_lock(); 6517 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { 6518 retval = -EBUSY; 6519 rcu_read_unlock(); 6520 goto out_free_new_mask; 6521 } 6522 rcu_read_unlock(); 6523 } 6524 #endif 6525 again: 6526 retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK); 6527 6528 if (!retval) { 6529 cpuset_cpus_allowed(p, cpus_allowed); 6530 if (!cpumask_subset(new_mask, cpus_allowed)) { 6531 /* 6532 * We must have raced with a concurrent cpuset 6533 * update. Just reset the cpus_allowed to the 6534 * cpuset's cpus_allowed 6535 */ 6536 cpumask_copy(new_mask, cpus_allowed); 6537 goto again; 6538 } 6539 } 6540 out_free_new_mask: 6541 free_cpumask_var(new_mask); 6542 out_free_cpus_allowed: 6543 free_cpumask_var(cpus_allowed); 6544 out_put_task: 6545 put_task_struct(p); 6546 return retval; 6547 } 6548 6549 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 6550 struct cpumask *new_mask) 6551 { 6552 if (len < cpumask_size()) 6553 cpumask_clear(new_mask); 6554 else if (len > cpumask_size()) 6555 len = cpumask_size(); 6556 6557 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 6558 } 6559 6560 /** 6561 * sys_sched_setaffinity - set the CPU affinity of a process 6562 * @pid: pid of the process 6563 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 6564 * @user_mask_ptr: user-space pointer to the new CPU mask 6565 * 6566 * Return: 0 on success. An error code otherwise. 6567 */ 6568 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 6569 unsigned long __user *, user_mask_ptr) 6570 { 6571 cpumask_var_t new_mask; 6572 int retval; 6573 6574 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 6575 return -ENOMEM; 6576 6577 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 6578 if (retval == 0) 6579 retval = sched_setaffinity(pid, new_mask); 6580 free_cpumask_var(new_mask); 6581 return retval; 6582 } 6583 6584 long sched_getaffinity(pid_t pid, struct cpumask *mask) 6585 { 6586 struct task_struct *p; 6587 unsigned long flags; 6588 int retval; 6589 6590 rcu_read_lock(); 6591 6592 retval = -ESRCH; 6593 p = find_process_by_pid(pid); 6594 if (!p) 6595 goto out_unlock; 6596 6597 retval = security_task_getscheduler(p); 6598 if (retval) 6599 goto out_unlock; 6600 6601 raw_spin_lock_irqsave(&p->pi_lock, flags); 6602 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); 6603 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 6604 6605 out_unlock: 6606 rcu_read_unlock(); 6607 6608 return retval; 6609 } 6610 6611 /** 6612 * sys_sched_getaffinity - get the CPU affinity of a process 6613 * @pid: pid of the process 6614 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 6615 * @user_mask_ptr: user-space pointer to hold the current CPU mask 6616 * 6617 * Return: size of CPU mask copied to user_mask_ptr on success. An 6618 * error code otherwise. 
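 *
 * Note that the raw syscall returns the number of bytes copied rather than
 * zero; glibc's wrapper hides this. A hypothetical raw invocation from
 * user space:
 *
 *	cpu_set_t set;
 *	long n = syscall(__NR_sched_getaffinity, 0, sizeof(set), &set);
 *
 * where, on success, 'n' is min(sizeof(set), the kernel's cpumask_size()).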
6619 */ 6620 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 6621 unsigned long __user *, user_mask_ptr) 6622 { 6623 int ret; 6624 cpumask_var_t mask; 6625 6626 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 6627 return -EINVAL; 6628 if (len & (sizeof(unsigned long)-1)) 6629 return -EINVAL; 6630 6631 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 6632 return -ENOMEM; 6633 6634 ret = sched_getaffinity(pid, mask); 6635 if (ret == 0) { 6636 unsigned int retlen = min(len, cpumask_size()); 6637 6638 if (copy_to_user(user_mask_ptr, mask, retlen)) 6639 ret = -EFAULT; 6640 else 6641 ret = retlen; 6642 } 6643 free_cpumask_var(mask); 6644 6645 return ret; 6646 } 6647 6648 static void do_sched_yield(void) 6649 { 6650 struct rq_flags rf; 6651 struct rq *rq; 6652 6653 rq = this_rq_lock_irq(&rf); 6654 6655 schedstat_inc(rq->yld_count); 6656 current->sched_class->yield_task(rq); 6657 6658 preempt_disable(); 6659 rq_unlock_irq(rq, &rf); 6660 sched_preempt_enable_no_resched(); 6661 6662 schedule(); 6663 } 6664 6665 /** 6666 * sys_sched_yield - yield the current processor to other threads. 6667 * 6668 * This function yields the current CPU to other tasks. If there are no 6669 * other threads running on this CPU then this function will return. 6670 * 6671 * Return: 0. 6672 */ 6673 SYSCALL_DEFINE0(sched_yield) 6674 { 6675 do_sched_yield(); 6676 return 0; 6677 } 6678 6679 #ifndef CONFIG_PREEMPTION 6680 int __sched _cond_resched(void) 6681 { 6682 if (should_resched(0)) { 6683 preempt_schedule_common(); 6684 return 1; 6685 } 6686 rcu_all_qs(); 6687 return 0; 6688 } 6689 EXPORT_SYMBOL(_cond_resched); 6690 #endif 6691 6692 /* 6693 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 6694 * call schedule, and on return reacquire the lock. 6695 * 6696 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level 6697 * operations here to prevent schedule() from being called twice (once via 6698 * spin_unlock(), once by hand). 6699 */ 6700 int __cond_resched_lock(spinlock_t *lock) 6701 { 6702 int resched = should_resched(PREEMPT_LOCK_OFFSET); 6703 int ret = 0; 6704 6705 lockdep_assert_held(lock); 6706 6707 if (spin_needbreak(lock) || resched) { 6708 spin_unlock(lock); 6709 if (resched) 6710 preempt_schedule_common(); 6711 else 6712 cpu_relax(); 6713 ret = 1; 6714 spin_lock(lock); 6715 } 6716 return ret; 6717 } 6718 EXPORT_SYMBOL(__cond_resched_lock); 6719 6720 /** 6721 * yield - yield the current processor to other threads. 6722 * 6723 * Do not ever use this function, there's a 99% chance you're doing it wrong. 6724 * 6725 * The scheduler is at all times free to pick the calling task as the most 6726 * eligible task to run, if removing the yield() call from your code breaks 6727 * it, it's already broken. 6728 * 6729 * Typical broken usage is: 6730 * 6731 * while (!event) 6732 * yield(); 6733 * 6734 * where one assumes that yield() will let 'the other' process run that will 6735 * make event true. If the current task is a SCHED_FIFO task that will never 6736 * happen. Never use yield() as a progress guarantee!! 6737 * 6738 * If you want to use yield() to wait for something, use wait_event(). 6739 * If you want to use yield() to be 'nice' for others, use cond_resched(). 6740 * If you still want to use yield(), do not! 
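 *
 * A sketch of the preferred replacement for the broken loop above ('wq' and
 * 'event' are illustrative names):
 *
 *	wait_event(wq, event);
 *
 * which sleeps until 'event' becomes true instead of burning CPU in yield().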
6741 */ 6742 void __sched yield(void) 6743 { 6744 set_current_state(TASK_RUNNING); 6745 do_sched_yield(); 6746 } 6747 EXPORT_SYMBOL(yield); 6748 6749 /** 6750 * yield_to - yield the current processor to another thread in 6751 * your thread group, or accelerate that thread toward the 6752 * processor it's on. 6753 * @p: target task 6754 * @preempt: whether task preemption is allowed or not 6755 * 6756 * It's the caller's job to ensure that the target task struct 6757 * can't go away on us before we can do any checks. 6758 * 6759 * Return: 6760 * true (>0) if we indeed boosted the target task. 6761 * false (0) if we failed to boost the target. 6762 * -ESRCH if there's no task to yield to. 6763 */ 6764 int __sched yield_to(struct task_struct *p, bool preempt) 6765 { 6766 struct task_struct *curr = current; 6767 struct rq *rq, *p_rq; 6768 unsigned long flags; 6769 int yielded = 0; 6770 6771 local_irq_save(flags); 6772 rq = this_rq(); 6773 6774 again: 6775 p_rq = task_rq(p); 6776 /* 6777 * If we're the only runnable task on the rq and target rq also 6778 * has only one task, there's absolutely no point in yielding. 6779 */ 6780 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 6781 yielded = -ESRCH; 6782 goto out_irq; 6783 } 6784 6785 double_rq_lock(rq, p_rq); 6786 if (task_rq(p) != p_rq) { 6787 double_rq_unlock(rq, p_rq); 6788 goto again; 6789 } 6790 6791 if (!curr->sched_class->yield_to_task) 6792 goto out_unlock; 6793 6794 if (curr->sched_class != p->sched_class) 6795 goto out_unlock; 6796 6797 if (task_running(p_rq, p) || p->state) 6798 goto out_unlock; 6799 6800 yielded = curr->sched_class->yield_to_task(rq, p); 6801 if (yielded) { 6802 schedstat_inc(rq->yld_count); 6803 /* 6804 * Make p's CPU reschedule; pick_next_entity takes care of 6805 * fairness. 6806 */ 6807 if (preempt && rq != p_rq) 6808 resched_curr(p_rq); 6809 } 6810 6811 out_unlock: 6812 double_rq_unlock(rq, p_rq); 6813 out_irq: 6814 local_irq_restore(flags); 6815 6816 if (yielded > 0) 6817 schedule(); 6818 6819 return yielded; 6820 } 6821 EXPORT_SYMBOL_GPL(yield_to); 6822 6823 int io_schedule_prepare(void) 6824 { 6825 int old_iowait = current->in_iowait; 6826 6827 current->in_iowait = 1; 6828 blk_schedule_flush_plug(current); 6829 6830 return old_iowait; 6831 } 6832 6833 void io_schedule_finish(int token) 6834 { 6835 current->in_iowait = token; 6836 } 6837 6838 /* 6839 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 6840 * that process accounting knows that this is a task in IO wait state. 6841 */ 6842 long __sched io_schedule_timeout(long timeout) 6843 { 6844 int token; 6845 long ret; 6846 6847 token = io_schedule_prepare(); 6848 ret = schedule_timeout(timeout); 6849 io_schedule_finish(token); 6850 6851 return ret; 6852 } 6853 EXPORT_SYMBOL(io_schedule_timeout); 6854 6855 void __sched io_schedule(void) 6856 { 6857 int token; 6858 6859 token = io_schedule_prepare(); 6860 schedule(); 6861 io_schedule_finish(token); 6862 } 6863 EXPORT_SYMBOL(io_schedule); 6864 6865 /** 6866 * sys_sched_get_priority_max - return maximum RT priority. 6867 * @policy: scheduling class. 6868 * 6869 * Return: On success, this syscall returns the maximum 6870 * rt_priority that can be used by a given scheduling class. 6871 * On failure, a negative error code is returned. 
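 *
 * For example: SCHED_FIFO and SCHED_RR report MAX_USER_RT_PRIO-1 (99 in the
 * usual configuration), while SCHED_NORMAL, SCHED_BATCH, SCHED_IDLE and
 * SCHED_DEADLINE report 0.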
6872 */ 6873 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 6874 { 6875 int ret = -EINVAL; 6876 6877 switch (policy) { 6878 case SCHED_FIFO: 6879 case SCHED_RR: 6880 ret = MAX_USER_RT_PRIO-1; 6881 break; 6882 case SCHED_DEADLINE: 6883 case SCHED_NORMAL: 6884 case SCHED_BATCH: 6885 case SCHED_IDLE: 6886 ret = 0; 6887 break; 6888 } 6889 return ret; 6890 } 6891 6892 /** 6893 * sys_sched_get_priority_min - return minimum RT priority. 6894 * @policy: scheduling class. 6895 * 6896 * Return: On success, this syscall returns the minimum 6897 * rt_priority that can be used by a given scheduling class. 6898 * On failure, a negative error code is returned. 6899 */ 6900 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 6901 { 6902 int ret = -EINVAL; 6903 6904 switch (policy) { 6905 case SCHED_FIFO: 6906 case SCHED_RR: 6907 ret = 1; 6908 break; 6909 case SCHED_DEADLINE: 6910 case SCHED_NORMAL: 6911 case SCHED_BATCH: 6912 case SCHED_IDLE: 6913 ret = 0; 6914 } 6915 return ret; 6916 } 6917 6918 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 6919 { 6920 struct task_struct *p; 6921 unsigned int time_slice; 6922 struct rq_flags rf; 6923 struct rq *rq; 6924 int retval; 6925 6926 if (pid < 0) 6927 return -EINVAL; 6928 6929 retval = -ESRCH; 6930 rcu_read_lock(); 6931 p = find_process_by_pid(pid); 6932 if (!p) 6933 goto out_unlock; 6934 6935 retval = security_task_getscheduler(p); 6936 if (retval) 6937 goto out_unlock; 6938 6939 rq = task_rq_lock(p, &rf); 6940 time_slice = 0; 6941 if (p->sched_class->get_rr_interval) 6942 time_slice = p->sched_class->get_rr_interval(rq, p); 6943 task_rq_unlock(rq, p, &rf); 6944 6945 rcu_read_unlock(); 6946 jiffies_to_timespec64(time_slice, t); 6947 return 0; 6948 6949 out_unlock: 6950 rcu_read_unlock(); 6951 return retval; 6952 } 6953 6954 /** 6955 * sys_sched_rr_get_interval - return the default timeslice of a process. 6956 * @pid: pid of the process. 6957 * @interval: userspace pointer to the timeslice value. 6958 * 6959 * this syscall writes the default timeslice value of a given process 6960 * into the user-space timespec buffer. A value of '0' means infinity. 6961 * 6962 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 6963 * an error code. 
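 *
 * For example, a SCHED_FIFO task reports 0 (it runs until it blocks, yields
 * or is preempted), whereas a SCHED_RR task reports its round-robin quantum.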
6964 */ 6965 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 6966 struct __kernel_timespec __user *, interval) 6967 { 6968 struct timespec64 t; 6969 int retval = sched_rr_get_interval(pid, &t); 6970 6971 if (retval == 0) 6972 retval = put_timespec64(&t, interval); 6973 6974 return retval; 6975 } 6976 6977 #ifdef CONFIG_COMPAT_32BIT_TIME 6978 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, 6979 struct old_timespec32 __user *, interval) 6980 { 6981 struct timespec64 t; 6982 int retval = sched_rr_get_interval(pid, &t); 6983 6984 if (retval == 0) 6985 retval = put_old_timespec32(&t, interval); 6986 return retval; 6987 } 6988 #endif 6989 6990 void sched_show_task(struct task_struct *p) 6991 { 6992 unsigned long free = 0; 6993 int ppid; 6994 6995 if (!try_get_task_stack(p)) 6996 return; 6997 6998 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); 6999 7000 if (p->state == TASK_RUNNING) 7001 pr_cont(" running task "); 7002 #ifdef CONFIG_DEBUG_STACK_USAGE 7003 free = stack_not_used(p); 7004 #endif 7005 ppid = 0; 7006 rcu_read_lock(); 7007 if (pid_alive(p)) 7008 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 7009 rcu_read_unlock(); 7010 pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n", 7011 free, task_pid_nr(p), ppid, 7012 (unsigned long)task_thread_info(p)->flags); 7013 7014 print_worker_info(KERN_INFO, p); 7015 print_stop_info(KERN_INFO, p); 7016 show_stack(p, NULL, KERN_INFO); 7017 put_task_stack(p); 7018 } 7019 EXPORT_SYMBOL_GPL(sched_show_task); 7020 7021 static inline bool 7022 state_filter_match(unsigned long state_filter, struct task_struct *p) 7023 { 7024 /* no filter, everything matches */ 7025 if (!state_filter) 7026 return true; 7027 7028 /* filter, but doesn't match */ 7029 if (!(p->state & state_filter)) 7030 return false; 7031 7032 /* 7033 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows 7034 * TASK_KILLABLE). 7035 */ 7036 if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) 7037 return false; 7038 7039 return true; 7040 } 7041 7042 7043 void show_state_filter(unsigned long state_filter) 7044 { 7045 struct task_struct *g, *p; 7046 7047 rcu_read_lock(); 7048 for_each_process_thread(g, p) { 7049 /* 7050 * reset the NMI-timeout, listing all files on a slow 7051 * console might take a lot of time: 7052 * Also, reset softlockup watchdogs on all CPUs, because 7053 * another CPU might be blocked waiting for us to process 7054 * an IPI. 7055 */ 7056 touch_nmi_watchdog(); 7057 touch_all_softlockup_watchdogs(); 7058 if (state_filter_match(state_filter, p)) 7059 sched_show_task(p); 7060 } 7061 7062 #ifdef CONFIG_SCHED_DEBUG 7063 if (!state_filter) 7064 sysrq_sched_debug_show(); 7065 #endif 7066 rcu_read_unlock(); 7067 /* 7068 * Only show locks if all tasks are dumped: 7069 */ 7070 if (!state_filter) 7071 debug_show_all_locks(); 7072 } 7073 7074 /** 7075 * init_idle - set up an idle thread for a given CPU 7076 * @idle: task in question 7077 * @cpu: CPU the idle task belongs to 7078 * 7079 * NOTE: this function does not set the idle thread's NEED_RESCHED 7080 * flag, to make booting more robust. 
7081 */ 7082 void init_idle(struct task_struct *idle, int cpu) 7083 { 7084 struct rq *rq = cpu_rq(cpu); 7085 unsigned long flags; 7086 7087 __sched_fork(0, idle); 7088 7089 raw_spin_lock_irqsave(&idle->pi_lock, flags); 7090 raw_spin_lock(&rq->lock); 7091 7092 idle->state = TASK_RUNNING; 7093 idle->se.exec_start = sched_clock(); 7094 idle->flags |= PF_IDLE; 7095 7096 scs_task_reset(idle); 7097 kasan_unpoison_task_stack(idle); 7098 7099 #ifdef CONFIG_SMP 7100 /* 7101 * It's possible that init_idle() gets called multiple times on a task, 7102 * in that case do_set_cpus_allowed() will not do the right thing. 7103 * 7104 * And since this is boot we can forgo the serialization. 7105 */ 7106 set_cpus_allowed_common(idle, cpumask_of(cpu), 0); 7107 #endif 7108 /* 7109 * We're having a chicken and egg problem, even though we are 7110 * holding rq->lock, the CPU isn't yet set to this CPU so the 7111 * lockdep check in task_group() will fail. 7112 * 7113 * Similar case to sched_fork(). / Alternatively we could 7114 * use task_rq_lock() here and obtain the other rq->lock. 7115 * 7116 * Silence PROVE_RCU 7117 */ 7118 rcu_read_lock(); 7119 __set_task_cpu(idle, cpu); 7120 rcu_read_unlock(); 7121 7122 rq->idle = idle; 7123 rcu_assign_pointer(rq->curr, idle); 7124 idle->on_rq = TASK_ON_RQ_QUEUED; 7125 #ifdef CONFIG_SMP 7126 idle->on_cpu = 1; 7127 #endif 7128 raw_spin_unlock(&rq->lock); 7129 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 7130 7131 /* Set the preempt count _outside_ the spinlocks! */ 7132 init_idle_preempt_count(idle, cpu); 7133 7134 /* 7135 * The idle tasks have their own, simple scheduling class: 7136 */ 7137 idle->sched_class = &idle_sched_class; 7138 ftrace_graph_init_idle_task(idle, cpu); 7139 vtime_init_idle(idle, cpu); 7140 #ifdef CONFIG_SMP 7141 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 7142 #endif 7143 } 7144 7145 #ifdef CONFIG_SMP 7146 7147 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 7148 const struct cpumask *trial) 7149 { 7150 int ret = 1; 7151 7152 if (!cpumask_weight(cur)) 7153 return ret; 7154 7155 ret = dl_cpuset_cpumask_can_shrink(cur, trial); 7156 7157 return ret; 7158 } 7159 7160 int task_can_attach(struct task_struct *p, 7161 const struct cpumask *cs_cpus_allowed) 7162 { 7163 int ret = 0; 7164 7165 /* 7166 * Kthreads which disallow setaffinity shouldn't be moved 7167 * to a new cpuset; we don't want to change their CPU 7168 * affinity and isolating such threads by their set of 7169 * allowed nodes is unnecessary. Thus, cpusets are not 7170 * applicable for such threads. This prevents checking for 7171 * success of set_cpus_allowed_ptr() on all attached tasks 7172 * before cpus_mask may be changed. 
7173 */ 7174 if (p->flags & PF_NO_SETAFFINITY) { 7175 ret = -EINVAL; 7176 goto out; 7177 } 7178 7179 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 7180 cs_cpus_allowed)) 7181 ret = dl_task_can_attach(p, cs_cpus_allowed); 7182 7183 out: 7184 return ret; 7185 } 7186 7187 bool sched_smp_initialized __read_mostly; 7188 7189 #ifdef CONFIG_NUMA_BALANCING 7190 /* Migrate current task p to target_cpu */ 7191 int migrate_task_to(struct task_struct *p, int target_cpu) 7192 { 7193 struct migration_arg arg = { p, target_cpu }; 7194 int curr_cpu = task_cpu(p); 7195 7196 if (curr_cpu == target_cpu) 7197 return 0; 7198 7199 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 7200 return -EINVAL; 7201 7202 /* TODO: This is not properly updating schedstats */ 7203 7204 trace_sched_move_numa(p, curr_cpu, target_cpu); 7205 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 7206 } 7207 7208 /* 7209 * Requeue a task on a given node and accurately track the number of NUMA 7210 * tasks on the runqueues 7211 */ 7212 void sched_setnuma(struct task_struct *p, int nid) 7213 { 7214 bool queued, running; 7215 struct rq_flags rf; 7216 struct rq *rq; 7217 7218 rq = task_rq_lock(p, &rf); 7219 queued = task_on_rq_queued(p); 7220 running = task_current(rq, p); 7221 7222 if (queued) 7223 dequeue_task(rq, p, DEQUEUE_SAVE); 7224 if (running) 7225 put_prev_task(rq, p); 7226 7227 p->numa_preferred_nid = nid; 7228 7229 if (queued) 7230 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 7231 if (running) 7232 set_next_task(rq, p); 7233 task_rq_unlock(rq, p, &rf); 7234 } 7235 #endif /* CONFIG_NUMA_BALANCING */ 7236 7237 #ifdef CONFIG_HOTPLUG_CPU 7238 /* 7239 * Ensure that the idle task is using init_mm right before its CPU goes 7240 * offline. 7241 */ 7242 void idle_task_exit(void) 7243 { 7244 struct mm_struct *mm = current->active_mm; 7245 7246 BUG_ON(cpu_online(smp_processor_id())); 7247 BUG_ON(current != this_rq()->idle); 7248 7249 if (mm != &init_mm) { 7250 switch_mm(mm, &init_mm, current); 7251 finish_arch_post_lock_switch(); 7252 } 7253 7254 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ 7255 } 7256 7257 static int __balance_push_cpu_stop(void *arg) 7258 { 7259 struct task_struct *p = arg; 7260 struct rq *rq = this_rq(); 7261 struct rq_flags rf; 7262 int cpu; 7263 7264 raw_spin_lock_irq(&p->pi_lock); 7265 rq_lock(rq, &rf); 7266 7267 update_rq_clock(rq); 7268 7269 if (task_rq(p) == rq && task_on_rq_queued(p)) { 7270 cpu = select_fallback_rq(rq->cpu, p); 7271 rq = __migrate_task(rq, &rf, p, cpu); 7272 } 7273 7274 rq_unlock(rq, &rf); 7275 raw_spin_unlock_irq(&p->pi_lock); 7276 7277 put_task_struct(p); 7278 7279 return 0; 7280 } 7281 7282 static DEFINE_PER_CPU(struct cpu_stop_work, push_work); 7283 7284 /* 7285 * Ensure we only run per-cpu kthreads once the CPU goes !active. 7286 */ 7287 static void balance_push(struct rq *rq) 7288 { 7289 struct task_struct *push_task = rq->curr; 7290 7291 lockdep_assert_held(&rq->lock); 7292 SCHED_WARN_ON(rq->cpu != smp_processor_id()); 7293 /* 7294 * Ensure the thing is persistent until balance_push_set(.on = false); 7295 */ 7296 rq->balance_callback = &balance_push_callback; 7297 7298 /* 7299 * Both the cpu-hotplug and stop task are in this case and are 7300 * required to complete the hotplug process. 7301 * 7302 * XXX: the idle task does not match kthread_is_per_cpu() due to 7303 * histerical raisins. 
7304 */ 7305 if (rq->idle == push_task || 7306 ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) || 7307 is_migration_disabled(push_task)) { 7308 7309 /* 7310 * If this is the idle task on the outgoing CPU try to wake 7311 * up the hotplug control thread which might wait for the 7312 * last task to vanish. The rcuwait_active() check is 7313 * accurate here because the waiter is pinned on this CPU 7314 * and can't obviously be running in parallel. 7315 * 7316 * On RT kernels this also has to check whether there are 7317 * pinned and scheduled out tasks on the runqueue. They 7318 * need to leave the migrate disabled section first. 7319 */ 7320 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && 7321 rcuwait_active(&rq->hotplug_wait)) { 7322 raw_spin_unlock(&rq->lock); 7323 rcuwait_wake_up(&rq->hotplug_wait); 7324 raw_spin_lock(&rq->lock); 7325 } 7326 return; 7327 } 7328 7329 get_task_struct(push_task); 7330 /* 7331 * Temporarily drop rq->lock such that we can wake-up the stop task. 7332 * Both preemption and IRQs are still disabled. 7333 */ 7334 raw_spin_unlock(&rq->lock); 7335 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, 7336 this_cpu_ptr(&push_work)); 7337 /* 7338 * At this point need_resched() is true and we'll take the loop in 7339 * schedule(). The next pick is obviously going to be the stop task 7340 * which kthread_is_per_cpu() and will push this task away. 7341 */ 7342 raw_spin_lock(&rq->lock); 7343 } 7344 7345 static void balance_push_set(int cpu, bool on) 7346 { 7347 struct rq *rq = cpu_rq(cpu); 7348 struct rq_flags rf; 7349 7350 rq_lock_irqsave(rq, &rf); 7351 rq->balance_push = on; 7352 if (on) { 7353 WARN_ON_ONCE(rq->balance_callback); 7354 rq->balance_callback = &balance_push_callback; 7355 } else if (rq->balance_callback == &balance_push_callback) { 7356 rq->balance_callback = NULL; 7357 } 7358 rq_unlock_irqrestore(rq, &rf); 7359 } 7360 7361 /* 7362 * Invoked from a CPUs hotplug control thread after the CPU has been marked 7363 * inactive. All tasks which are not per CPU kernel threads are either 7364 * pushed off this CPU now via balance_push() or placed on a different CPU 7365 * during wakeup. Wait until the CPU is quiescent. 7366 */ 7367 static void balance_hotplug_wait(void) 7368 { 7369 struct rq *rq = this_rq(); 7370 7371 rcuwait_wait_event(&rq->hotplug_wait, 7372 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), 7373 TASK_UNINTERRUPTIBLE); 7374 } 7375 7376 #else 7377 7378 static inline void balance_push(struct rq *rq) 7379 { 7380 } 7381 7382 static inline void balance_push_set(int cpu, bool on) 7383 { 7384 } 7385 7386 static inline void balance_hotplug_wait(void) 7387 { 7388 } 7389 7390 #endif /* CONFIG_HOTPLUG_CPU */ 7391 7392 void set_rq_online(struct rq *rq) 7393 { 7394 if (!rq->online) { 7395 const struct sched_class *class; 7396 7397 cpumask_set_cpu(rq->cpu, rq->rd->online); 7398 rq->online = 1; 7399 7400 for_each_class(class) { 7401 if (class->rq_online) 7402 class->rq_online(rq); 7403 } 7404 } 7405 } 7406 7407 void set_rq_offline(struct rq *rq) 7408 { 7409 if (rq->online) { 7410 const struct sched_class *class; 7411 7412 for_each_class(class) { 7413 if (class->rq_offline) 7414 class->rq_offline(rq); 7415 } 7416 7417 cpumask_clear_cpu(rq->cpu, rq->rd->online); 7418 rq->online = 0; 7419 } 7420 } 7421 7422 /* 7423 * used to mark begin/end of suspend/resume: 7424 */ 7425 static int num_cpus_frozen; 7426 7427 /* 7428 * Update cpusets according to cpu_active mask. 
If cpusets are 7429 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 7430 * around partition_sched_domains(). 7431 * 7432 * If we come here as part of a suspend/resume, don't touch cpusets because we 7433 * want to restore it back to its original state upon resume anyway. 7434 */ 7435 static void cpuset_cpu_active(void) 7436 { 7437 if (cpuhp_tasks_frozen) { 7438 /* 7439 * num_cpus_frozen tracks how many CPUs are involved in suspend 7440 * resume sequence. As long as this is not the last online 7441 * operation in the resume sequence, just build a single sched 7442 * domain, ignoring cpusets. 7443 */ 7444 partition_sched_domains(1, NULL, NULL); 7445 if (--num_cpus_frozen) 7446 return; 7447 /* 7448 * This is the last CPU online operation. So fall through and 7449 * restore the original sched domains by considering the 7450 * cpuset configurations. 7451 */ 7452 cpuset_force_rebuild(); 7453 } 7454 cpuset_update_active_cpus(); 7455 } 7456 7457 static int cpuset_cpu_inactive(unsigned int cpu) 7458 { 7459 if (!cpuhp_tasks_frozen) { 7460 if (dl_cpu_busy(cpu)) 7461 return -EBUSY; 7462 cpuset_update_active_cpus(); 7463 } else { 7464 num_cpus_frozen++; 7465 partition_sched_domains(1, NULL, NULL); 7466 } 7467 return 0; 7468 } 7469 7470 int sched_cpu_activate(unsigned int cpu) 7471 { 7472 struct rq *rq = cpu_rq(cpu); 7473 struct rq_flags rf; 7474 7475 /* 7476 * Make sure that when the hotplug state machine does a roll-back 7477 * we clear balance_push. Ideally that would happen earlier... 7478 */ 7479 balance_push_set(cpu, false); 7480 7481 #ifdef CONFIG_SCHED_SMT 7482 /* 7483 * When going up, increment the number of cores with SMT present. 7484 */ 7485 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 7486 static_branch_inc_cpuslocked(&sched_smt_present); 7487 #endif 7488 set_cpu_active(cpu, true); 7489 7490 if (sched_smp_initialized) { 7491 sched_domains_numa_masks_set(cpu); 7492 cpuset_cpu_active(); 7493 } 7494 7495 /* 7496 * Put the rq online, if not already. This happens: 7497 * 7498 * 1) In the early boot process, because we build the real domains 7499 * after all CPUs have been brought up. 7500 * 7501 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the 7502 * domains. 7503 */ 7504 rq_lock_irqsave(rq, &rf); 7505 if (rq->rd) { 7506 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7507 set_rq_online(rq); 7508 } 7509 rq_unlock_irqrestore(rq, &rf); 7510 7511 return 0; 7512 } 7513 7514 int sched_cpu_deactivate(unsigned int cpu) 7515 { 7516 struct rq *rq = cpu_rq(cpu); 7517 struct rq_flags rf; 7518 int ret; 7519 7520 set_cpu_active(cpu, false); 7521 balance_push_set(cpu, true); 7522 7523 /* 7524 * We've cleared cpu_active_mask / set balance_push, wait for all 7525 * preempt-disabled and RCU users of this state to go away such that 7526 * all new such users will observe it. 7527 * 7528 * Specifically, we rely on ttwu to no longer target this CPU, see 7529 * ttwu_queue_cond() and is_cpu_allowed(). 7530 * 7531 * Do sync before park smpboot threads to take care the rcu boost case. 7532 */ 7533 synchronize_rcu(); 7534 7535 rq_lock_irqsave(rq, &rf); 7536 if (rq->rd) { 7537 update_rq_clock(rq); 7538 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7539 set_rq_offline(rq); 7540 } 7541 rq_unlock_irqrestore(rq, &rf); 7542 7543 #ifdef CONFIG_SCHED_SMT 7544 /* 7545 * When going down, decrement the number of cores with SMT present. 
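 *
 * The weight-of-2 test reads: this core currently has exactly two threads in
 * its SMT mask, so this CPU going away is what drops the core below the SMT
 * threshold.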
7546 */ 7547 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 7548 static_branch_dec_cpuslocked(&sched_smt_present); 7549 #endif 7550 7551 if (!sched_smp_initialized) 7552 return 0; 7553 7554 ret = cpuset_cpu_inactive(cpu); 7555 if (ret) { 7556 balance_push_set(cpu, false); 7557 set_cpu_active(cpu, true); 7558 return ret; 7559 } 7560 sched_domains_numa_masks_clear(cpu); 7561 return 0; 7562 } 7563 7564 static void sched_rq_cpu_starting(unsigned int cpu) 7565 { 7566 struct rq *rq = cpu_rq(cpu); 7567 7568 rq->calc_load_update = calc_load_update; 7569 update_max_interval(); 7570 } 7571 7572 int sched_cpu_starting(unsigned int cpu) 7573 { 7574 sched_rq_cpu_starting(cpu); 7575 sched_tick_start(cpu); 7576 return 0; 7577 } 7578 7579 #ifdef CONFIG_HOTPLUG_CPU 7580 7581 /* 7582 * Invoked immediately before the stopper thread is invoked to bring the 7583 * CPU down completely. At this point all per CPU kthreads except the 7584 * hotplug thread (current) and the stopper thread (inactive) have been 7585 * either parked or have been unbound from the outgoing CPU. Ensure that 7586 * any of those which might be on the way out are gone. 7587 * 7588 * If after this point a bound task is being woken on this CPU then the 7589 * responsible hotplug callback has failed to do it's job. 7590 * sched_cpu_dying() will catch it with the appropriate fireworks. 7591 */ 7592 int sched_cpu_wait_empty(unsigned int cpu) 7593 { 7594 balance_hotplug_wait(); 7595 return 0; 7596 } 7597 7598 /* 7599 * Since this CPU is going 'away' for a while, fold any nr_active delta we 7600 * might have. Called from the CPU stopper task after ensuring that the 7601 * stopper is the last running task on the CPU, so nr_active count is 7602 * stable. We need to take the teardown thread which is calling this into 7603 * account, so we hand in adjust = 1 to the load calculation. 7604 * 7605 * Also see the comment "Global load-average calculations". 7606 */ 7607 static void calc_load_migrate(struct rq *rq) 7608 { 7609 long delta = calc_load_fold_active(rq, 1); 7610 7611 if (delta) 7612 atomic_long_add(delta, &calc_load_tasks); 7613 } 7614 7615 static void dump_rq_tasks(struct rq *rq, const char *loglvl) 7616 { 7617 struct task_struct *g, *p; 7618 int cpu = cpu_of(rq); 7619 7620 lockdep_assert_held(&rq->lock); 7621 7622 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); 7623 for_each_process_thread(g, p) { 7624 if (task_cpu(p) != cpu) 7625 continue; 7626 7627 if (!task_on_rq_queued(p)) 7628 continue; 7629 7630 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); 7631 } 7632 } 7633 7634 int sched_cpu_dying(unsigned int cpu) 7635 { 7636 struct rq *rq = cpu_rq(cpu); 7637 struct rq_flags rf; 7638 7639 /* Handle pending wakeups and then migrate everything off */ 7640 sched_tick_stop(cpu); 7641 7642 rq_lock_irqsave(rq, &rf); 7643 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { 7644 WARN(true, "Dying CPU not properly vacated!"); 7645 dump_rq_tasks(rq, KERN_WARNING); 7646 } 7647 rq_unlock_irqrestore(rq, &rf); 7648 7649 /* 7650 * Now that the CPU is offline, make sure we're welcome 7651 * to new tasks once we come back up. 
7652 */ 7653 balance_push_set(cpu, false); 7654 7655 calc_load_migrate(rq); 7656 update_max_interval(); 7657 nohz_balance_exit_idle(rq); 7658 hrtick_clear(rq); 7659 return 0; 7660 } 7661 #endif 7662 7663 void __init sched_init_smp(void) 7664 { 7665 sched_init_numa(); 7666 7667 /* 7668 * There's no userspace yet to cause hotplug operations; hence all the 7669 * CPU masks are stable and all blatant races in the below code cannot 7670 * happen. 7671 */ 7672 mutex_lock(&sched_domains_mutex); 7673 sched_init_domains(cpu_active_mask); 7674 mutex_unlock(&sched_domains_mutex); 7675 7676 /* Move init over to a non-isolated CPU */ 7677 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) 7678 BUG(); 7679 sched_init_granularity(); 7680 7681 init_sched_rt_class(); 7682 init_sched_dl_class(); 7683 7684 sched_smp_initialized = true; 7685 } 7686 7687 static int __init migration_init(void) 7688 { 7689 sched_cpu_starting(smp_processor_id()); 7690 return 0; 7691 } 7692 early_initcall(migration_init); 7693 7694 #else 7695 void __init sched_init_smp(void) 7696 { 7697 sched_init_granularity(); 7698 } 7699 #endif /* CONFIG_SMP */ 7700 7701 int in_sched_functions(unsigned long addr) 7702 { 7703 return in_lock_functions(addr) || 7704 (addr >= (unsigned long)__sched_text_start 7705 && addr < (unsigned long)__sched_text_end); 7706 } 7707 7708 #ifdef CONFIG_CGROUP_SCHED 7709 /* 7710 * Default task group. 7711 * Every task in system belongs to this group at bootup. 7712 */ 7713 struct task_group root_task_group; 7714 LIST_HEAD(task_groups); 7715 7716 /* Cacheline aligned slab cache for task_group */ 7717 static struct kmem_cache *task_group_cache __read_mostly; 7718 #endif 7719 7720 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 7721 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask); 7722 7723 void __init sched_init(void) 7724 { 7725 unsigned long ptr = 0; 7726 int i; 7727 7728 /* Make sure the linker didn't screw up */ 7729 BUG_ON(&idle_sched_class + 1 != &fair_sched_class || 7730 &fair_sched_class + 1 != &rt_sched_class || 7731 &rt_sched_class + 1 != &dl_sched_class); 7732 #ifdef CONFIG_SMP 7733 BUG_ON(&dl_sched_class + 1 != &stop_sched_class); 7734 #endif 7735 7736 wait_bit_init(); 7737 7738 #ifdef CONFIG_FAIR_GROUP_SCHED 7739 ptr += 2 * nr_cpu_ids * sizeof(void **); 7740 #endif 7741 #ifdef CONFIG_RT_GROUP_SCHED 7742 ptr += 2 * nr_cpu_ids * sizeof(void **); 7743 #endif 7744 if (ptr) { 7745 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); 7746 7747 #ifdef CONFIG_FAIR_GROUP_SCHED 7748 root_task_group.se = (struct sched_entity **)ptr; 7749 ptr += nr_cpu_ids * sizeof(void **); 7750 7751 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 7752 ptr += nr_cpu_ids * sizeof(void **); 7753 7754 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 7755 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 7756 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7757 #ifdef CONFIG_RT_GROUP_SCHED 7758 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 7759 ptr += nr_cpu_ids * sizeof(void **); 7760 7761 root_task_group.rt_rq = (struct rt_rq **)ptr; 7762 ptr += nr_cpu_ids * sizeof(void **); 7763 7764 #endif /* CONFIG_RT_GROUP_SCHED */ 7765 } 7766 #ifdef CONFIG_CPUMASK_OFFSTACK 7767 for_each_possible_cpu(i) { 7768 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 7769 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 7770 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node( 7771 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 7772 } 7773 #endif /* CONFIG_CPUMASK_OFFSTACK */ 7774 7775 
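	/*
	 * Seed the default RT and DL bandwidth pools; global_rt_period() and
	 * global_rt_runtime() are derived from the sched_rt_period_us /
	 * sched_rt_runtime_us sysctls.
	 */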
init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); 7776 init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime()); 7777 7778 #ifdef CONFIG_SMP 7779 init_defrootdomain(); 7780 #endif 7781 7782 #ifdef CONFIG_RT_GROUP_SCHED 7783 init_rt_bandwidth(&root_task_group.rt_bandwidth, 7784 global_rt_period(), global_rt_runtime()); 7785 #endif /* CONFIG_RT_GROUP_SCHED */ 7786 7787 #ifdef CONFIG_CGROUP_SCHED 7788 task_group_cache = KMEM_CACHE(task_group, 0); 7789 7790 list_add(&root_task_group.list, &task_groups); 7791 INIT_LIST_HEAD(&root_task_group.children); 7792 INIT_LIST_HEAD(&root_task_group.siblings); 7793 autogroup_init(&init_task); 7794 #endif /* CONFIG_CGROUP_SCHED */ 7795 7796 for_each_possible_cpu(i) { 7797 struct rq *rq; 7798 7799 rq = cpu_rq(i); 7800 raw_spin_lock_init(&rq->lock); 7801 rq->nr_running = 0; 7802 rq->calc_load_active = 0; 7803 rq->calc_load_update = jiffies + LOAD_FREQ; 7804 init_cfs_rq(&rq->cfs); 7805 init_rt_rq(&rq->rt); 7806 init_dl_rq(&rq->dl); 7807 #ifdef CONFIG_FAIR_GROUP_SCHED 7808 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 7809 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 7810 /* 7811 * How much CPU bandwidth does root_task_group get? 7812 * 7813 * In case of task-groups formed thr' the cgroup filesystem, it 7814 * gets 100% of the CPU resources in the system. This overall 7815 * system CPU resource is divided among the tasks of 7816 * root_task_group and its child task-groups in a fair manner, 7817 * based on each entity's (task or task-group's) weight 7818 * (se->load.weight). 7819 * 7820 * In other words, if root_task_group has 10 tasks of weight 7821 * 1024) and two child groups A0 and A1 (of weight 1024 each), 7822 * then A0's share of the CPU resource is: 7823 * 7824 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 7825 * 7826 * We achieve this by letting root_task_group's tasks sit 7827 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 7828 */ 7829 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 7830 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7831 7832 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 7833 #ifdef CONFIG_RT_GROUP_SCHED 7834 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 7835 #endif 7836 #ifdef CONFIG_SMP 7837 rq->sd = NULL; 7838 rq->rd = NULL; 7839 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 7840 rq->balance_callback = NULL; 7841 rq->active_balance = 0; 7842 rq->next_balance = jiffies; 7843 rq->push_cpu = 0; 7844 rq->cpu = i; 7845 rq->online = 0; 7846 rq->idle_stamp = 0; 7847 rq->avg_idle = 2*sysctl_sched_migration_cost; 7848 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 7849 7850 INIT_LIST_HEAD(&rq->cfs_tasks); 7851 7852 rq_attach_root(rq, &def_root_domain); 7853 #ifdef CONFIG_NO_HZ_COMMON 7854 rq->last_blocked_load_update_tick = jiffies; 7855 atomic_set(&rq->nohz_flags, 0); 7856 7857 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); 7858 #endif 7859 #ifdef CONFIG_HOTPLUG_CPU 7860 rcuwait_init(&rq->hotplug_wait); 7861 #endif 7862 #endif /* CONFIG_SMP */ 7863 hrtick_rq_init(rq); 7864 atomic_set(&rq->nr_iowait, 0); 7865 } 7866 7867 set_load_weight(&init_task, false); 7868 7869 /* 7870 * The boot idle thread does lazy MMU switching as well: 7871 */ 7872 mmgrab(&init_mm); 7873 enter_lazy_tlb(&init_mm, current); 7874 7875 /* 7876 * Make us the idle thread. 
Technically, schedule() should not be 7877 * called from this thread, however somewhere below it might be, 7878 * but because we are the idle thread, we just pick up running again 7879 * when this runqueue becomes "idle". 7880 */ 7881 init_idle(current, smp_processor_id()); 7882 7883 calc_load_update = jiffies + LOAD_FREQ; 7884 7885 #ifdef CONFIG_SMP 7886 idle_thread_set_boot_cpu(); 7887 #endif 7888 init_sched_fair_class(); 7889 7890 init_schedstats(); 7891 7892 psi_init(); 7893 7894 init_uclamp(); 7895 7896 scheduler_running = 1; 7897 } 7898 7899 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 7900 static inline int preempt_count_equals(int preempt_offset) 7901 { 7902 int nested = preempt_count() + rcu_preempt_depth(); 7903 7904 return (nested == preempt_offset); 7905 } 7906 7907 void __might_sleep(const char *file, int line, int preempt_offset) 7908 { 7909 /* 7910 * Blocking primitives will set (and therefore destroy) current->state, 7911 * since we will exit with TASK_RUNNING make sure we enter with it, 7912 * otherwise we will destroy state. 7913 */ 7914 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 7915 "do not call blocking ops when !TASK_RUNNING; " 7916 "state=%lx set at [<%p>] %pS\n", 7917 current->state, 7918 (void *)current->task_state_change, 7919 (void *)current->task_state_change); 7920 7921 ___might_sleep(file, line, preempt_offset); 7922 } 7923 EXPORT_SYMBOL(__might_sleep); 7924 7925 void ___might_sleep(const char *file, int line, int preempt_offset) 7926 { 7927 /* Ratelimiting timestamp: */ 7928 static unsigned long prev_jiffy; 7929 7930 unsigned long preempt_disable_ip; 7931 7932 /* WARN_ON_ONCE() by default, no rate limit required: */ 7933 rcu_sleep_check(); 7934 7935 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 7936 !is_idle_task(current) && !current->non_block_count) || 7937 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 7938 oops_in_progress) 7939 return; 7940 7941 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 7942 return; 7943 prev_jiffy = jiffies; 7944 7945 /* Save this before calling printk(), since that will clobber it: */ 7946 preempt_disable_ip = get_preempt_disable_ip(current); 7947 7948 printk(KERN_ERR 7949 "BUG: sleeping function called from invalid context at %s:%d\n", 7950 file, line); 7951 printk(KERN_ERR 7952 "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", 7953 in_atomic(), irqs_disabled(), current->non_block_count, 7954 current->pid, current->comm); 7955 7956 if (task_stack_end_corrupted(current)) 7957 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 7958 7959 debug_show_held_locks(current); 7960 if (irqs_disabled()) 7961 print_irqtrace_events(current); 7962 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 7963 && !preempt_count_equals(preempt_offset)) { 7964 pr_err("Preemption disabled at:"); 7965 print_ip_sym(KERN_ERR, preempt_disable_ip); 7966 } 7967 dump_stack(); 7968 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 7969 } 7970 EXPORT_SYMBOL(___might_sleep); 7971 7972 void __cant_sleep(const char *file, int line, int preempt_offset) 7973 { 7974 static unsigned long prev_jiffy; 7975 7976 if (irqs_disabled()) 7977 return; 7978 7979 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 7980 return; 7981 7982 if (preempt_count() > preempt_offset) 7983 return; 7984 7985 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 7986 return; 7987 prev_jiffy = jiffies; 7988 7989 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 7990 printk(KERN_ERR "in_atomic(): %d, 
irqs_disabled(): %d, pid: %d, name: %s\n", 7991 in_atomic(), irqs_disabled(), 7992 current->pid, current->comm); 7993 7994 debug_show_held_locks(current); 7995 dump_stack(); 7996 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 7997 } 7998 EXPORT_SYMBOL_GPL(__cant_sleep); 7999 8000 #ifdef CONFIG_SMP 8001 void __cant_migrate(const char *file, int line) 8002 { 8003 static unsigned long prev_jiffy; 8004 8005 if (irqs_disabled()) 8006 return; 8007 8008 if (is_migration_disabled(current)) 8009 return; 8010 8011 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 8012 return; 8013 8014 if (preempt_count() > 0) 8015 return; 8016 8017 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 8018 return; 8019 prev_jiffy = jiffies; 8020 8021 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line); 8022 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n", 8023 in_atomic(), irqs_disabled(), is_migration_disabled(current), 8024 current->pid, current->comm); 8025 8026 debug_show_held_locks(current); 8027 dump_stack(); 8028 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 8029 } 8030 EXPORT_SYMBOL_GPL(__cant_migrate); 8031 #endif 8032 #endif 8033 8034 #ifdef CONFIG_MAGIC_SYSRQ 8035 void normalize_rt_tasks(void) 8036 { 8037 struct task_struct *g, *p; 8038 struct sched_attr attr = { 8039 .sched_policy = SCHED_NORMAL, 8040 }; 8041 8042 read_lock(&tasklist_lock); 8043 for_each_process_thread(g, p) { 8044 /* 8045 * Only normalize user tasks: 8046 */ 8047 if (p->flags & PF_KTHREAD) 8048 continue; 8049 8050 p->se.exec_start = 0; 8051 schedstat_set(p->se.statistics.wait_start, 0); 8052 schedstat_set(p->se.statistics.sleep_start, 0); 8053 schedstat_set(p->se.statistics.block_start, 0); 8054 8055 if (!dl_task(p) && !rt_task(p)) { 8056 /* 8057 * Renice negative nice level userspace 8058 * tasks back to 0: 8059 */ 8060 if (task_nice(p) < 0) 8061 set_user_nice(p, 0); 8062 continue; 8063 } 8064 8065 __sched_setscheduler(p, &attr, false, false); 8066 } 8067 read_unlock(&tasklist_lock); 8068 } 8069 8070 #endif /* CONFIG_MAGIC_SYSRQ */ 8071 8072 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 8073 /* 8074 * These functions are only useful for the IA64 MCA handling, or kdb. 8075 * 8076 * They can only be called when the whole system has been 8077 * stopped - every CPU needs to be quiescent, and no scheduling 8078 * activity can take place. Using them for anything else would 8079 * be a serious bug, and as a result, they aren't even visible 8080 * under any other configuration. 8081 */ 8082 8083 /** 8084 * curr_task - return the current task for a given CPU. 8085 * @cpu: the processor in question. 8086 * 8087 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 8088 * 8089 * Return: The current task for @cpu. 8090 */ 8091 struct task_struct *curr_task(int cpu) 8092 { 8093 return cpu_curr(cpu); 8094 } 8095 8096 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 8097 8098 #ifdef CONFIG_IA64 8099 /** 8100 * ia64_set_curr_task - set the current task for a given CPU. 8101 * @cpu: the processor in question. 8102 * @p: the task pointer to set. 8103 * 8104 * Description: This function must only be used when non-maskable interrupts 8105 * are serviced on a separate stack. It allows the architecture to switch the 8106 * notion of the current task on a CPU in a non-blocking manner. 
This function 8107 * must be called with all CPUs synchronized and interrupts disabled; the 8108 * caller must save the original value of the current task (see 8109 * curr_task() above) and restore that value before re-enabling interrupts and 8110 * restarting the system. 8111 * 8112 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 8113 */ 8114 void ia64_set_curr_task(int cpu, struct task_struct *p) 8115 { 8116 cpu_curr(cpu) = p; 8117 } 8118 8119 #endif 8120 8121 #ifdef CONFIG_CGROUP_SCHED 8122 /* task_group_lock serializes the addition/removal of task groups */ 8123 static DEFINE_SPINLOCK(task_group_lock); 8124 8125 static inline void alloc_uclamp_sched_group(struct task_group *tg, 8126 struct task_group *parent) 8127 { 8128 #ifdef CONFIG_UCLAMP_TASK_GROUP 8129 enum uclamp_id clamp_id; 8130 8131 for_each_clamp_id(clamp_id) { 8132 uclamp_se_set(&tg->uclamp_req[clamp_id], 8133 uclamp_none(clamp_id), false); 8134 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; 8135 } 8136 #endif 8137 } 8138 8139 static void sched_free_group(struct task_group *tg) 8140 { 8141 free_fair_sched_group(tg); 8142 free_rt_sched_group(tg); 8143 autogroup_free(tg); 8144 kmem_cache_free(task_group_cache, tg); 8145 } 8146 8147 /* allocate runqueue etc. for a new task group */ 8148 struct task_group *sched_create_group(struct task_group *parent) 8149 { 8150 struct task_group *tg; 8151 8152 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 8153 if (!tg) 8154 return ERR_PTR(-ENOMEM); 8155 8156 if (!alloc_fair_sched_group(tg, parent)) 8157 goto err; 8158 8159 if (!alloc_rt_sched_group(tg, parent)) 8160 goto err; 8161 8162 alloc_uclamp_sched_group(tg, parent); 8163 8164 return tg; 8165 8166 err: 8167 sched_free_group(tg); 8168 return ERR_PTR(-ENOMEM); 8169 } 8170 8171 void sched_online_group(struct task_group *tg, struct task_group *parent) 8172 { 8173 unsigned long flags; 8174 8175 spin_lock_irqsave(&task_group_lock, flags); 8176 list_add_rcu(&tg->list, &task_groups); 8177 8178 /* Root should already exist: */ 8179 WARN_ON(!parent); 8180 8181 tg->parent = parent; 8182 INIT_LIST_HEAD(&tg->children); 8183 list_add_rcu(&tg->siblings, &parent->children); 8184 spin_unlock_irqrestore(&task_group_lock, flags); 8185 8186 online_fair_sched_group(tg); 8187 } 8188 8189 /* rcu callback to free various structures associated with a task group */ 8190 static void sched_free_group_rcu(struct rcu_head *rhp) 8191 { 8192 /* Now it should be safe to free those cfs_rqs: */ 8193 sched_free_group(container_of(rhp, struct task_group, rcu)); 8194 } 8195 8196 void sched_destroy_group(struct task_group *tg) 8197 { 8198 /* Wait for possible concurrent references to cfs_rqs to complete: */ 8199 call_rcu(&tg->rcu, sched_free_group_rcu); 8200 } 8201 8202 void sched_offline_group(struct task_group *tg) 8203 { 8204 unsigned long flags; 8205 8206 /* End participation in shares distribution: */ 8207 unregister_fair_sched_group(tg); 8208 8209 spin_lock_irqsave(&task_group_lock, flags); 8210 list_del_rcu(&tg->list); 8211 list_del_rcu(&tg->siblings); 8212 spin_unlock_irqrestore(&task_group_lock, flags); 8213 } 8214 8215 static void sched_change_group(struct task_struct *tsk, int type) 8216 { 8217 struct task_group *tg; 8218 8219 /* 8220 * All callers are synchronized by task_rq_lock(), so using RCU here 8221 * would be pointless. Thus, we pass "true" to task_css_check() 8222 * to prevent lockdep warnings.
8223 */ 8224 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 8225 struct task_group, css); 8226 tg = autogroup_task_group(tsk, tg); 8227 tsk->sched_task_group = tg; 8228 8229 #ifdef CONFIG_FAIR_GROUP_SCHED 8230 if (tsk->sched_class->task_change_group) 8231 tsk->sched_class->task_change_group(tsk, type); 8232 else 8233 #endif 8234 set_task_rq(tsk, task_cpu(tsk)); 8235 } 8236 8237 /* 8238 * Change task's runqueue when it moves between groups. 8239 * 8240 * The caller of this function should have put the task in its new group by 8241 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect 8242 * its new group. 8243 */ 8244 void sched_move_task(struct task_struct *tsk) 8245 { 8246 int queued, running, queue_flags = 8247 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 8248 struct rq_flags rf; 8249 struct rq *rq; 8250 8251 rq = task_rq_lock(tsk, &rf); 8252 update_rq_clock(rq); 8253 8254 running = task_current(rq, tsk); 8255 queued = task_on_rq_queued(tsk); 8256 8257 if (queued) 8258 dequeue_task(rq, tsk, queue_flags); 8259 if (running) 8260 put_prev_task(rq, tsk); 8261 8262 sched_change_group(tsk, TASK_MOVE_GROUP); 8263 8264 if (queued) 8265 enqueue_task(rq, tsk, queue_flags); 8266 if (running) { 8267 set_next_task(rq, tsk); 8268 /* 8269 * After changing group, the running task may have joined a 8270 * throttled one but it's still the running task. Trigger a 8271 * resched to make sure that task can still run. 8272 */ 8273 resched_curr(rq); 8274 } 8275 8276 task_rq_unlock(rq, tsk, &rf); 8277 } 8278 8279 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 8280 { 8281 return css ? container_of(css, struct task_group, css) : NULL; 8282 } 8283 8284 static struct cgroup_subsys_state * 8285 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 8286 { 8287 struct task_group *parent = css_tg(parent_css); 8288 struct task_group *tg; 8289 8290 if (!parent) { 8291 /* This is early initialization for the top cgroup */ 8292 return &root_task_group.css; 8293 } 8294 8295 tg = sched_create_group(parent); 8296 if (IS_ERR(tg)) 8297 return ERR_PTR(-ENOMEM); 8298 8299 return &tg->css; 8300 } 8301 8302 /* Expose task group only after completing cgroup initialization */ 8303 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 8304 { 8305 struct task_group *tg = css_tg(css); 8306 struct task_group *parent = css_tg(css->parent); 8307 8308 if (parent) 8309 sched_online_group(tg, parent); 8310 8311 #ifdef CONFIG_UCLAMP_TASK_GROUP 8312 /* Propagate the effective uclamp value for the new group */ 8313 cpu_util_update_eff(css); 8314 #endif 8315 8316 return 0; 8317 } 8318 8319 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 8320 { 8321 struct task_group *tg = css_tg(css); 8322 8323 sched_offline_group(tg); 8324 } 8325 8326 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 8327 { 8328 struct task_group *tg = css_tg(css); 8329 8330 /* 8331 * Relies on the RCU grace period between css_released() and this. 8332 */ 8333 sched_free_group(tg); 8334 } 8335 8336 /* 8337 * This is called before wake_up_new_task(), therefore we really only 8338 * have to set its group bits, all the other stuff does not apply. 
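 * (The actual enqueue onto the new group's runqueue happens later, in wake_up_new_task().)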
8339 */ 8340 static void cpu_cgroup_fork(struct task_struct *task) 8341 { 8342 struct rq_flags rf; 8343 struct rq *rq; 8344 8345 rq = task_rq_lock(task, &rf); 8346 8347 update_rq_clock(rq); 8348 sched_change_group(task, TASK_SET_GROUP); 8349 8350 task_rq_unlock(rq, task, &rf); 8351 } 8352 8353 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 8354 { 8355 struct task_struct *task; 8356 struct cgroup_subsys_state *css; 8357 int ret = 0; 8358 8359 cgroup_taskset_for_each(task, css, tset) { 8360 #ifdef CONFIG_RT_GROUP_SCHED 8361 if (!sched_rt_can_attach(css_tg(css), task)) 8362 return -EINVAL; 8363 #endif 8364 /* 8365 * Serialize against wake_up_new_task() such that if it's 8366 * running, we're sure to observe its full state. 8367 */ 8368 raw_spin_lock_irq(&task->pi_lock); 8369 /* 8370 * Avoid calling sched_move_task() before wake_up_new_task() 8371 * has happened. This would lead to problems with PELT, due to 8372 * move wanting to detach+attach while we're not attached yet. 8373 */ 8374 if (task->state == TASK_NEW) 8375 ret = -EINVAL; 8376 raw_spin_unlock_irq(&task->pi_lock); 8377 8378 if (ret) 8379 break; 8380 } 8381 return ret; 8382 } 8383 8384 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 8385 { 8386 struct task_struct *task; 8387 struct cgroup_subsys_state *css; 8388 8389 cgroup_taskset_for_each(task, css, tset) 8390 sched_move_task(task); 8391 } 8392 8393 #ifdef CONFIG_UCLAMP_TASK_GROUP 8394 static void cpu_util_update_eff(struct cgroup_subsys_state *css) 8395 { 8396 struct cgroup_subsys_state *top_css = css; 8397 struct uclamp_se *uc_parent = NULL; 8398 struct uclamp_se *uc_se = NULL; 8399 unsigned int eff[UCLAMP_CNT]; 8400 enum uclamp_id clamp_id; 8401 unsigned int clamps; 8402 8403 css_for_each_descendant_pre(css, top_css) { 8404 uc_parent = css_tg(css)->parent 8405 ? css_tg(css)->parent->uclamp : NULL; 8406 8407 for_each_clamp_id(clamp_id) { 8408 /* Assume effective clamps matches requested clamps */ 8409 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; 8410 /* Cap effective clamps with parent's effective clamps */ 8411 if (uc_parent && 8412 eff[clamp_id] > uc_parent[clamp_id].value) { 8413 eff[clamp_id] = uc_parent[clamp_id].value; 8414 } 8415 } 8416 /* Ensure protection is always capped by limit */ 8417 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); 8418 8419 /* Propagate most restrictive effective clamps */ 8420 clamps = 0x0; 8421 uc_se = css_tg(css)->uclamp; 8422 for_each_clamp_id(clamp_id) { 8423 if (eff[clamp_id] == uc_se[clamp_id].value) 8424 continue; 8425 uc_se[clamp_id].value = eff[clamp_id]; 8426 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); 8427 clamps |= (0x1 << clamp_id); 8428 } 8429 if (!clamps) { 8430 css = css_rightmost_descendant(css); 8431 continue; 8432 } 8433 8434 /* Immediately update descendants RUNNABLE tasks */ 8435 uclamp_update_active_tasks(css, clamps); 8436 } 8437 } 8438 8439 /* 8440 * Integer 10^N with a given N exponent by casting to integer the literal "1eN" 8441 * C expression. Since there is no way to convert a macro argument (N) into a 8442 * character constant, use two levels of macros. 
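 * E.g. POW10(2) expands to _POW10(2) and then to ((unsigned int)1e2), i.e. 100.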
8443 */ 8444 #define _POW10(exp) ((unsigned int)1e##exp) 8445 #define POW10(exp) _POW10(exp) 8446 8447 struct uclamp_request { 8448 #define UCLAMP_PERCENT_SHIFT 2 8449 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) 8450 s64 percent; 8451 u64 util; 8452 int ret; 8453 }; 8454 8455 static inline struct uclamp_request 8456 capacity_from_percent(char *buf) 8457 { 8458 struct uclamp_request req = { 8459 .percent = UCLAMP_PERCENT_SCALE, 8460 .util = SCHED_CAPACITY_SCALE, 8461 .ret = 0, 8462 }; 8463 8464 buf = strim(buf); 8465 if (strcmp(buf, "max")) { 8466 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, 8467 &req.percent); 8468 if (req.ret) 8469 return req; 8470 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { 8471 req.ret = -ERANGE; 8472 return req; 8473 } 8474 8475 req.util = req.percent << SCHED_CAPACITY_SHIFT; 8476 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); 8477 } 8478 8479 return req; 8480 } 8481 8482 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, 8483 size_t nbytes, loff_t off, 8484 enum uclamp_id clamp_id) 8485 { 8486 struct uclamp_request req; 8487 struct task_group *tg; 8488 8489 req = capacity_from_percent(buf); 8490 if (req.ret) 8491 return req.ret; 8492 8493 static_branch_enable(&sched_uclamp_used); 8494 8495 mutex_lock(&uclamp_mutex); 8496 rcu_read_lock(); 8497 8498 tg = css_tg(of_css(of)); 8499 if (tg->uclamp_req[clamp_id].value != req.util) 8500 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); 8501 8502 /* 8503 * Because of not recoverable conversion rounding we keep track of the 8504 * exact requested value 8505 */ 8506 tg->uclamp_pct[clamp_id] = req.percent; 8507 8508 /* Update effective clamps to track the most restrictive value */ 8509 cpu_util_update_eff(of_css(of)); 8510 8511 rcu_read_unlock(); 8512 mutex_unlock(&uclamp_mutex); 8513 8514 return nbytes; 8515 } 8516 8517 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, 8518 char *buf, size_t nbytes, 8519 loff_t off) 8520 { 8521 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); 8522 } 8523 8524 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of, 8525 char *buf, size_t nbytes, 8526 loff_t off) 8527 { 8528 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); 8529 } 8530 8531 static inline void cpu_uclamp_print(struct seq_file *sf, 8532 enum uclamp_id clamp_id) 8533 { 8534 struct task_group *tg; 8535 u64 util_clamp; 8536 u64 percent; 8537 u32 rem; 8538 8539 rcu_read_lock(); 8540 tg = css_tg(seq_css(sf)); 8541 util_clamp = tg->uclamp_req[clamp_id].value; 8542 rcu_read_unlock(); 8543 8544 if (util_clamp == SCHED_CAPACITY_SCALE) { 8545 seq_puts(sf, "max\n"); 8546 return; 8547 } 8548 8549 percent = tg->uclamp_pct[clamp_id]; 8550 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); 8551 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); 8552 } 8553 8554 static int cpu_uclamp_min_show(struct seq_file *sf, void *v) 8555 { 8556 cpu_uclamp_print(sf, UCLAMP_MIN); 8557 return 0; 8558 } 8559 8560 static int cpu_uclamp_max_show(struct seq_file *sf, void *v) 8561 { 8562 cpu_uclamp_print(sf, UCLAMP_MAX); 8563 return 0; 8564 } 8565 #endif /* CONFIG_UCLAMP_TASK_GROUP */ 8566 8567 #ifdef CONFIG_FAIR_GROUP_SCHED 8568 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 8569 struct cftype *cftype, u64 shareval) 8570 { 8571 if (shareval > scale_load_down(ULONG_MAX)) 8572 shareval = MAX_SHARES; 8573 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 8574 } 8575 8576 static 
u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 8577 struct cftype *cft) 8578 { 8579 struct task_group *tg = css_tg(css); 8580 8581 return (u64) scale_load_down(tg->shares); 8582 } 8583 8584 #ifdef CONFIG_CFS_BANDWIDTH 8585 static DEFINE_MUTEX(cfs_constraints_mutex); 8586 8587 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 8588 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 8589 /* More than 203 days if BW_SHIFT equals 20. */ 8590 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; 8591 8592 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 8593 8594 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 8595 { 8596 int i, ret = 0, runtime_enabled, runtime_was_enabled; 8597 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8598 8599 if (tg == &root_task_group) 8600 return -EINVAL; 8601 8602 /* 8603 * Ensure we have at some amount of bandwidth every period. This is 8604 * to prevent reaching a state of large arrears when throttled via 8605 * entity_tick() resulting in prolonged exit starvation. 8606 */ 8607 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 8608 return -EINVAL; 8609 8610 /* 8611 * Likewise, bound things on the otherside by preventing insane quota 8612 * periods. This also allows us to normalize in computing quota 8613 * feasibility. 8614 */ 8615 if (period > max_cfs_quota_period) 8616 return -EINVAL; 8617 8618 /* 8619 * Bound quota to defend quota against overflow during bandwidth shift. 8620 */ 8621 if (quota != RUNTIME_INF && quota > max_cfs_runtime) 8622 return -EINVAL; 8623 8624 /* 8625 * Prevent race between setting of cfs_rq->runtime_enabled and 8626 * unthrottle_offline_cfs_rqs(). 8627 */ 8628 get_online_cpus(); 8629 mutex_lock(&cfs_constraints_mutex); 8630 ret = __cfs_schedulable(tg, period, quota); 8631 if (ret) 8632 goto out_unlock; 8633 8634 runtime_enabled = quota != RUNTIME_INF; 8635 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 8636 /* 8637 * If we need to toggle cfs_bandwidth_used, off->on must occur 8638 * before making related changes, and on->off must occur afterwards 8639 */ 8640 if (runtime_enabled && !runtime_was_enabled) 8641 cfs_bandwidth_usage_inc(); 8642 raw_spin_lock_irq(&cfs_b->lock); 8643 cfs_b->period = ns_to_ktime(period); 8644 cfs_b->quota = quota; 8645 8646 __refill_cfs_bandwidth_runtime(cfs_b); 8647 8648 /* Restart the period timer (if active) to handle new period expiry: */ 8649 if (runtime_enabled) 8650 start_cfs_bandwidth(cfs_b); 8651 8652 raw_spin_unlock_irq(&cfs_b->lock); 8653 8654 for_each_online_cpu(i) { 8655 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 8656 struct rq *rq = cfs_rq->rq; 8657 struct rq_flags rf; 8658 8659 rq_lock_irq(rq, &rf); 8660 cfs_rq->runtime_enabled = runtime_enabled; 8661 cfs_rq->runtime_remaining = 0; 8662 8663 if (cfs_rq->throttled) 8664 unthrottle_cfs_rq(cfs_rq); 8665 rq_unlock_irq(rq, &rf); 8666 } 8667 if (runtime_was_enabled && !runtime_enabled) 8668 cfs_bandwidth_usage_dec(); 8669 out_unlock: 8670 mutex_unlock(&cfs_constraints_mutex); 8671 put_online_cpus(); 8672 8673 return ret; 8674 } 8675 8676 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 8677 { 8678 u64 quota, period; 8679 8680 period = ktime_to_ns(tg->cfs_bandwidth.period); 8681 if (cfs_quota_us < 0) 8682 quota = RUNTIME_INF; 8683 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) 8684 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 8685 else 8686 return -EINVAL; 8687 8688 return tg_set_cfs_bandwidth(tg, period, 
quota); 8689 } 8690 8691 static long tg_get_cfs_quota(struct task_group *tg) 8692 { 8693 u64 quota_us; 8694 8695 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 8696 return -1; 8697 8698 quota_us = tg->cfs_bandwidth.quota; 8699 do_div(quota_us, NSEC_PER_USEC); 8700 8701 return quota_us; 8702 } 8703 8704 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 8705 { 8706 u64 quota, period; 8707 8708 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 8709 return -EINVAL; 8710 8711 period = (u64)cfs_period_us * NSEC_PER_USEC; 8712 quota = tg->cfs_bandwidth.quota; 8713 8714 return tg_set_cfs_bandwidth(tg, period, quota); 8715 } 8716 8717 static long tg_get_cfs_period(struct task_group *tg) 8718 { 8719 u64 cfs_period_us; 8720 8721 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 8722 do_div(cfs_period_us, NSEC_PER_USEC); 8723 8724 return cfs_period_us; 8725 } 8726 8727 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 8728 struct cftype *cft) 8729 { 8730 return tg_get_cfs_quota(css_tg(css)); 8731 } 8732 8733 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 8734 struct cftype *cftype, s64 cfs_quota_us) 8735 { 8736 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 8737 } 8738 8739 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 8740 struct cftype *cft) 8741 { 8742 return tg_get_cfs_period(css_tg(css)); 8743 } 8744 8745 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 8746 struct cftype *cftype, u64 cfs_period_us) 8747 { 8748 return tg_set_cfs_period(css_tg(css), cfs_period_us); 8749 } 8750 8751 struct cfs_schedulable_data { 8752 struct task_group *tg; 8753 u64 period, quota; 8754 }; 8755 8756 /* 8757 * normalize group quota/period to be quota/max_period 8758 * note: units are usecs 8759 */ 8760 static u64 normalize_cfs_quota(struct task_group *tg, 8761 struct cfs_schedulable_data *d) 8762 { 8763 u64 quota, period; 8764 8765 if (tg == d->tg) { 8766 period = d->period; 8767 quota = d->quota; 8768 } else { 8769 period = tg_get_cfs_period(tg); 8770 quota = tg_get_cfs_quota(tg); 8771 } 8772 8773 /* note: these should typically be equivalent */ 8774 if (quota == RUNTIME_INF || quota == -1) 8775 return RUNTIME_INF; 8776 8777 return to_ratio(period, quota); 8778 } 8779 8780 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 8781 { 8782 struct cfs_schedulable_data *d = data; 8783 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8784 s64 quota = 0, parent_quota = -1; 8785 8786 if (!tg->parent) { 8787 quota = RUNTIME_INF; 8788 } else { 8789 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 8790 8791 quota = normalize_cfs_quota(tg, d); 8792 parent_quota = parent_b->hierarchical_quota; 8793 8794 /* 8795 * Ensure max(child_quota) <= parent_quota. On cgroup2, 8796 * always take the min. 
On cgroup1, only inherit when no 8797 * limit is set: 8798 */ 8799 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 8800 quota = min(quota, parent_quota); 8801 } else { 8802 if (quota == RUNTIME_INF) 8803 quota = parent_quota; 8804 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 8805 return -EINVAL; 8806 } 8807 } 8808 cfs_b->hierarchical_quota = quota; 8809 8810 return 0; 8811 } 8812 8813 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 8814 { 8815 int ret; 8816 struct cfs_schedulable_data data = { 8817 .tg = tg, 8818 .period = period, 8819 .quota = quota, 8820 }; 8821 8822 if (quota != RUNTIME_INF) { 8823 do_div(data.period, NSEC_PER_USEC); 8824 do_div(data.quota, NSEC_PER_USEC); 8825 } 8826 8827 rcu_read_lock(); 8828 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 8829 rcu_read_unlock(); 8830 8831 return ret; 8832 } 8833 8834 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 8835 { 8836 struct task_group *tg = css_tg(seq_css(sf)); 8837 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8838 8839 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 8840 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 8841 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 8842 8843 if (schedstat_enabled() && tg != &root_task_group) { 8844 u64 ws = 0; 8845 int i; 8846 8847 for_each_possible_cpu(i) 8848 ws += schedstat_val(tg->se[i]->statistics.wait_sum); 8849 8850 seq_printf(sf, "wait_sum %llu\n", ws); 8851 } 8852 8853 return 0; 8854 } 8855 #endif /* CONFIG_CFS_BANDWIDTH */ 8856 #endif /* CONFIG_FAIR_GROUP_SCHED */ 8857 8858 #ifdef CONFIG_RT_GROUP_SCHED 8859 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 8860 struct cftype *cft, s64 val) 8861 { 8862 return sched_group_set_rt_runtime(css_tg(css), val); 8863 } 8864 8865 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 8866 struct cftype *cft) 8867 { 8868 return sched_group_rt_runtime(css_tg(css)); 8869 } 8870 8871 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 8872 struct cftype *cftype, u64 rt_period_us) 8873 { 8874 return sched_group_set_rt_period(css_tg(css), rt_period_us); 8875 } 8876 8877 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 8878 struct cftype *cft) 8879 { 8880 return sched_group_rt_period(css_tg(css)); 8881 } 8882 #endif /* CONFIG_RT_GROUP_SCHED */ 8883 8884 static struct cftype cpu_legacy_files[] = { 8885 #ifdef CONFIG_FAIR_GROUP_SCHED 8886 { 8887 .name = "shares", 8888 .read_u64 = cpu_shares_read_u64, 8889 .write_u64 = cpu_shares_write_u64, 8890 }, 8891 #endif 8892 #ifdef CONFIG_CFS_BANDWIDTH 8893 { 8894 .name = "cfs_quota_us", 8895 .read_s64 = cpu_cfs_quota_read_s64, 8896 .write_s64 = cpu_cfs_quota_write_s64, 8897 }, 8898 { 8899 .name = "cfs_period_us", 8900 .read_u64 = cpu_cfs_period_read_u64, 8901 .write_u64 = cpu_cfs_period_write_u64, 8902 }, 8903 { 8904 .name = "stat", 8905 .seq_show = cpu_cfs_stat_show, 8906 }, 8907 #endif 8908 #ifdef CONFIG_RT_GROUP_SCHED 8909 { 8910 .name = "rt_runtime_us", 8911 .read_s64 = cpu_rt_runtime_read, 8912 .write_s64 = cpu_rt_runtime_write, 8913 }, 8914 { 8915 .name = "rt_period_us", 8916 .read_u64 = cpu_rt_period_read_uint, 8917 .write_u64 = cpu_rt_period_write_uint, 8918 }, 8919 #endif 8920 #ifdef CONFIG_UCLAMP_TASK_GROUP 8921 { 8922 .name = "uclamp.min", 8923 .flags = CFTYPE_NOT_ON_ROOT, 8924 .seq_show = cpu_uclamp_min_show, 8925 .write = cpu_uclamp_min_write, 8926 }, 8927 { 8928 .name = "uclamp.max", 8929 .flags = CFTYPE_NOT_ON_ROOT, 8930 .seq_show 
= cpu_uclamp_max_show, 8931 .write = cpu_uclamp_max_write, 8932 }, 8933 #endif 8934 { } /* Terminate */ 8935 }; 8936 8937 static int cpu_extra_stat_show(struct seq_file *sf, 8938 struct cgroup_subsys_state *css) 8939 { 8940 #ifdef CONFIG_CFS_BANDWIDTH 8941 { 8942 struct task_group *tg = css_tg(css); 8943 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8944 u64 throttled_usec; 8945 8946 throttled_usec = cfs_b->throttled_time; 8947 do_div(throttled_usec, NSEC_PER_USEC); 8948 8949 seq_printf(sf, "nr_periods %d\n" 8950 "nr_throttled %d\n" 8951 "throttled_usec %llu\n", 8952 cfs_b->nr_periods, cfs_b->nr_throttled, 8953 throttled_usec); 8954 } 8955 #endif 8956 return 0; 8957 } 8958 8959 #ifdef CONFIG_FAIR_GROUP_SCHED 8960 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 8961 struct cftype *cft) 8962 { 8963 struct task_group *tg = css_tg(css); 8964 u64 weight = scale_load_down(tg->shares); 8965 8966 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); 8967 } 8968 8969 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 8970 struct cftype *cft, u64 weight) 8971 { 8972 /* 8973 * cgroup weight knobs should use the common MIN, DFL and MAX 8974 * values which are 1, 100 and 10000 respectively. While it loses 8975 * a bit of range on both ends, it maps pretty well onto the shares 8976 * value used by scheduler and the round-trip conversions preserve 8977 * the original value over the entire range. 8978 */ 8979 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) 8980 return -ERANGE; 8981 8982 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); 8983 8984 return sched_group_set_shares(css_tg(css), scale_load(weight)); 8985 } 8986 8987 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 8988 struct cftype *cft) 8989 { 8990 unsigned long weight = scale_load_down(css_tg(css)->shares); 8991 int last_delta = INT_MAX; 8992 int prio, delta; 8993 8994 /* find the closest nice value to the current weight */ 8995 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 8996 delta = abs(sched_prio_to_weight[prio] - weight); 8997 if (delta >= last_delta) 8998 break; 8999 last_delta = delta; 9000 } 9001 9002 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 9003 } 9004 9005 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 9006 struct cftype *cft, s64 nice) 9007 { 9008 unsigned long weight; 9009 int idx; 9010 9011 if (nice < MIN_NICE || nice > MAX_NICE) 9012 return -ERANGE; 9013 9014 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 9015 idx = array_index_nospec(idx, 40); 9016 weight = sched_prio_to_weight[idx]; 9017 9018 return sched_group_set_shares(css_tg(css), scale_load(weight)); 9019 } 9020 #endif 9021 9022 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 9023 long period, long quota) 9024 { 9025 if (quota < 0) 9026 seq_puts(sf, "max"); 9027 else 9028 seq_printf(sf, "%ld", quota); 9029 9030 seq_printf(sf, " %ld\n", period); 9031 } 9032 9033 /* caller should put the current value in *@periodp before calling */ 9034 static int __maybe_unused cpu_period_quota_parse(char *buf, 9035 u64 *periodp, u64 *quotap) 9036 { 9037 char tok[21]; /* U64_MAX */ 9038 9039 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 9040 return -EINVAL; 9041 9042 *periodp *= NSEC_PER_USEC; 9043 9044 if (sscanf(tok, "%llu", quotap)) 9045 *quotap *= NSEC_PER_USEC; 9046 else if (!strcmp(tok, "max")) 9047 *quotap = RUNTIME_INF; 9048 else 9049 return -EINVAL; 9050 9051 return 0; 9052 } 9053 9054 #ifdef CONFIG_CFS_BANDWIDTH 9055 static 
int cpu_max_show(struct seq_file *sf, void *v) 9056 { 9057 struct task_group *tg = css_tg(seq_css(sf)); 9058 9059 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); 9060 return 0; 9061 } 9062 9063 static ssize_t cpu_max_write(struct kernfs_open_file *of, 9064 char *buf, size_t nbytes, loff_t off) 9065 { 9066 struct task_group *tg = css_tg(of_css(of)); 9067 u64 period = tg_get_cfs_period(tg); 9068 u64 quota; 9069 int ret; 9070 9071 ret = cpu_period_quota_parse(buf, &period, "a); 9072 if (!ret) 9073 ret = tg_set_cfs_bandwidth(tg, period, quota); 9074 return ret ?: nbytes; 9075 } 9076 #endif 9077 9078 static struct cftype cpu_files[] = { 9079 #ifdef CONFIG_FAIR_GROUP_SCHED 9080 { 9081 .name = "weight", 9082 .flags = CFTYPE_NOT_ON_ROOT, 9083 .read_u64 = cpu_weight_read_u64, 9084 .write_u64 = cpu_weight_write_u64, 9085 }, 9086 { 9087 .name = "weight.nice", 9088 .flags = CFTYPE_NOT_ON_ROOT, 9089 .read_s64 = cpu_weight_nice_read_s64, 9090 .write_s64 = cpu_weight_nice_write_s64, 9091 }, 9092 #endif 9093 #ifdef CONFIG_CFS_BANDWIDTH 9094 { 9095 .name = "max", 9096 .flags = CFTYPE_NOT_ON_ROOT, 9097 .seq_show = cpu_max_show, 9098 .write = cpu_max_write, 9099 }, 9100 #endif 9101 #ifdef CONFIG_UCLAMP_TASK_GROUP 9102 { 9103 .name = "uclamp.min", 9104 .flags = CFTYPE_NOT_ON_ROOT, 9105 .seq_show = cpu_uclamp_min_show, 9106 .write = cpu_uclamp_min_write, 9107 }, 9108 { 9109 .name = "uclamp.max", 9110 .flags = CFTYPE_NOT_ON_ROOT, 9111 .seq_show = cpu_uclamp_max_show, 9112 .write = cpu_uclamp_max_write, 9113 }, 9114 #endif 9115 { } /* terminate */ 9116 }; 9117 9118 struct cgroup_subsys cpu_cgrp_subsys = { 9119 .css_alloc = cpu_cgroup_css_alloc, 9120 .css_online = cpu_cgroup_css_online, 9121 .css_released = cpu_cgroup_css_released, 9122 .css_free = cpu_cgroup_css_free, 9123 .css_extra_stat_show = cpu_extra_stat_show, 9124 .fork = cpu_cgroup_fork, 9125 .can_attach = cpu_cgroup_can_attach, 9126 .attach = cpu_cgroup_attach, 9127 .legacy_cftypes = cpu_legacy_files, 9128 .dfl_cftypes = cpu_files, 9129 .early_init = true, 9130 .threaded = true, 9131 }; 9132 9133 #endif /* CONFIG_CGROUP_SCHED */ 9134 9135 void dump_cpu_task(int cpu) 9136 { 9137 pr_info("Task dump for CPU %d:\n", cpu); 9138 sched_show_task(cpu_curr(cpu)); 9139 } 9140 9141 /* 9142 * Nice levels are multiplicative, with a gentle 10% change for every 9143 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 9144 * nice 1, it will get ~10% less CPU time than another CPU-bound task 9145 * that remained on nice 0. 9146 * 9147 * The "10% effect" is relative and cumulative: from _any_ nice level, 9148 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 9149 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 9150 * If a task goes up by ~10% and another task goes down by ~10% then 9151 * the relative distance between them is ~25%.) 9152 */ 9153 const int sched_prio_to_weight[40] = { 9154 /* -20 */ 88761, 71755, 56483, 46273, 36291, 9155 /* -15 */ 29154, 23254, 18705, 14949, 11916, 9156 /* -10 */ 9548, 7620, 6100, 4904, 3906, 9157 /* -5 */ 3121, 2501, 1991, 1586, 1277, 9158 /* 0 */ 1024, 820, 655, 526, 423, 9159 /* 5 */ 335, 272, 215, 172, 137, 9160 /* 10 */ 110, 87, 70, 56, 45, 9161 /* 15 */ 36, 29, 23, 18, 15, 9162 }; 9163 9164 /* 9165 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. 
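 * That is, sched_prio_to_wmult[i] == 2^32 / sched_prio_to_weight[i]; e.g. for nice 0, 4194304 == 2^32 / 1024.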
9166 * 9167 * In cases where the weight does not change often, we can use the 9168 * precalculated inverse to speed up arithmetic by turning divisions 9169 * into multiplications: 9170 */ 9171 const u32 sched_prio_to_wmult[40] = { 9172 /* -20 */ 48388, 59856, 76040, 92818, 118348, 9173 /* -15 */ 147320, 184698, 229616, 287308, 360437, 9174 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 9175 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 9176 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 9177 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 9178 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 9179 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 9180 }; 9181 9182 void call_trace_sched_update_nr_running(struct rq *rq, int count) 9183 { 9184 trace_sched_update_nr_running_tp(rq, count); 9185 } 9186