// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel scheduler code and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#include "sched.h"

#include <linux/nospec.h>

#include <linux/kcov.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>

#include "../workqueue_internal.h"
#include "../../fs/io-wq.h"
#include "../smpboot.h"

#include "pelt.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT
#endif

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
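
/*
 * Illustrative usage sketch (not part of the original file): the usual
 * calling pattern for the pair of lock helpers above, as used later in
 * this file (e.g. by uclamp_update_active() and __set_cpus_allowed_ptr()).
 * task_rq_unlock() is assumed to be the matching unlock helper from sched.h:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	// with both p->pi_lock and rq->lock held, p cannot be enqueued,
 *	// dequeued or migrated underneath us
 *	task_rq_unlock(rq, p, &rf);
 */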

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight mis-attribution of {soft,}irq
	 * time; a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}


#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;

	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq->hrtick_csd_pending = 0;
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		__hrtick_restart(rq);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;
	}
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}
#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _old, _val = *_ptr;			\
									\
		for (;;) {						\
			_old = cmpxchg(_ptr, _val, _val | _mask);	\
			if (_old == _val)				\
				break;					\
			_val = _old;					\
		}							\
	_old;								\
})
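
/*
 * Illustrative example (not in the original source): fetch_or() behaves
 * like an atomic "return the old word, then OR in the mask". The loop
 * retries the cmpxchg() until no other CPU changed the word in between:
 *
 *	unsigned long flags = 0x01;
 *	unsigned long old  = fetch_or(&flags, 0x04);
 *	// old == 0x01, flags == 0x05
 */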

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * its already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending whether or not the @task is already
 * queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}
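
/*
 * Illustrative usage sketch (assumed pattern, not taken from this file):
 * a typical wake_q user collects wakeups under a lock and issues them after
 * dropping it. DEFINE_WAKE_Q() is the initializer from
 * <linux/sched/wake_q.h>; "some_lock" and "waiter" are placeholders:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	spin_lock(&some_lock);
 *	wake_q_add(&wake_q, waiter);	// takes a task reference
 *	spin_unlock(&some_lock);
 *	wake_up_q(&wake_q);		// wakes tasks and drops the references
 */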

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;

	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd),
			housekeeping_cpumask(HK_FLAG_TIMER)) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
	cpu = default_cpu;
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!(atomic_read(nohz_flags(cpu)) & NOHZ_KICK_MASK))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	atomic_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
	return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there are more than one RR tasks, we need the tick to effect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
	 * forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
	 * if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
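
/*
 * Illustrative usage sketch (assumption, not from this file): visit every
 * task_group below a given root, doing work only on the way down and using
 * tg_nop() as the no-op "up" visitor. "tg_print" is a placeholder visitor:
 *
 *	static int tg_print(struct task_group *tg, void *data)
 *	{
 *		// per-group work here; a non-zero return aborts the walk
 *		return 0;
 *	}
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, tg_print, tg_nop, NULL);
 *	rcu_read_unlock();
 */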

static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less or equal than these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return clamp_value / UCLAMP_BUCKET_DELTA;
}

static inline unsigned int uclamp_bucket_base_value(unsigned int clamp_value)
{
	return UCLAMP_BUCKET_DELTA * uclamp_bucket_id(clamp_value);
}

static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;
	return SCHED_CAPACITY_SCALE;
}
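
/*
 * Worked example (illustrative, assuming the usual SCHED_CAPACITY_SCALE of
 * 1024 and a CONFIG_UCLAMP_BUCKETS_COUNT of 5): UCLAMP_BUCKET_DELTA is
 * DIV_ROUND_CLOSEST(1024, 5) == 205, so
 *
 *	uclamp_bucket_id(0)    == 0
 *	uclamp_bucket_id(300)  == 1	(300 / 205)
 *	uclamp_bucket_id(1024) == 4	(the top bucket)
 *
 * and uclamp_bucket_base_value(300) == 205, the rounded-down start of
 * bucket 1.
 */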

static inline void uclamp_se_set(struct uclamp_se *uc_se,
				 unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->bucket_id = uclamp_bucket_id(value);
	uc_se->user_defined = user_defined;
}

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				   unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * top most bucket with tasks in.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	struct uclamp_se uc_max;

	/*
	 * Tasks in autogroups or root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	uc_max = task_group(p)->uclamp[clamp_id];
	if (uc_req.value > uc_max.value || !uc_req.user_defined)
		return uc_max;
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}
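
/*
 * Worked example (illustrative, values assumed): a task requests
 * UCLAMP_MIN = 800 via sched_setattr(), its (non-root) task group caps
 * UCLAMP_MIN at 512, and the system default is 1024. uclamp_tg_restrict()
 * returns 512, since the group cap is smaller than the request, and
 * uclamp_eff_get() keeps 512 because it does not exceed the system default,
 * so uclamp_eff_value() reports 512 for the task while it is not refcounted
 * on a runqueue.
 */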

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space, track
 * within each bucket the maximum value for tasks refcounted in it.
 * This "local max aggregation" allows to track the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_held(&rq->lock);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > READ_ONCE(uc_rq->value))
		WRITE_ONCE(uc_rq->value, uc_se->value);
}

/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_held(&rq->lock);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;
	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = READ_ONCE(uc_rq->value);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fixup the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		WRITE_ONCE(uc_rq->value, bkt_clamp);
	}
}

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void
uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	if (p->uclamp[clamp_id].active) {
		uclamp_rq_dec_id(rq, p, clamp_id);
		uclamp_rq_inc_id(rq, p, clamp_id);
	}

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css,
			   unsigned int clamps)
{
	enum uclamp_id clamp_id;
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it))) {
		for_each_clamp_id(clamp_id) {
			if ((0x1 << clamp_id) & clamps)
				uclamp_update_active(p, clamp_id);
		}
	}
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	rcu_read_lock();
	cpu_util_update_eff(&root_task_group.css);
	rcu_read_unlock();
}
#else
static void uclamp_update_root_tg(void) { }
#endif

int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max;
	int result;

	mutex_lock(&uclamp_mutex);
	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		goto done;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE) {
		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg)
		uclamp_update_root_tg();

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */

	goto done;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
done:
	mutex_unlock(&uclamp_mutex);

	return result;
}
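
/*
 * Illustrative example (assumed admin interface, not defined in this file):
 * the handler above is typically wired up to the sched_util_clamp_{min,max}
 * sysctls, so the system-wide defaults can be tightened from user space with
 * something like:
 *
 *	# echo 512 > /proc/sys/kernel/sched_util_clamp_min
 *	# echo 800 > /proc/sys/kernel/sched_util_clamp_max
 *
 * Values are on the SCHED_CAPACITY_SCALE range (1024 by default) and min
 * must not exceed max, otherwise the write is rejected with -EINVAL.
 */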

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	unsigned int lower_bound = p->uclamp_req[UCLAMP_MIN].value;
	unsigned int upper_bound = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN)
		lower_bound = attr->sched_util_min;
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX)
		upper_bound = attr->sched_util_max;

	if (lower_bound > upper_bound)
		return -EINVAL;
	if (upper_bound > SCHED_CAPACITY_SCALE)
		return -EINVAL;

	return 0;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	/*
	 * On scheduling class change, reset to default clamps for tasks
	 * without a task-specific value.
	 */
	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int clamp_value = uclamp_none(clamp_id);

		/* Keep using defined clamps across class changes */
		if (uc_se->user_defined)
			continue;

		/* By default, RT tasks always get 100% boost */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			clamp_value = uclamp_none(UCLAMP_MAX);

		uclamp_se_set(uc_se, clamp_value, false);
	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}
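
/*
 * Illustrative user-space sketch (assumed syscall usage, not part of this
 * file): a per-task clamp request that the two helpers above validate and
 * apply would look roughly like the following, using the uapi sched_attr
 * fields and flags; the values are placeholders:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_flags	= SCHED_FLAG_UTIL_CLAMP_MIN |
 *				  SCHED_FLAG_UTIL_CLAMP_MAX,
 *		.sched_util_min	= 256,
 *		.sched_util_max	= 768,
 *	};
 *
 *	syscall(__NR_sched_setattr, pid, &attr, 0);
 */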

static void uclamp_fork(struct task_struct *p)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id)
		p->uclamp[clamp_id].active = false;

	if (likely(!p->sched_reset_on_fork))
		return;

	for_each_clamp_id(clamp_id) {
		unsigned int clamp_value = uclamp_none(clamp_id);

		/* By default, RT tasks always get 100% boost */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			clamp_value = uclamp_none(UCLAMP_MAX);

		uclamp_se_set(&p->uclamp_req[clamp_id], clamp_value, false);
	}
}

static void __init init_uclamp(void)
{
	struct uclamp_se uc_max = {};
	enum uclamp_id clamp_id;
	int cpu;

	mutex_init(&uclamp_mutex);

	for_each_possible_cpu(cpu) {
		memset(&cpu_rq(cpu)->uclamp, 0,
				sizeof(struct uclamp_rq)*UCLAMP_CNT);
		cpu_rq(cpu)->uclamp_flags = 0;
	}

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&init_task.uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}

	/* System defaults allow max clamp values for both indexes */
	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
	for_each_clamp_id(clamp_id) {
		uclamp_default[clamp_id] = uc_max;
#ifdef CONFIG_UCLAMP_TASK_GROUP
		root_task_group.uclamp_req[clamp_id] = uc_max;
		root_task_group.uclamp[clamp_id] = uc_max;
#endif
	}
}

#else /* CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & ENQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & ENQUEUE_RESTORE)) {
		sched_info_queued(rq, p);
		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
	}

	uclamp_rq_inc(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & DEQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & DEQUEUE_SAVE)) {
		sched_info_dequeued(rq, p);
		psi_dequeue(p, flags & DEQUEUE_SLEEP);
	}

	uclamp_rq_dec(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);

	p->on_rq = TASK_ON_RQ_QUEUED;
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;

	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}
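
/*
 * Worked example (illustrative): with MAX_RT_PRIO == 100 and MAX_DL_PRIO == 0,
 * normal_prio() maps
 *
 *	SCHED_DEADLINE task			-> -1
 *	SCHED_FIFO/RR task, rt_priority = 50	-> 99 - 50 = 49
 *	SCHED_NORMAL task, nice 0		-> static_prio = 120
 *
 * so lower numbers mean higher priority, and every RT priority sorts ahead
 * of every fair-class priority.
 */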

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_curr(rq);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq);
}

#ifdef CONFIG_SMP

/*
 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
 * __set_cpus_allowed_ptr() and select_fallback_rq().
 */
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
		return false;

	if (is_per_cpu_kthread(p))
		return cpu_online(cpu);

	return cpu_active(cpu);
}

/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
				   struct task_struct *p, int new_cpu)
{
	lockdep_assert_held(&rq->lock);

	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
	dequeue_task(rq, p, DEQUEUE_NOCLOCK);
	set_task_cpu(p, new_cpu);
	rq_unlock(rq, rf);

	rq = cpu_rq(new_cpu);

	rq_lock(rq, rf);
	BUG_ON(task_cpu(p) != new_cpu);
	enqueue_task(rq, p, 0);
	p->on_rq = TASK_ON_RQ_QUEUED;
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

/*
 * Move (not current) task off this CPU, onto the destination CPU. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
				 struct task_struct *p, int dest_cpu)
{
	/* Affinity changed (again). */
	if (!is_cpu_allowed(p, dest_cpu))
		return rq;

	update_rq_clock(rq);
	rq = move_queued_task(rq, rf, p, dest_cpu);

	return rq;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();
	struct rq_flags rf;

	/*
	 * The original target CPU might have gone down and we might
	 * be on another CPU but it doesn't matter.
	 */
	local_irq_disable();
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_ptr
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	sched_ttwu_pending();

	raw_spin_lock(&p->pi_lock);
	rq_lock(rq, &rf);
	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq) {
		if (task_on_rq_queued(p))
			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
		else
			p->wake_cpu = arg->dest_cpu;
	}
	rq_unlock(rq, &rf);
	raw_spin_unlock(&p->pi_lock);

	local_irq_enable();
	return 0;
}

/*
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 */
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	cpumask_copy(&p->cpus_mask, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	struct rq *rq = task_rq(p);
	bool queued, running;

	lockdep_assert_held(&p->pi_lock);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued) {
		/*
		 * Because __kthread_bind() calls this on blocked tasks without
		 * holding rq->lock.
		 */
		lockdep_assert_held(&rq->lock);
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	}
	if (running)
		put_prev_task(rq, p);

	p->sched_class->set_cpus_allowed(p, new_mask);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, bool check)
{
	const struct cpumask *cpu_valid_mask = cpu_active_mask;
	unsigned int dest_cpu;
	struct rq_flags rf;
	struct rq *rq;
	int ret = 0;

	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	if (p->flags & PF_KTHREAD) {
		/*
		 * Kernel threads are allowed on online && !active CPUs
		 */
		cpu_valid_mask = cpu_online_mask;
	}

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if (check && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	if (cpumask_equal(p->cpus_ptr, new_mask))
		goto out;

	/*
	 * Picking a ~random cpu helps in cases where we are changing affinity
	 * for groups of tasks (ie. cpuset), so that load balancing is not
	 * immediately required to distribute the tasks within their new mask.
	 */
	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
	if (dest_cpu >= nr_cpu_ids) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	if (p->flags & PF_KTHREAD) {
		/*
		 * For kernel threads that do indeed end up on online &&
		 * !active we want to ensure they are strict per-CPU threads.
		 */
		WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
			!cpumask_intersects(new_mask, cpu_active_mask) &&
			p->nr_cpus_allowed != 1);
	}

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &rf);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		return 0;
	} else if (task_on_rq_queued(p)) {
		/*
		 * OK, since we're going to drop the lock immediately
		 * afterwards anyway.
		 */
		rq = move_queued_task(rq, &rf, p, dest_cpu);
	}
out:
	task_rq_unlock(rq, p, &rf);

	return ret;
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
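
/*
 * Illustrative usage sketch (assumed caller, not from this file): a driver
 * or subsystem that wants its helper kthread confined to one CPU could do
 * the following; "worker_fn", the thread name and the CPU number are
 * placeholders, and kthread_bind() remains the stricter alternative for
 * genuinely per-CPU kthreads:
 *
 *	struct task_struct *tsk = kthread_create(worker_fn, NULL, "my_worker");
 *
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(3));
 *		wake_up_process(tsk);
 *	}
 */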

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!p->on_rq);

	/*
	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
	 * time relying on p->on_rq.
	 */
	WARN_ON_ONCE(p->state == TASK_RUNNING &&
		     p->sched_class == &fair_sched_class &&
		     (p->on_rq && !task_on_rq_migrating(p)));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
	/*
	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
	 */
	WARN_ON_ONCE(!cpu_online(new_cpu));
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		rseq_migrate(p);
		perf_event_task_migrate(p);
	}

	__set_task_cpu(p, new_cpu);
}

#ifdef CONFIG_NUMA_BALANCING
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (task_on_rq_queued(p)) {
		struct rq *src_rq, *dst_rq;
		struct rq_flags srf, drf;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		rq_pin_lock(src_rq, &srf);
		rq_pin_lock(dst_rq, &drf);

		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		check_preempt_curr(dst_rq, p, 0);

		rq_unpin_lock(dst_rq, &drf);
		rq_unpin_lock(src_rq, &srf);

	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous CPU our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
		return -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);

	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);

	return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p,
		 int target_cpu, int curr_cpu)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = curr_cpu,
		.dst_task = p,
		.dst_cpu = target_cpu,
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	int running, queued;
	struct rq_flags rf;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &rf);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &rf);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = NSEC_PER_SEC / HZ;

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}
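
/*
 * Illustrative usage sketch (assumed caller, not from this file): ptrace-style
 * code uses the returned switch count, per the comment above, to check that a
 * stopped task stayed off the CPU between two points:
 *
 *	unsigned long ncsw;
 *
 *	ncsw = wait_task_inactive(child, __TASK_TRACED);
 *	if (!ncsw)
 *		return -ESRCH;		// child left __TASK_TRACED
 *	...
 *	if (wait_task_inactive(child, __TASK_TRACED) != ncsw)
 *		return -ESRCH;		// it got scheduled in between
 */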
*/ 2063 switch (state) { 2064 case cpuset: 2065 if (IS_ENABLED(CONFIG_CPUSETS)) { 2066 cpuset_cpus_allowed_fallback(p); 2067 state = possible; 2068 break; 2069 } 2070 /* Fall-through */ 2071 case possible: 2072 do_set_cpus_allowed(p, cpu_possible_mask); 2073 state = fail; 2074 break; 2075 2076 case fail: 2077 BUG(); 2078 break; 2079 } 2080 } 2081 2082 out: 2083 if (state != cpuset) { 2084 /* 2085 * Don't tell them about moving exiting tasks or 2086 * kernel threads (both mm NULL), since they never 2087 * leave kernel. 2088 */ 2089 if (p->mm && printk_ratelimit()) { 2090 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 2091 task_pid_nr(p), p->comm, cpu); 2092 } 2093 } 2094 2095 return dest_cpu; 2096 } 2097 2098 /* 2099 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 2100 */ 2101 static inline 2102 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) 2103 { 2104 lockdep_assert_held(&p->pi_lock); 2105 2106 if (p->nr_cpus_allowed > 1) 2107 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); 2108 else 2109 cpu = cpumask_any(p->cpus_ptr); 2110 2111 /* 2112 * In order not to call set_task_cpu() on a blocking task we need 2113 * to rely on ttwu() to place the task on a valid ->cpus_ptr 2114 * CPU. 2115 * 2116 * Since this is common to all placement strategies, this lives here. 2117 * 2118 * [ this allows ->select_task() to simply return task_cpu(p) and 2119 * not worry about this generic constraint ] 2120 */ 2121 if (unlikely(!is_cpu_allowed(p, cpu))) 2122 cpu = select_fallback_rq(task_cpu(p), p); 2123 2124 return cpu; 2125 } 2126 2127 static void update_avg(u64 *avg, u64 sample) 2128 { 2129 s64 diff = sample - *avg; 2130 *avg += diff >> 3; 2131 } 2132 2133 void sched_set_stop_task(int cpu, struct task_struct *stop) 2134 { 2135 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 2136 struct task_struct *old_stop = cpu_rq(cpu)->stop; 2137 2138 if (stop) { 2139 /* 2140 * Make it appear like a SCHED_FIFO task, its something 2141 * userspace knows about and won't get confused about. 2142 * 2143 * Also, it will make PI more or less work without too 2144 * much confusion -- but then, stop work should not 2145 * rely on PI working anyway. 2146 */ 2147 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 2148 2149 stop->sched_class = &stop_sched_class; 2150 } 2151 2152 cpu_rq(cpu)->stop = stop; 2153 2154 if (old_stop) { 2155 /* 2156 * Reset it back to a normal scheduling class so that 2157 * it can die in pieces. 
2158 */ 2159 old_stop->sched_class = &rt_sched_class; 2160 } 2161 } 2162 2163 #else 2164 2165 static inline int __set_cpus_allowed_ptr(struct task_struct *p, 2166 const struct cpumask *new_mask, bool check) 2167 { 2168 return set_cpus_allowed_ptr(p, new_mask); 2169 } 2170 2171 #endif /* CONFIG_SMP */ 2172 2173 static void 2174 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 2175 { 2176 struct rq *rq; 2177 2178 if (!schedstat_enabled()) 2179 return; 2180 2181 rq = this_rq(); 2182 2183 #ifdef CONFIG_SMP 2184 if (cpu == rq->cpu) { 2185 __schedstat_inc(rq->ttwu_local); 2186 __schedstat_inc(p->se.statistics.nr_wakeups_local); 2187 } else { 2188 struct sched_domain *sd; 2189 2190 __schedstat_inc(p->se.statistics.nr_wakeups_remote); 2191 rcu_read_lock(); 2192 for_each_domain(rq->cpu, sd) { 2193 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 2194 __schedstat_inc(sd->ttwu_wake_remote); 2195 break; 2196 } 2197 } 2198 rcu_read_unlock(); 2199 } 2200 2201 if (wake_flags & WF_MIGRATED) 2202 __schedstat_inc(p->se.statistics.nr_wakeups_migrate); 2203 #endif /* CONFIG_SMP */ 2204 2205 __schedstat_inc(rq->ttwu_count); 2206 __schedstat_inc(p->se.statistics.nr_wakeups); 2207 2208 if (wake_flags & WF_SYNC) 2209 __schedstat_inc(p->se.statistics.nr_wakeups_sync); 2210 } 2211 2212 /* 2213 * Mark the task runnable and perform wakeup-preemption. 2214 */ 2215 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, 2216 struct rq_flags *rf) 2217 { 2218 check_preempt_curr(rq, p, wake_flags); 2219 p->state = TASK_RUNNING; 2220 trace_sched_wakeup(p); 2221 2222 #ifdef CONFIG_SMP 2223 if (p->sched_class->task_woken) { 2224 /* 2225 * Our task @p is fully woken up and running; so its safe to 2226 * drop the rq->lock, hereafter rq is only used for statistics. 2227 */ 2228 rq_unpin_lock(rq, rf); 2229 p->sched_class->task_woken(rq, p); 2230 rq_repin_lock(rq, rf); 2231 } 2232 2233 if (rq->idle_stamp) { 2234 u64 delta = rq_clock(rq) - rq->idle_stamp; 2235 u64 max = 2*rq->max_idle_balance_cost; 2236 2237 update_avg(&rq->avg_idle, delta); 2238 2239 if (rq->avg_idle > max) 2240 rq->avg_idle = max; 2241 2242 rq->idle_stamp = 0; 2243 } 2244 #endif 2245 } 2246 2247 static void 2248 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 2249 struct rq_flags *rf) 2250 { 2251 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 2252 2253 lockdep_assert_held(&rq->lock); 2254 2255 #ifdef CONFIG_SMP 2256 if (p->sched_contributes_to_load) 2257 rq->nr_uninterruptible--; 2258 2259 if (wake_flags & WF_MIGRATED) 2260 en_flags |= ENQUEUE_MIGRATED; 2261 #endif 2262 2263 activate_task(rq, p, en_flags); 2264 ttwu_do_wakeup(rq, p, wake_flags, rf); 2265 } 2266 2267 /* 2268 * Called in case the task @p isn't fully descheduled from its runqueue, 2269 * in this case we must do a remote wakeup. Its a 'light' wakeup though, 2270 * since all we need to do is flip p->state to TASK_RUNNING, since 2271 * the task is still ->on_rq. 
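 * (e.g. @p has set TASK_UNINTERRUPTIBLE on its way to sleep but __schedule() has not dequeued it yet, so ->on_rq is still set)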
2272 */ 2273 static int ttwu_remote(struct task_struct *p, int wake_flags) 2274 { 2275 struct rq_flags rf; 2276 struct rq *rq; 2277 int ret = 0; 2278 2279 rq = __task_rq_lock(p, &rf); 2280 if (task_on_rq_queued(p)) { 2281 /* check_preempt_curr() may use rq clock */ 2282 update_rq_clock(rq); 2283 ttwu_do_wakeup(rq, p, wake_flags, &rf); 2284 ret = 1; 2285 } 2286 __task_rq_unlock(rq, &rf); 2287 2288 return ret; 2289 } 2290 2291 #ifdef CONFIG_SMP 2292 void sched_ttwu_pending(void) 2293 { 2294 struct rq *rq = this_rq(); 2295 struct llist_node *llist = llist_del_all(&rq->wake_list); 2296 struct task_struct *p, *t; 2297 struct rq_flags rf; 2298 2299 if (!llist) 2300 return; 2301 2302 rq_lock_irqsave(rq, &rf); 2303 update_rq_clock(rq); 2304 2305 llist_for_each_entry_safe(p, t, llist, wake_entry) 2306 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); 2307 2308 rq_unlock_irqrestore(rq, &rf); 2309 } 2310 2311 void scheduler_ipi(void) 2312 { 2313 /* 2314 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting 2315 * TIF_NEED_RESCHED remotely (for the first time) will also send 2316 * this IPI. 2317 */ 2318 preempt_fold_need_resched(); 2319 2320 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) 2321 return; 2322 2323 /* 2324 * Not all reschedule IPI handlers call irq_enter/irq_exit, since 2325 * traditionally all their work was done from the interrupt return 2326 * path. Now that we actually do some work, we need to make sure 2327 * we do call them. 2328 * 2329 * Some archs already do call them, luckily irq_enter/exit nest 2330 * properly. 2331 * 2332 * Arguably we should visit all archs and update all handlers, 2333 * however a fair share of IPIs are still resched only so this would 2334 * somewhat pessimize the simple resched case. 2335 */ 2336 irq_enter(); 2337 sched_ttwu_pending(); 2338 2339 /* 2340 * Check if someone kicked us for doing the nohz idle load balance. 
2341 */ 2342 if (unlikely(got_nohz_idle_kick())) { 2343 this_rq()->idle_balance = 1; 2344 raise_softirq_irqoff(SCHED_SOFTIRQ); 2345 } 2346 irq_exit(); 2347 } 2348 2349 static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags) 2350 { 2351 struct rq *rq = cpu_rq(cpu); 2352 2353 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 2354 2355 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) { 2356 if (!set_nr_if_polling(rq->idle)) 2357 smp_send_reschedule(cpu); 2358 else 2359 trace_sched_wake_idle_without_ipi(cpu); 2360 } 2361 } 2362 2363 void wake_up_if_idle(int cpu) 2364 { 2365 struct rq *rq = cpu_rq(cpu); 2366 struct rq_flags rf; 2367 2368 rcu_read_lock(); 2369 2370 if (!is_idle_task(rcu_dereference(rq->curr))) 2371 goto out; 2372 2373 if (set_nr_if_polling(rq->idle)) { 2374 trace_sched_wake_idle_without_ipi(cpu); 2375 } else { 2376 rq_lock_irqsave(rq, &rf); 2377 if (is_idle_task(rq->curr)) 2378 smp_send_reschedule(cpu); 2379 /* Else CPU is not idle, do nothing here: */ 2380 rq_unlock_irqrestore(rq, &rf); 2381 } 2382 2383 out: 2384 rcu_read_unlock(); 2385 } 2386 2387 bool cpus_share_cache(int this_cpu, int that_cpu) 2388 { 2389 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 2390 } 2391 #endif /* CONFIG_SMP */ 2392 2393 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 2394 { 2395 struct rq *rq = cpu_rq(cpu); 2396 struct rq_flags rf; 2397 2398 #if defined(CONFIG_SMP) 2399 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { 2400 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ 2401 ttwu_queue_remote(p, cpu, wake_flags); 2402 return; 2403 } 2404 #endif 2405 2406 rq_lock(rq, &rf); 2407 update_rq_clock(rq); 2408 ttwu_do_activate(rq, p, wake_flags, &rf); 2409 rq_unlock(rq, &rf); 2410 } 2411 2412 /* 2413 * Notes on Program-Order guarantees on SMP systems. 2414 * 2415 * MIGRATION 2416 * 2417 * The basic program-order guarantee on SMP systems is that when a task [t] 2418 * migrates, all its activity on its old CPU [c0] happens-before any subsequent 2419 * execution on its new CPU [c1]. 2420 * 2421 * For migration (of runnable tasks) this is provided by the following means: 2422 * 2423 * A) UNLOCK of the rq(c0)->lock scheduling out task t 2424 * B) migration for t is required to synchronize *both* rq(c0)->lock and 2425 * rq(c1)->lock (if not at the same time, then in that order). 2426 * C) LOCK of the rq(c1)->lock scheduling in task 2427 * 2428 * Release/acquire chaining guarantees that B happens after A and C after B. 2429 * Note: the CPU doing B need not be c0 or c1 2430 * 2431 * Example: 2432 * 2433 * CPU0 CPU1 CPU2 2434 * 2435 * LOCK rq(0)->lock 2436 * sched-out X 2437 * sched-in Y 2438 * UNLOCK rq(0)->lock 2439 * 2440 * LOCK rq(0)->lock // orders against CPU0 2441 * dequeue X 2442 * UNLOCK rq(0)->lock 2443 * 2444 * LOCK rq(1)->lock 2445 * enqueue X 2446 * UNLOCK rq(1)->lock 2447 * 2448 * LOCK rq(1)->lock // orders against CPU2 2449 * sched-out Z 2450 * sched-in X 2451 * UNLOCK rq(1)->lock 2452 * 2453 * 2454 * BLOCKING -- aka. SLEEP + WAKEUP 2455 * 2456 * For blocking we (obviously) need to provide the same guarantee as for 2457 * migration. However the means are completely different as there is no lock 2458 * chain to provide order. 
Instead we do: 2459 * 2460 * 1) smp_store_release(X->on_cpu, 0) 2461 * 2) smp_cond_load_acquire(!X->on_cpu) 2462 * 2463 * Example: 2464 * 2465 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 2466 * 2467 * LOCK rq(0)->lock LOCK X->pi_lock 2468 * dequeue X 2469 * sched-out X 2470 * smp_store_release(X->on_cpu, 0); 2471 * 2472 * smp_cond_load_acquire(&X->on_cpu, !VAL); 2473 * X->state = WAKING 2474 * set_task_cpu(X,2) 2475 * 2476 * LOCK rq(2)->lock 2477 * enqueue X 2478 * X->state = RUNNING 2479 * UNLOCK rq(2)->lock 2480 * 2481 * LOCK rq(2)->lock // orders against CPU1 2482 * sched-out Z 2483 * sched-in X 2484 * UNLOCK rq(2)->lock 2485 * 2486 * UNLOCK X->pi_lock 2487 * UNLOCK rq(0)->lock 2488 * 2489 * 2490 * However, for wakeups there is a second guarantee we must provide, namely we 2491 * must ensure that CONDITION=1 done by the caller can not be reordered with 2492 * accesses to the task state; see try_to_wake_up() and set_current_state(). 2493 */ 2494 2495 /** 2496 * try_to_wake_up - wake up a thread 2497 * @p: the thread to be awakened 2498 * @state: the mask of task states that can be woken 2499 * @wake_flags: wake modifier flags (WF_*) 2500 * 2501 * If (@state & @p->state) @p->state = TASK_RUNNING. 2502 * 2503 * If the task was not queued/runnable, also place it back on a runqueue. 2504 * 2505 * Atomic against schedule() which would dequeue a task, also see 2506 * set_current_state(). 2507 * 2508 * This function executes a full memory barrier before accessing the task 2509 * state; see set_current_state(). 2510 * 2511 * Return: %true if @p->state changes (an actual wakeup was done), 2512 * %false otherwise. 2513 */ 2514 static int 2515 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 2516 { 2517 unsigned long flags; 2518 int cpu, success = 0; 2519 2520 preempt_disable(); 2521 if (p == current) { 2522 /* 2523 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) 2524 * == smp_processor_id()'. Together this means we can special 2525 * case the whole 'p->on_rq && ttwu_remote()' case below 2526 * without taking any locks. 2527 * 2528 * In particular: 2529 * - we rely on Program-Order guarantees for all the ordering, 2530 * - we're serialized against set_special_state() by virtue of 2531 * it disabling IRQs (this allows not taking ->pi_lock). 2532 */ 2533 if (!(p->state & state)) 2534 goto out; 2535 2536 success = 1; 2537 cpu = task_cpu(p); 2538 trace_sched_waking(p); 2539 p->state = TASK_RUNNING; 2540 trace_sched_wakeup(p); 2541 goto out; 2542 } 2543 2544 /* 2545 * If we are going to wake up a thread waiting for CONDITION we 2546 * need to ensure that CONDITION=1 done by the caller can not be 2547 * reordered with p->state check below. This pairs with mb() in 2548 * set_current_state() the waiting thread does. 2549 */ 2550 raw_spin_lock_irqsave(&p->pi_lock, flags); 2551 smp_mb__after_spinlock(); 2552 if (!(p->state & state)) 2553 goto unlock; 2554 2555 trace_sched_waking(p); 2556 2557 /* We're going to change ->state: */ 2558 success = 1; 2559 cpu = task_cpu(p); 2560 2561 /* 2562 * Ensure we load p->on_rq _after_ p->state, otherwise it would 2563 * be possible to, falsely, observe p->on_rq == 0 and get stuck 2564 * in smp_cond_load_acquire() below. 
2565 * 2566 * sched_ttwu_pending() try_to_wake_up() 2567 * STORE p->on_rq = 1 LOAD p->state 2568 * UNLOCK rq->lock 2569 * 2570 * __schedule() (switch to task 'p') 2571 * LOCK rq->lock smp_rmb(); 2572 * smp_mb__after_spinlock(); 2573 * UNLOCK rq->lock 2574 * 2575 * [task p] 2576 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq 2577 * 2578 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 2579 * __schedule(). See the comment for smp_mb__after_spinlock(). 2580 */ 2581 smp_rmb(); 2582 if (p->on_rq && ttwu_remote(p, wake_flags)) 2583 goto unlock; 2584 2585 #ifdef CONFIG_SMP 2586 /* 2587 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be 2588 * possible to, falsely, observe p->on_cpu == 0. 2589 * 2590 * One must be running (->on_cpu == 1) in order to remove oneself 2591 * from the runqueue. 2592 * 2593 * __schedule() (switch to task 'p') try_to_wake_up() 2594 * STORE p->on_cpu = 1 LOAD p->on_rq 2595 * UNLOCK rq->lock 2596 * 2597 * __schedule() (put 'p' to sleep) 2598 * LOCK rq->lock smp_rmb(); 2599 * smp_mb__after_spinlock(); 2600 * STORE p->on_rq = 0 LOAD p->on_cpu 2601 * 2602 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 2603 * __schedule(). See the comment for smp_mb__after_spinlock(). 2604 */ 2605 smp_rmb(); 2606 2607 /* 2608 * If the owning (remote) CPU is still in the middle of schedule() with 2609 * this task as prev, wait until its done referencing the task. 2610 * 2611 * Pairs with the smp_store_release() in finish_task(). 2612 * 2613 * This ensures that tasks getting woken will be fully ordered against 2614 * their previous state and preserve Program Order. 2615 */ 2616 smp_cond_load_acquire(&p->on_cpu, !VAL); 2617 2618 p->sched_contributes_to_load = !!task_contributes_to_load(p); 2619 p->state = TASK_WAKING; 2620 2621 if (p->in_iowait) { 2622 delayacct_blkio_end(p); 2623 atomic_dec(&task_rq(p)->nr_iowait); 2624 } 2625 2626 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); 2627 if (task_cpu(p) != cpu) { 2628 wake_flags |= WF_MIGRATED; 2629 psi_ttwu_dequeue(p); 2630 set_task_cpu(p, cpu); 2631 } 2632 2633 #else /* CONFIG_SMP */ 2634 2635 if (p->in_iowait) { 2636 delayacct_blkio_end(p); 2637 atomic_dec(&task_rq(p)->nr_iowait); 2638 } 2639 2640 #endif /* CONFIG_SMP */ 2641 2642 ttwu_queue(p, cpu, wake_flags); 2643 unlock: 2644 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2645 out: 2646 if (success) 2647 ttwu_stat(p, cpu, wake_flags); 2648 preempt_enable(); 2649 2650 return success; 2651 } 2652 2653 /** 2654 * wake_up_process - Wake up a specific process 2655 * @p: The process to be woken up. 2656 * 2657 * Attempt to wake up the nominated process and move it to the set of runnable 2658 * processes. 2659 * 2660 * Return: 1 if the process was woken up, 0 if it was already running. 2661 * 2662 * This function executes a full memory barrier before accessing the task state. 2663 */ 2664 int wake_up_process(struct task_struct *p) 2665 { 2666 return try_to_wake_up(p, TASK_NORMAL, 0); 2667 } 2668 EXPORT_SYMBOL(wake_up_process); 2669 2670 int wake_up_state(struct task_struct *p, unsigned int state) 2671 { 2672 return try_to_wake_up(p, state, 0); 2673 } 2674 2675 /* 2676 * Perform scheduler related setup for a newly forked process p. 2677 * p is forked by current. 
2678 * 2679 * __sched_fork() is basic setup used by init_idle() too: 2680 */ 2681 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 2682 { 2683 p->on_rq = 0; 2684 2685 p->se.on_rq = 0; 2686 p->se.exec_start = 0; 2687 p->se.sum_exec_runtime = 0; 2688 p->se.prev_sum_exec_runtime = 0; 2689 p->se.nr_migrations = 0; 2690 p->se.vruntime = 0; 2691 INIT_LIST_HEAD(&p->se.group_node); 2692 2693 #ifdef CONFIG_FAIR_GROUP_SCHED 2694 p->se.cfs_rq = NULL; 2695 #endif 2696 2697 #ifdef CONFIG_SCHEDSTATS 2698 /* Even if schedstat is disabled, there should not be garbage */ 2699 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 2700 #endif 2701 2702 RB_CLEAR_NODE(&p->dl.rb_node); 2703 init_dl_task_timer(&p->dl); 2704 init_dl_inactive_task_timer(&p->dl); 2705 __dl_clear_params(p); 2706 2707 INIT_LIST_HEAD(&p->rt.run_list); 2708 p->rt.timeout = 0; 2709 p->rt.time_slice = sched_rr_timeslice; 2710 p->rt.on_rq = 0; 2711 p->rt.on_list = 0; 2712 2713 #ifdef CONFIG_PREEMPT_NOTIFIERS 2714 INIT_HLIST_HEAD(&p->preempt_notifiers); 2715 #endif 2716 2717 #ifdef CONFIG_COMPACTION 2718 p->capture_control = NULL; 2719 #endif 2720 init_numa_balancing(clone_flags, p); 2721 } 2722 2723 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 2724 2725 #ifdef CONFIG_NUMA_BALANCING 2726 2727 void set_numabalancing_state(bool enabled) 2728 { 2729 if (enabled) 2730 static_branch_enable(&sched_numa_balancing); 2731 else 2732 static_branch_disable(&sched_numa_balancing); 2733 } 2734 2735 #ifdef CONFIG_PROC_SYSCTL 2736 int sysctl_numa_balancing(struct ctl_table *table, int write, 2737 void __user *buffer, size_t *lenp, loff_t *ppos) 2738 { 2739 struct ctl_table t; 2740 int err; 2741 int state = static_branch_likely(&sched_numa_balancing); 2742 2743 if (write && !capable(CAP_SYS_ADMIN)) 2744 return -EPERM; 2745 2746 t = *table; 2747 t.data = &state; 2748 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2749 if (err < 0) 2750 return err; 2751 if (write) 2752 set_numabalancing_state(state); 2753 return err; 2754 } 2755 #endif 2756 #endif 2757 2758 #ifdef CONFIG_SCHEDSTATS 2759 2760 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 2761 static bool __initdata __sched_schedstats = false; 2762 2763 static void set_schedstats(bool enabled) 2764 { 2765 if (enabled) 2766 static_branch_enable(&sched_schedstats); 2767 else 2768 static_branch_disable(&sched_schedstats); 2769 } 2770 2771 void force_schedstat_enabled(void) 2772 { 2773 if (!schedstat_enabled()) { 2774 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 2775 static_branch_enable(&sched_schedstats); 2776 } 2777 } 2778 2779 static int __init setup_schedstats(char *str) 2780 { 2781 int ret = 0; 2782 if (!str) 2783 goto out; 2784 2785 /* 2786 * This code is called before jump labels have been set up, so we can't 2787 * change the static branch directly just yet. Instead set a temporary 2788 * variable so init_schedstats() can do it later. 
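 * (e.g. booting with "schedstats=enable" on the kernel command line reaches this parser via the __setup("schedstats=", ...) hook below)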
2789 */ 2790 if (!strcmp(str, "enable")) { 2791 __sched_schedstats = true; 2792 ret = 1; 2793 } else if (!strcmp(str, "disable")) { 2794 __sched_schedstats = false; 2795 ret = 1; 2796 } 2797 out: 2798 if (!ret) 2799 pr_warn("Unable to parse schedstats=\n"); 2800 2801 return ret; 2802 } 2803 __setup("schedstats=", setup_schedstats); 2804 2805 static void __init init_schedstats(void) 2806 { 2807 set_schedstats(__sched_schedstats); 2808 } 2809 2810 #ifdef CONFIG_PROC_SYSCTL 2811 int sysctl_schedstats(struct ctl_table *table, int write, 2812 void __user *buffer, size_t *lenp, loff_t *ppos) 2813 { 2814 struct ctl_table t; 2815 int err; 2816 int state = static_branch_likely(&sched_schedstats); 2817 2818 if (write && !capable(CAP_SYS_ADMIN)) 2819 return -EPERM; 2820 2821 t = *table; 2822 t.data = &state; 2823 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2824 if (err < 0) 2825 return err; 2826 if (write) 2827 set_schedstats(state); 2828 return err; 2829 } 2830 #endif /* CONFIG_PROC_SYSCTL */ 2831 #else /* !CONFIG_SCHEDSTATS */ 2832 static inline void init_schedstats(void) {} 2833 #endif /* CONFIG_SCHEDSTATS */ 2834 2835 /* 2836 * fork()/clone()-time setup: 2837 */ 2838 int sched_fork(unsigned long clone_flags, struct task_struct *p) 2839 { 2840 unsigned long flags; 2841 2842 __sched_fork(clone_flags, p); 2843 /* 2844 * We mark the process as NEW here. This guarantees that 2845 * nobody will actually run it, and a signal or other external 2846 * event cannot wake it up and insert it on the runqueue either. 2847 */ 2848 p->state = TASK_NEW; 2849 2850 /* 2851 * Make sure we do not leak PI boosting priority to the child. 2852 */ 2853 p->prio = current->normal_prio; 2854 2855 uclamp_fork(p); 2856 2857 /* 2858 * Revert to default priority/policy on fork if requested. 2859 */ 2860 if (unlikely(p->sched_reset_on_fork)) { 2861 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 2862 p->policy = SCHED_NORMAL; 2863 p->static_prio = NICE_TO_PRIO(0); 2864 p->rt_priority = 0; 2865 } else if (PRIO_TO_NICE(p->static_prio) < 0) 2866 p->static_prio = NICE_TO_PRIO(0); 2867 2868 p->prio = p->normal_prio = __normal_prio(p); 2869 set_load_weight(p, false); 2870 2871 /* 2872 * We don't need the reset flag anymore after the fork. It has 2873 * fulfilled its duty: 2874 */ 2875 p->sched_reset_on_fork = 0; 2876 } 2877 2878 if (dl_prio(p->prio)) 2879 return -EAGAIN; 2880 else if (rt_prio(p->prio)) 2881 p->sched_class = &rt_sched_class; 2882 else 2883 p->sched_class = &fair_sched_class; 2884 2885 init_entity_runnable_average(&p->se); 2886 2887 /* 2888 * The child is not yet in the pid-hash so no cgroup attach races, 2889 * and the cgroup is pinned to this child due to cgroup_fork() 2890 * is ran before sched_fork(). 2891 * 2892 * Silence PROVE_RCU. 2893 */ 2894 raw_spin_lock_irqsave(&p->pi_lock, flags); 2895 /* 2896 * We're setting the CPU for the first time, we don't migrate, 2897 * so use __set_task_cpu(). 
2898 */ 2899 __set_task_cpu(p, smp_processor_id()); 2900 if (p->sched_class->task_fork) 2901 p->sched_class->task_fork(p); 2902 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2903 2904 #ifdef CONFIG_SCHED_INFO 2905 if (likely(sched_info_on())) 2906 memset(&p->sched_info, 0, sizeof(p->sched_info)); 2907 #endif 2908 #if defined(CONFIG_SMP) 2909 p->on_cpu = 0; 2910 #endif 2911 init_task_preempt_count(p); 2912 #ifdef CONFIG_SMP 2913 plist_node_init(&p->pushable_tasks, MAX_PRIO); 2914 RB_CLEAR_NODE(&p->pushable_dl_tasks); 2915 #endif 2916 return 0; 2917 } 2918 2919 unsigned long to_ratio(u64 period, u64 runtime) 2920 { 2921 if (runtime == RUNTIME_INF) 2922 return BW_UNIT; 2923 2924 /* 2925 * Doing this here saves a lot of checks in all 2926 * the calling paths, and returning zero seems 2927 * safe for them anyway. 2928 */ 2929 if (period == 0) 2930 return 0; 2931 2932 return div64_u64(runtime << BW_SHIFT, period); 2933 } 2934 2935 /* 2936 * wake_up_new_task - wake up a newly created task for the first time. 2937 * 2938 * This function will do some initial scheduler statistics housekeeping 2939 * that must be done for every newly created context, then puts the task 2940 * on the runqueue and wakes it. 2941 */ 2942 void wake_up_new_task(struct task_struct *p) 2943 { 2944 struct rq_flags rf; 2945 struct rq *rq; 2946 2947 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 2948 p->state = TASK_RUNNING; 2949 #ifdef CONFIG_SMP 2950 /* 2951 * Fork balancing, do it here and not earlier because: 2952 * - cpus_ptr can change in the fork path 2953 * - any previously selected CPU might disappear through hotplug 2954 * 2955 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 2956 * as we're not fully set-up yet. 2957 */ 2958 p->recent_used_cpu = task_cpu(p); 2959 __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 2960 #endif 2961 rq = __task_rq_lock(p, &rf); 2962 update_rq_clock(rq); 2963 post_init_entity_util_avg(p); 2964 2965 activate_task(rq, p, ENQUEUE_NOCLOCK); 2966 trace_sched_wakeup_new(p); 2967 check_preempt_curr(rq, p, WF_FORK); 2968 #ifdef CONFIG_SMP 2969 if (p->sched_class->task_woken) { 2970 /* 2971 * Nothing relies on rq->lock after this, so its fine to 2972 * drop it. 2973 */ 2974 rq_unpin_lock(rq, &rf); 2975 p->sched_class->task_woken(rq, p); 2976 rq_repin_lock(rq, &rf); 2977 } 2978 #endif 2979 task_rq_unlock(rq, p, &rf); 2980 } 2981 2982 #ifdef CONFIG_PREEMPT_NOTIFIERS 2983 2984 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); 2985 2986 void preempt_notifier_inc(void) 2987 { 2988 static_branch_inc(&preempt_notifier_key); 2989 } 2990 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 2991 2992 void preempt_notifier_dec(void) 2993 { 2994 static_branch_dec(&preempt_notifier_key); 2995 } 2996 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 2997 2998 /** 2999 * preempt_notifier_register - tell me when current is being preempted & rescheduled 3000 * @notifier: notifier struct to register 3001 */ 3002 void preempt_notifier_register(struct preempt_notifier *notifier) 3003 { 3004 if (!static_branch_unlikely(&preempt_notifier_key)) 3005 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 3006 3007 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 3008 } 3009 EXPORT_SYMBOL_GPL(preempt_notifier_register); 3010 3011 /** 3012 * preempt_notifier_unregister - no longer interested in preemption notifications 3013 * @notifier: notifier struct to unregister 3014 * 3015 * This is *not* safe to call from within a preemption notifier. 
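 * An illustrative pairing only (the 'ops' and 'notifier' names are made up, not taken from any in-tree user): * * preempt_notifier_inc(); * preempt_notifier_init(&notifier, &ops); * preempt_notifier_register(&notifier); * ... * preempt_notifier_unregister(&notifier); * preempt_notifier_dec();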
3016 */ 3017 void preempt_notifier_unregister(struct preempt_notifier *notifier) 3018 { 3019 hlist_del(¬ifier->link); 3020 } 3021 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 3022 3023 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 3024 { 3025 struct preempt_notifier *notifier; 3026 3027 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 3028 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 3029 } 3030 3031 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 3032 { 3033 if (static_branch_unlikely(&preempt_notifier_key)) 3034 __fire_sched_in_preempt_notifiers(curr); 3035 } 3036 3037 static void 3038 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 3039 struct task_struct *next) 3040 { 3041 struct preempt_notifier *notifier; 3042 3043 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 3044 notifier->ops->sched_out(notifier, next); 3045 } 3046 3047 static __always_inline void 3048 fire_sched_out_preempt_notifiers(struct task_struct *curr, 3049 struct task_struct *next) 3050 { 3051 if (static_branch_unlikely(&preempt_notifier_key)) 3052 __fire_sched_out_preempt_notifiers(curr, next); 3053 } 3054 3055 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 3056 3057 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 3058 { 3059 } 3060 3061 static inline void 3062 fire_sched_out_preempt_notifiers(struct task_struct *curr, 3063 struct task_struct *next) 3064 { 3065 } 3066 3067 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 3068 3069 static inline void prepare_task(struct task_struct *next) 3070 { 3071 #ifdef CONFIG_SMP 3072 /* 3073 * Claim the task as running, we do this before switching to it 3074 * such that any running task will have this set. 3075 */ 3076 next->on_cpu = 1; 3077 #endif 3078 } 3079 3080 static inline void finish_task(struct task_struct *prev) 3081 { 3082 #ifdef CONFIG_SMP 3083 /* 3084 * After ->on_cpu is cleared, the task can be moved to a different CPU. 3085 * We must ensure this doesn't happen until the switch is completely 3086 * finished. 3087 * 3088 * In particular, the load of prev->state in finish_task_switch() must 3089 * happen before this. 3090 * 3091 * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 
3092 */ 3093 smp_store_release(&prev->on_cpu, 0); 3094 #endif 3095 } 3096 3097 static inline void 3098 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) 3099 { 3100 /* 3101 * The runqueue lock will be released by the next 3102 * task (which is an invalid locking op but in the case 3103 * of the scheduler it's an obvious special-case), so we 3104 * do an early lockdep release here: 3105 */ 3106 rq_unpin_lock(rq, rf); 3107 spin_release(&rq->lock.dep_map, _THIS_IP_); 3108 #ifdef CONFIG_DEBUG_SPINLOCK 3109 /* this is a valid case when another task releases the spinlock */ 3110 rq->lock.owner = next; 3111 #endif 3112 } 3113 3114 static inline void finish_lock_switch(struct rq *rq) 3115 { 3116 /* 3117 * If we are tracking spinlock dependencies then we have to 3118 * fix up the runqueue lock - which gets 'carried over' from 3119 * prev into current: 3120 */ 3121 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 3122 raw_spin_unlock_irq(&rq->lock); 3123 } 3124 3125 /* 3126 * NOP if the arch has not defined these: 3127 */ 3128 3129 #ifndef prepare_arch_switch 3130 # define prepare_arch_switch(next) do { } while (0) 3131 #endif 3132 3133 #ifndef finish_arch_post_lock_switch 3134 # define finish_arch_post_lock_switch() do { } while (0) 3135 #endif 3136 3137 /** 3138 * prepare_task_switch - prepare to switch tasks 3139 * @rq: the runqueue preparing to switch 3140 * @prev: the current task that is being switched out 3141 * @next: the task we are going to switch to. 3142 * 3143 * This is called with the rq lock held and interrupts off. It must 3144 * be paired with a subsequent finish_task_switch after the context 3145 * switch. 3146 * 3147 * prepare_task_switch sets up locking and calls architecture specific 3148 * hooks. 3149 */ 3150 static inline void 3151 prepare_task_switch(struct rq *rq, struct task_struct *prev, 3152 struct task_struct *next) 3153 { 3154 kcov_prepare_switch(prev); 3155 sched_info_switch(rq, prev, next); 3156 perf_event_task_sched_out(prev, next); 3157 rseq_preempt(prev); 3158 fire_sched_out_preempt_notifiers(prev, next); 3159 prepare_task(next); 3160 prepare_arch_switch(next); 3161 } 3162 3163 /** 3164 * finish_task_switch - clean up after a task-switch 3165 * @prev: the thread we just switched away from. 3166 * 3167 * finish_task_switch must be called after the context switch, paired 3168 * with a prepare_task_switch call before the context switch. 3169 * finish_task_switch will reconcile locking set up by prepare_task_switch, 3170 * and do any other architecture-specific cleanup actions. 3171 * 3172 * Note that we may have delayed dropping an mm in context_switch(). If 3173 * so, we finish that here outside of the runqueue lock. (Doing it 3174 * with the lock held can cause deadlocks; see schedule() for 3175 * details.) 3176 * 3177 * The context switch has flipped the stack from under us and restored the 3178 * local variables which were saved when this task called schedule() in the 3179 * past. prev == current is still correct but we need to recalculate this_rq 3180 * because prev may have moved to another CPU.
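 * (this is why finish_task_switch() recomputes this_rq() and returns the resulting rq to its callers)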
3181 */ 3182 static struct rq *finish_task_switch(struct task_struct *prev) 3183 __releases(rq->lock) 3184 { 3185 struct rq *rq = this_rq(); 3186 struct mm_struct *mm = rq->prev_mm; 3187 long prev_state; 3188 3189 /* 3190 * The previous task will have left us with a preempt_count of 2 3191 * because it left us after: 3192 * 3193 * schedule() 3194 * preempt_disable(); // 1 3195 * __schedule() 3196 * raw_spin_lock_irq(&rq->lock) // 2 3197 * 3198 * Also, see FORK_PREEMPT_COUNT. 3199 */ 3200 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, 3201 "corrupted preempt_count: %s/%d/0x%x\n", 3202 current->comm, current->pid, preempt_count())) 3203 preempt_count_set(FORK_PREEMPT_COUNT); 3204 3205 rq->prev_mm = NULL; 3206 3207 /* 3208 * A task struct has one reference for the use as "current". 3209 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 3210 * schedule one last time. The schedule call will never return, and 3211 * the scheduled task must drop that reference. 3212 * 3213 * We must observe prev->state before clearing prev->on_cpu (in 3214 * finish_task), otherwise a concurrent wakeup can get prev 3215 * running on another CPU and we could race with its RUNNING -> DEAD 3216 * transition, resulting in a double drop. 3217 */ 3218 prev_state = prev->state; 3219 vtime_task_switch(prev); 3220 perf_event_task_sched_in(prev, current); 3221 finish_task(prev); 3222 finish_lock_switch(rq); 3223 finish_arch_post_lock_switch(); 3224 kcov_finish_switch(current); 3225 3226 fire_sched_in_preempt_notifiers(current); 3227 /* 3228 * When switching through a kernel thread, the loop in 3229 * membarrier_{private,global}_expedited() may have observed that 3230 * kernel thread and not issued an IPI. It is therefore possible to 3231 * schedule between user->kernel->user threads without passing through 3232 * switch_mm(). Membarrier requires a barrier after storing to 3233 * rq->curr, before returning to userspace, so provide them here: 3234 * 3235 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly 3236 * provided by mmdrop(), 3237 * - a sync_core for SYNC_CORE. 3238 */ 3239 if (mm) { 3240 membarrier_mm_sync_core_before_usermode(mm); 3241 mmdrop(mm); 3242 } 3243 if (unlikely(prev_state == TASK_DEAD)) { 3244 if (prev->sched_class->task_dead) 3245 prev->sched_class->task_dead(prev); 3246 3247 /* 3248 * Remove function-return probe instances associated with this 3249 * task and put them back on the free list. 3250 */ 3251 kprobe_flush_task(prev); 3252 3253 /* Task is done with its stack.
*/ 3254 put_task_stack(prev); 3255 3256 put_task_struct_rcu_user(prev); 3257 } 3258 3259 tick_nohz_task_switch(); 3260 return rq; 3261 } 3262 3263 #ifdef CONFIG_SMP 3264 3265 /* rq->lock is NOT held, but preemption is disabled */ 3266 static void __balance_callback(struct rq *rq) 3267 { 3268 struct callback_head *head, *next; 3269 void (*func)(struct rq *rq); 3270 unsigned long flags; 3271 3272 raw_spin_lock_irqsave(&rq->lock, flags); 3273 head = rq->balance_callback; 3274 rq->balance_callback = NULL; 3275 while (head) { 3276 func = (void (*)(struct rq *))head->func; 3277 next = head->next; 3278 head->next = NULL; 3279 head = next; 3280 3281 func(rq); 3282 } 3283 raw_spin_unlock_irqrestore(&rq->lock, flags); 3284 } 3285 3286 static inline void balance_callback(struct rq *rq) 3287 { 3288 if (unlikely(rq->balance_callback)) 3289 __balance_callback(rq); 3290 } 3291 3292 #else 3293 3294 static inline void balance_callback(struct rq *rq) 3295 { 3296 } 3297 3298 #endif 3299 3300 /** 3301 * schedule_tail - first thing a freshly forked thread must call. 3302 * @prev: the thread we just switched away from. 3303 */ 3304 asmlinkage __visible void schedule_tail(struct task_struct *prev) 3305 __releases(rq->lock) 3306 { 3307 struct rq *rq; 3308 3309 /* 3310 * New tasks start with FORK_PREEMPT_COUNT, see there and 3311 * finish_task_switch() for details. 3312 * 3313 * finish_task_switch() will drop rq->lock() and lower preempt_count 3314 * and the preempt_enable() will end up enabling preemption (on 3315 * PREEMPT_COUNT kernels). 3316 */ 3317 3318 rq = finish_task_switch(prev); 3319 balance_callback(rq); 3320 preempt_enable(); 3321 3322 if (current->set_child_tid) 3323 put_user(task_pid_vnr(current), current->set_child_tid); 3324 3325 calculate_sigpending(); 3326 } 3327 3328 /* 3329 * context_switch - switch to the new MM and the new thread's register state. 3330 */ 3331 static __always_inline struct rq * 3332 context_switch(struct rq *rq, struct task_struct *prev, 3333 struct task_struct *next, struct rq_flags *rf) 3334 { 3335 prepare_task_switch(rq, prev, next); 3336 3337 /* 3338 * For paravirt, this is coupled with an exit in switch_to to 3339 * combine the page table reload and the switch backend into 3340 * one hypercall. 3341 */ 3342 arch_start_context_switch(prev); 3343 3344 /* 3345 * kernel -> kernel lazy + transfer active 3346 * user -> kernel lazy + mmgrab() active 3347 * 3348 * kernel -> user switch + mmdrop() active 3349 * user -> user switch 3350 */ 3351 if (!next->mm) { // to kernel 3352 enter_lazy_tlb(prev->active_mm, next); 3353 3354 next->active_mm = prev->active_mm; 3355 if (prev->mm) // from user 3356 mmgrab(prev->active_mm); 3357 else 3358 prev->active_mm = NULL; 3359 } else { // to user 3360 membarrier_switch_mm(rq, prev->active_mm, next->mm); 3361 /* 3362 * sys_membarrier() requires an smp_mb() between setting 3363 * rq->curr / membarrier_switch_mm() and returning to userspace. 3364 * 3365 * The below provides this either through switch_mm(), or in 3366 * case 'prev->active_mm == next->mm' through 3367 * finish_task_switch()'s mmdrop(). 3368 */ 3369 switch_mm_irqs_off(prev->active_mm, next->mm, next); 3370 3371 if (!prev->mm) { // from kernel 3372 /* will mmdrop() in finish_task_switch(). */ 3373 rq->prev_mm = prev->active_mm; 3374 prev->active_mm = NULL; 3375 } 3376 } 3377 3378 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 3379 3380 prepare_lock_switch(rq, next, rf); 3381 3382 /* Here we just switch the register state and the stack. 
*/ 3383 switch_to(prev, next, prev); 3384 barrier(); 3385 3386 return finish_task_switch(prev); 3387 } 3388 3389 /* 3390 * nr_running and nr_context_switches: 3391 * 3392 * externally visible scheduler statistics: current number of runnable 3393 * threads, total number of context switches performed since bootup. 3394 */ 3395 unsigned long nr_running(void) 3396 { 3397 unsigned long i, sum = 0; 3398 3399 for_each_online_cpu(i) 3400 sum += cpu_rq(i)->nr_running; 3401 3402 return sum; 3403 } 3404 3405 /* 3406 * Check if only the current task is running on the CPU. 3407 * 3408 * Caution: this function does not check that the caller has disabled 3409 * preemption, thus the result might have a time-of-check-to-time-of-use 3410 * race. The caller is responsible for using it correctly, for example: 3411 * 3412 * - from a non-preemptible section (of course) 3413 * 3414 * - from a thread that is bound to a single CPU 3415 * 3416 * - in a loop with very short iterations (e.g. a polling loop) 3417 */ 3418 bool single_task_running(void) 3419 { 3420 return raw_rq()->nr_running == 1; 3421 } 3422 EXPORT_SYMBOL(single_task_running); 3423 3424 unsigned long long nr_context_switches(void) 3425 { 3426 int i; 3427 unsigned long long sum = 0; 3428 3429 for_each_possible_cpu(i) 3430 sum += cpu_rq(i)->nr_switches; 3431 3432 return sum; 3433 } 3434 3435 /* 3436 * Consumers of these two interfaces, like for example the cpuidle menu 3437 * governor, are using nonsensical data: they prefer shallow idle state selection 3438 * for a CPU that has IO-wait, even though the waiting task might not end up 3439 * running on that CPU when it does become runnable. 3440 */ 3441 3442 unsigned long nr_iowait_cpu(int cpu) 3443 { 3444 return atomic_read(&cpu_rq(cpu)->nr_iowait); 3445 } 3446 3447 /* 3448 * IO-wait accounting, and how it's mostly bollocks (on SMP). 3449 * 3450 * The idea behind IO-wait accounting is to account the idle time that we could 3451 * have spent running if it were not for IO. That is, if we were to improve the 3452 * storage performance, we'd have a proportional reduction in IO-wait time. 3453 * 3454 * This all works nicely on UP, where, when a task blocks on IO, we account 3455 * idle time as IO-wait, because if the storage were faster, it could've been 3456 * running and we'd not be idle. 3457 * 3458 * This has been extended to SMP, by doing the same for each CPU. This however 3459 * is broken. 3460 * 3461 * Imagine for instance the case where two tasks block on one CPU; only that one 3462 * CPU will have IO-wait accounted, while the other has regular idle, even 3463 * though, if the storage were faster, both could've run at the same time, 3464 * utilising both CPUs. 3465 * 3466 * This means that, when looking globally, the current IO-wait accounting on 3467 * SMP is a lower bound, due to under-accounting. 3468 * 3469 * Worse, since the numbers are provided per CPU, they are sometimes 3470 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly 3471 * associated with any one particular CPU; it can wake up on a different CPU than 3472 * the one it blocked on. This means the per CPU IO-wait number is meaningless. 3473 * 3474 * Task CPU affinities can make all that even more 'interesting'.
3475 */ 3476 3477 unsigned long nr_iowait(void) 3478 { 3479 unsigned long i, sum = 0; 3480 3481 for_each_possible_cpu(i) 3482 sum += nr_iowait_cpu(i); 3483 3484 return sum; 3485 } 3486 3487 #ifdef CONFIG_SMP 3488 3489 /* 3490 * sched_exec - execve() is a valuable balancing opportunity, because at 3491 * this point the task has the smallest effective memory and cache footprint. 3492 */ 3493 void sched_exec(void) 3494 { 3495 struct task_struct *p = current; 3496 unsigned long flags; 3497 int dest_cpu; 3498 3499 raw_spin_lock_irqsave(&p->pi_lock, flags); 3500 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); 3501 if (dest_cpu == smp_processor_id()) 3502 goto unlock; 3503 3504 if (likely(cpu_active(dest_cpu))) { 3505 struct migration_arg arg = { p, dest_cpu }; 3506 3507 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3508 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 3509 return; 3510 } 3511 unlock: 3512 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3513 } 3514 3515 #endif 3516 3517 DEFINE_PER_CPU(struct kernel_stat, kstat); 3518 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 3519 3520 EXPORT_PER_CPU_SYMBOL(kstat); 3521 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 3522 3523 /* 3524 * The function fair_sched_class.update_curr accesses the struct curr 3525 * and its field curr->exec_start; when called from task_sched_runtime(), 3526 * we observe a high rate of cache misses in practice. 3527 * Prefetching this data results in improved performance. 3528 */ 3529 static inline void prefetch_curr_exec_start(struct task_struct *p) 3530 { 3531 #ifdef CONFIG_FAIR_GROUP_SCHED 3532 struct sched_entity *curr = (&p->se)->cfs_rq->curr; 3533 #else 3534 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; 3535 #endif 3536 prefetch(curr); 3537 prefetch(&curr->exec_start); 3538 } 3539 3540 /* 3541 * Return accounted runtime for the task. 3542 * In case the task is currently running, return the runtime plus current's 3543 * pending runtime that has not been accounted yet. 3544 */ 3545 unsigned long long task_sched_runtime(struct task_struct *p) 3546 { 3547 struct rq_flags rf; 3548 struct rq *rq; 3549 u64 ns; 3550 3551 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 3552 /* 3553 * 64-bit doesn't need locks to atomically read a 64-bit value. 3554 * So we have an optimization chance when the task's delta_exec is 0. 3555 * Reading ->on_cpu is racy, but this is ok. 3556 * 3557 * If we race with it leaving CPU, we'll take a lock. So we're correct. 3558 * If we race with it entering CPU, unaccounted time is 0. This is 3559 * indistinguishable from the read occurring a few cycles earlier. 3560 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 3561 * been accounted, so we're correct here as well. 3562 */ 3563 if (!p->on_cpu || !task_on_rq_queued(p)) 3564 return p->se.sum_exec_runtime; 3565 #endif 3566 3567 rq = task_rq_lock(p, &rf); 3568 /* 3569 * Must be ->curr _and_ ->on_rq. If dequeued, we would 3570 * project cycles that may never be accounted to this 3571 * thread, breaking clock_gettime().
3572 */ 3573 if (task_current(rq, p) && task_on_rq_queued(p)) { 3574 prefetch_curr_exec_start(p); 3575 update_rq_clock(rq); 3576 p->sched_class->update_curr(rq); 3577 } 3578 ns = p->se.sum_exec_runtime; 3579 task_rq_unlock(rq, p, &rf); 3580 3581 return ns; 3582 } 3583 3584 DEFINE_PER_CPU(unsigned long, thermal_pressure); 3585 3586 void arch_set_thermal_pressure(struct cpumask *cpus, 3587 unsigned long th_pressure) 3588 { 3589 int cpu; 3590 3591 for_each_cpu(cpu, cpus) 3592 WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); 3593 } 3594 3595 /* 3596 * This function gets called by the timer code, with HZ frequency. 3597 * We call it with interrupts disabled. 3598 */ 3599 void scheduler_tick(void) 3600 { 3601 int cpu = smp_processor_id(); 3602 struct rq *rq = cpu_rq(cpu); 3603 struct task_struct *curr = rq->curr; 3604 struct rq_flags rf; 3605 unsigned long thermal_pressure; 3606 3607 arch_scale_freq_tick(); 3608 sched_clock_tick(); 3609 3610 rq_lock(rq, &rf); 3611 3612 update_rq_clock(rq); 3613 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); 3614 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); 3615 curr->sched_class->task_tick(rq, curr, 0); 3616 calc_global_load_tick(rq); 3617 psi_task_tick(rq); 3618 3619 rq_unlock(rq, &rf); 3620 3621 perf_event_task_tick(); 3622 3623 #ifdef CONFIG_SMP 3624 rq->idle_balance = idle_cpu(cpu); 3625 trigger_load_balance(rq); 3626 #endif 3627 } 3628 3629 #ifdef CONFIG_NO_HZ_FULL 3630 3631 struct tick_work { 3632 int cpu; 3633 atomic_t state; 3634 struct delayed_work work; 3635 }; 3636 /* Values for ->state, see diagram below. */ 3637 #define TICK_SCHED_REMOTE_OFFLINE 0 3638 #define TICK_SCHED_REMOTE_OFFLINING 1 3639 #define TICK_SCHED_REMOTE_RUNNING 2 3640 3641 /* 3642 * State diagram for ->state: 3643 * 3644 * 3645 * TICK_SCHED_REMOTE_OFFLINE 3646 * | ^ 3647 * | | 3648 * | | sched_tick_remote() 3649 * | | 3650 * | | 3651 * +--TICK_SCHED_REMOTE_OFFLINING 3652 * | ^ 3653 * | | 3654 * sched_tick_start() | | sched_tick_stop() 3655 * | | 3656 * V | 3657 * TICK_SCHED_REMOTE_RUNNING 3658 * 3659 * 3660 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() 3661 * and sched_tick_start() are happy to leave the state in RUNNING. 3662 */ 3663 3664 static struct tick_work __percpu *tick_work_cpu; 3665 3666 static void sched_tick_remote(struct work_struct *work) 3667 { 3668 struct delayed_work *dwork = to_delayed_work(work); 3669 struct tick_work *twork = container_of(dwork, struct tick_work, work); 3670 int cpu = twork->cpu; 3671 struct rq *rq = cpu_rq(cpu); 3672 struct task_struct *curr; 3673 struct rq_flags rf; 3674 u64 delta; 3675 int os; 3676 3677 /* 3678 * Handle the tick only if it appears the remote CPU is running in full 3679 * dynticks mode. The check is racy by nature, but missing a tick or 3680 * having one too much is no big deal because the scheduler tick updates 3681 * statistics and checks timeslices in a time-independent way, regardless 3682 * of when exactly it is running. 3683 */ 3684 if (!tick_nohz_tick_stopped_cpu(cpu)) 3685 goto out_requeue; 3686 3687 rq_lock_irq(rq, &rf); 3688 curr = rq->curr; 3689 if (cpu_is_offline(cpu)) 3690 goto out_unlock; 3691 3692 update_rq_clock(rq); 3693 3694 if (!is_idle_task(curr)) { 3695 /* 3696 * Make sure the next tick runs within a reasonable 3697 * amount of time. 
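 * (the WARN_ON_ONCE() below complains if more than three seconds have passed since curr->se.exec_start)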
3698 */ 3699 delta = rq_clock_task(rq) - curr->se.exec_start; 3700 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); 3701 } 3702 curr->sched_class->task_tick(rq, curr, 0); 3703 3704 calc_load_nohz_remote(rq); 3705 out_unlock: 3706 rq_unlock_irq(rq, &rf); 3707 out_requeue: 3708 3709 /* 3710 * Run the remote tick once per second (1Hz). This arbitrary 3711 * frequency is large enough to avoid overload but short enough 3712 * to keep scheduler internal stats reasonably up to date. But 3713 * first update state to reflect hotplug activity if required. 3714 */ 3715 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); 3716 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); 3717 if (os == TICK_SCHED_REMOTE_RUNNING) 3718 queue_delayed_work(system_unbound_wq, dwork, HZ); 3719 } 3720 3721 static void sched_tick_start(int cpu) 3722 { 3723 int os; 3724 struct tick_work *twork; 3725 3726 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 3727 return; 3728 3729 WARN_ON_ONCE(!tick_work_cpu); 3730 3731 twork = per_cpu_ptr(tick_work_cpu, cpu); 3732 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); 3733 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); 3734 if (os == TICK_SCHED_REMOTE_OFFLINE) { 3735 twork->cpu = cpu; 3736 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); 3737 queue_delayed_work(system_unbound_wq, &twork->work, HZ); 3738 } 3739 } 3740 3741 #ifdef CONFIG_HOTPLUG_CPU 3742 static void sched_tick_stop(int cpu) 3743 { 3744 struct tick_work *twork; 3745 int os; 3746 3747 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 3748 return; 3749 3750 WARN_ON_ONCE(!tick_work_cpu); 3751 3752 twork = per_cpu_ptr(tick_work_cpu, cpu); 3753 /* There cannot be competing actions, but don't rely on stop-machine. */ 3754 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); 3755 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); 3756 /* Don't cancel, as this would mess up the state machine. */ 3757 } 3758 #endif /* CONFIG_HOTPLUG_CPU */ 3759 3760 int __init sched_tick_offload_init(void) 3761 { 3762 tick_work_cpu = alloc_percpu(struct tick_work); 3763 BUG_ON(!tick_work_cpu); 3764 return 0; 3765 } 3766 3767 #else /* !CONFIG_NO_HZ_FULL */ 3768 static inline void sched_tick_start(int cpu) { } 3769 static inline void sched_tick_stop(int cpu) { } 3770 #endif 3771 3772 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ 3773 defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 3774 /* 3775 * If the value passed in is equal to the current preempt count 3776 * then we just disabled preemption. Start timing the latency. 3777 */ 3778 static inline void preempt_latency_start(int val) 3779 { 3780 if (preempt_count() == val) { 3781 unsigned long ip = get_lock_parent_ip(); 3782 #ifdef CONFIG_DEBUG_PREEMPT 3783 current->preempt_disable_ip = ip; 3784 #endif 3785 trace_preempt_off(CALLER_ADDR0, ip); 3786 } 3787 } 3788 3789 void preempt_count_add(int val) 3790 { 3791 #ifdef CONFIG_DEBUG_PREEMPT 3792 /* 3793 * Underflow? 3794 */ 3795 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 3796 return; 3797 #endif 3798 __preempt_count_add(val); 3799 #ifdef CONFIG_DEBUG_PREEMPT 3800 /* 3801 * Spinlock count overflowing soon? 3802 */ 3803 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 3804 PREEMPT_MASK - 10); 3805 #endif 3806 preempt_latency_start(val); 3807 } 3808 EXPORT_SYMBOL(preempt_count_add); 3809 NOKPROBE_SYMBOL(preempt_count_add); 3810 3811 /* 3812 * If the value passed in equals to the current preempt count 3813 * then we just enabled preemption. Stop timing the latency. 
3814 */ 3815 static inline void preempt_latency_stop(int val) 3816 { 3817 if (preempt_count() == val) 3818 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 3819 } 3820 3821 void preempt_count_sub(int val) 3822 { 3823 #ifdef CONFIG_DEBUG_PREEMPT 3824 /* 3825 * Underflow? 3826 */ 3827 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 3828 return; 3829 /* 3830 * Is the spinlock portion underflowing? 3831 */ 3832 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 3833 !(preempt_count() & PREEMPT_MASK))) 3834 return; 3835 #endif 3836 3837 preempt_latency_stop(val); 3838 __preempt_count_sub(val); 3839 } 3840 EXPORT_SYMBOL(preempt_count_sub); 3841 NOKPROBE_SYMBOL(preempt_count_sub); 3842 3843 #else 3844 static inline void preempt_latency_start(int val) { } 3845 static inline void preempt_latency_stop(int val) { } 3846 #endif 3847 3848 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 3849 { 3850 #ifdef CONFIG_DEBUG_PREEMPT 3851 return p->preempt_disable_ip; 3852 #else 3853 return 0; 3854 #endif 3855 } 3856 3857 /* 3858 * Print scheduling while atomic bug: 3859 */ 3860 static noinline void __schedule_bug(struct task_struct *prev) 3861 { 3862 /* Save this before calling printk(), since that will clobber it */ 3863 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 3864 3865 if (oops_in_progress) 3866 return; 3867 3868 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 3869 prev->comm, prev->pid, preempt_count()); 3870 3871 debug_show_held_locks(prev); 3872 print_modules(); 3873 if (irqs_disabled()) 3874 print_irqtrace_events(prev); 3875 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 3876 && in_atomic_preempt_off()) { 3877 pr_err("Preemption disabled at:"); 3878 print_ip_sym(preempt_disable_ip); 3879 pr_cont("\n"); 3880 } 3881 if (panic_on_warn) 3882 panic("scheduling while atomic\n"); 3883 3884 dump_stack(); 3885 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 3886 } 3887 3888 /* 3889 * Various schedule()-time debugging checks and statistics: 3890 */ 3891 static inline void schedule_debug(struct task_struct *prev, bool preempt) 3892 { 3893 #ifdef CONFIG_SCHED_STACK_END_CHECK 3894 if (task_stack_end_corrupted(prev)) 3895 panic("corrupted stack end detected inside scheduler\n"); 3896 #endif 3897 3898 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 3899 if (!preempt && prev->state && prev->non_block_count) { 3900 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", 3901 prev->comm, prev->pid, prev->non_block_count); 3902 dump_stack(); 3903 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 3904 } 3905 #endif 3906 3907 if (unlikely(in_atomic_preempt_off())) { 3908 __schedule_bug(prev); 3909 preempt_count_set(PREEMPT_DISABLED); 3910 } 3911 rcu_sleep_check(); 3912 3913 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 3914 3915 schedstat_inc(this_rq()->sched_count); 3916 } 3917 3918 /* 3919 * Pick up the highest-prio task: 3920 */ 3921 static inline struct task_struct * 3922 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 3923 { 3924 const struct sched_class *class; 3925 struct task_struct *p; 3926 3927 /* 3928 * Optimization: we know that if all tasks are in the fair class we can 3929 * call that function directly, but only if the @prev task wasn't of a 3930 * higher scheduling class, because otherwise those loose the 3931 * opportunity to pull in more work from other CPUs. 
3932 */ 3933 if (likely((prev->sched_class == &idle_sched_class || 3934 prev->sched_class == &fair_sched_class) && 3935 rq->nr_running == rq->cfs.h_nr_running)) { 3936 3937 p = pick_next_task_fair(rq, prev, rf); 3938 if (unlikely(p == RETRY_TASK)) 3939 goto restart; 3940 3941 /* Assumes fair_sched_class->next == idle_sched_class */ 3942 if (!p) { 3943 put_prev_task(rq, prev); 3944 p = pick_next_task_idle(rq); 3945 } 3946 3947 return p; 3948 } 3949 3950 restart: 3951 #ifdef CONFIG_SMP 3952 /* 3953 * We must do the balancing pass before put_prev_task(), such 3954 * that when we release the rq->lock the task is in the same 3955 * state as before we took rq->lock. 3956 * 3957 * We can terminate the balance pass as soon as we know there is 3958 * a runnable task of @class priority or higher. 3959 */ 3960 for_class_range(class, prev->sched_class, &idle_sched_class) { 3961 if (class->balance(rq, prev, rf)) 3962 break; 3963 } 3964 #endif 3965 3966 put_prev_task(rq, prev); 3967 3968 for_each_class(class) { 3969 p = class->pick_next_task(rq); 3970 if (p) 3971 return p; 3972 } 3973 3974 /* The idle class should always have a runnable task: */ 3975 BUG(); 3976 } 3977 3978 /* 3979 * __schedule() is the main scheduler function. 3980 * 3981 * The main means of driving the scheduler and thus entering this function are: 3982 * 3983 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 3984 * 3985 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 3986 * paths. For example, see arch/x86/entry_64.S. 3987 * 3988 * To drive preemption between tasks, the scheduler sets the flag in the timer 3989 * interrupt handler scheduler_tick(). 3990 * 3991 * 3. Wakeups don't really cause entry into schedule(). They add a 3992 * task to the run-queue and that's it. 3993 * 3994 * Now, if the new task added to the run-queue preempts the current 3995 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 3996 * called on the nearest possible occasion: 3997 * 3998 * - If the kernel is preemptible (CONFIG_PREEMPTION=y): 3999 * 4000 * - in syscall or exception context, at the next outermost 4001 * preempt_enable(). (this might be as soon as the wake_up()'s 4002 * spin_unlock()!) 4003 * 4004 * - in IRQ context, return from interrupt-handler to 4005 * preemptible context 4006 * 4007 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) 4008 * then at the next: 4009 * 4010 * - cond_resched() call 4011 * - explicit schedule() call 4012 * - return from syscall or exception to user-space 4013 * - return from interrupt-handler to user-space 4014 * 4015 * WARNING: must be called with preemption disabled! 4016 */ 4017 static void __sched notrace __schedule(bool preempt) 4018 { 4019 struct task_struct *prev, *next; 4020 unsigned long *switch_count; 4021 struct rq_flags rf; 4022 struct rq *rq; 4023 int cpu; 4024 4025 cpu = smp_processor_id(); 4026 rq = cpu_rq(cpu); 4027 prev = rq->curr; 4028 4029 schedule_debug(prev, preempt); 4030 4031 if (sched_feat(HRTICK)) 4032 hrtick_clear(rq); 4033 4034 local_irq_disable(); 4035 rcu_note_context_switch(preempt); 4036 4037 /* 4038 * Make sure that signal_pending_state()->signal_pending() below 4039 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 4040 * done by the caller to avoid the race with signal_wake_up(). 4041 * 4042 * The membarrier system call requires a full memory barrier 4043 * after coming from user-space, before storing to rq->curr.
4044 */ 4045 rq_lock(rq, &rf); 4046 smp_mb__after_spinlock(); 4047 4048 /* Promote REQ to ACT */ 4049 rq->clock_update_flags <<= 1; 4050 update_rq_clock(rq); 4051 4052 switch_count = &prev->nivcsw; 4053 if (!preempt && prev->state) { 4054 if (signal_pending_state(prev->state, prev)) { 4055 prev->state = TASK_RUNNING; 4056 } else { 4057 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 4058 4059 if (prev->in_iowait) { 4060 atomic_inc(&rq->nr_iowait); 4061 delayacct_blkio_start(); 4062 } 4063 } 4064 switch_count = &prev->nvcsw; 4065 } 4066 4067 next = pick_next_task(rq, prev, &rf); 4068 clear_tsk_need_resched(prev); 4069 clear_preempt_need_resched(); 4070 4071 if (likely(prev != next)) { 4072 rq->nr_switches++; 4073 /* 4074 * RCU users of rcu_dereference(rq->curr) may not see 4075 * changes to task_struct made by pick_next_task(). 4076 */ 4077 RCU_INIT_POINTER(rq->curr, next); 4078 /* 4079 * The membarrier system call requires each architecture 4080 * to have a full memory barrier after updating 4081 * rq->curr, before returning to user-space. 4082 * 4083 * Here are the schemes providing that barrier on the 4084 * various architectures: 4085 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. 4086 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 4087 * - finish_lock_switch() for weakly-ordered 4088 * architectures where spin_unlock is a full barrier, 4089 * - switch_to() for arm64 (weakly-ordered, spin_unlock 4090 * is a RELEASE barrier), 4091 */ 4092 ++*switch_count; 4093 4094 trace_sched_switch(preempt, prev, next); 4095 4096 /* Also unlocks the rq: */ 4097 rq = context_switch(rq, prev, next, &rf); 4098 } else { 4099 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 4100 rq_unlock_irq(rq, &rf); 4101 } 4102 4103 balance_callback(rq); 4104 } 4105 4106 void __noreturn do_task_dead(void) 4107 { 4108 /* Causes final put_task_struct in finish_task_switch(): */ 4109 set_special_state(TASK_DEAD); 4110 4111 /* Tell freezer to ignore us: */ 4112 current->flags |= PF_NOFREEZE; 4113 4114 __schedule(false); 4115 BUG(); 4116 4117 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 4118 for (;;) 4119 cpu_relax(); 4120 } 4121 4122 static inline void sched_submit_work(struct task_struct *tsk) 4123 { 4124 if (!tsk->state) 4125 return; 4126 4127 /* 4128 * If a worker went to sleep, notify and ask workqueue whether 4129 * it wants to wake up a task to maintain concurrency. 4130 * As this function is called inside the schedule() context, 4131 * we disable preemption to avoid it calling schedule() again 4132 * in the possible wakeup of a kworker. 4133 */ 4134 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 4135 preempt_disable(); 4136 if (tsk->flags & PF_WQ_WORKER) 4137 wq_worker_sleeping(tsk); 4138 else 4139 io_wq_worker_sleeping(tsk); 4140 preempt_enable_no_resched(); 4141 } 4142 4143 if (tsk_is_pi_blocked(tsk)) 4144 return; 4145 4146 /* 4147 * If we are going to sleep and we have plugged IO queued, 4148 * make sure to submit it to avoid deadlocks. 
4149 */ 4150 if (blk_needs_flush_plug(tsk)) 4151 blk_schedule_flush_plug(tsk); 4152 } 4153 4154 static void sched_update_worker(struct task_struct *tsk) 4155 { 4156 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 4157 if (tsk->flags & PF_WQ_WORKER) 4158 wq_worker_running(tsk); 4159 else 4160 io_wq_worker_running(tsk); 4161 } 4162 } 4163 4164 asmlinkage __visible void __sched schedule(void) 4165 { 4166 struct task_struct *tsk = current; 4167 4168 sched_submit_work(tsk); 4169 do { 4170 preempt_disable(); 4171 __schedule(false); 4172 sched_preempt_enable_no_resched(); 4173 } while (need_resched()); 4174 sched_update_worker(tsk); 4175 } 4176 EXPORT_SYMBOL(schedule); 4177 4178 /* 4179 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted 4180 * state (have scheduled out non-voluntarily) by making sure that all 4181 * tasks have either left the run queue or have gone into user space. 4182 * As idle tasks do not do either, they must not ever be preempted 4183 * (schedule out non-voluntarily). 4184 * 4185 * schedule_idle() is similar to schedule_preempt_disable() except that it 4186 * never enables preemption because it does not call sched_submit_work(). 4187 */ 4188 void __sched schedule_idle(void) 4189 { 4190 /* 4191 * As this skips calling sched_submit_work(), which the idle task does 4192 * regardless because that function is a nop when the task is in a 4193 * TASK_RUNNING state, make sure this isn't used someplace that the 4194 * current task can be in any other state. Note, idle is always in the 4195 * TASK_RUNNING state. 4196 */ 4197 WARN_ON_ONCE(current->state); 4198 do { 4199 __schedule(false); 4200 } while (need_resched()); 4201 } 4202 4203 #ifdef CONFIG_CONTEXT_TRACKING 4204 asmlinkage __visible void __sched schedule_user(void) 4205 { 4206 /* 4207 * If we come here after a random call to set_need_resched(), 4208 * or we have been woken up remotely but the IPI has not yet arrived, 4209 * we haven't yet exited the RCU idle mode. Do it here manually until 4210 * we find a better solution. 4211 * 4212 * NB: There are buggy callers of this function. Ideally we 4213 * should warn if prev_state != CONTEXT_USER, but that will trigger 4214 * too frequently to make sense yet. 4215 */ 4216 enum ctx_state prev_state = exception_enter(); 4217 schedule(); 4218 exception_exit(prev_state); 4219 } 4220 #endif 4221 4222 /** 4223 * schedule_preempt_disabled - called with preemption disabled 4224 * 4225 * Returns with preemption disabled. Note: preempt_count must be 1 4226 */ 4227 void __sched schedule_preempt_disabled(void) 4228 { 4229 sched_preempt_enable_no_resched(); 4230 schedule(); 4231 preempt_disable(); 4232 } 4233 4234 static void __sched notrace preempt_schedule_common(void) 4235 { 4236 do { 4237 /* 4238 * Because the function tracer can trace preempt_count_sub() 4239 * and it also uses preempt_enable/disable_notrace(), if 4240 * NEED_RESCHED is set, the preempt_enable_notrace() called 4241 * by the function tracer will call this function again and 4242 * cause infinite recursion. 4243 * 4244 * Preemption must be disabled here before the function 4245 * tracer can trace. Break up preempt_disable() into two 4246 * calls. One to disable preemption without fear of being 4247 * traced. The other to still record the preemption latency, 4248 * which can also be traced by the function tracer. 
4249 */ 4250 preempt_disable_notrace(); 4251 preempt_latency_start(1); 4252 __schedule(true); 4253 preempt_latency_stop(1); 4254 preempt_enable_no_resched_notrace(); 4255 4256 /* 4257 * Check again in case we missed a preemption opportunity 4258 * between schedule and now. 4259 */ 4260 } while (need_resched()); 4261 } 4262 4263 #ifdef CONFIG_PREEMPTION 4264 /* 4265 * This is the entry point to schedule() from in-kernel preemption 4266 * off of preempt_enable. 4267 */ 4268 asmlinkage __visible void __sched notrace preempt_schedule(void) 4269 { 4270 /* 4271 * If there is a non-zero preempt_count or interrupts are disabled, 4272 * we do not want to preempt the current task. Just return.. 4273 */ 4274 if (likely(!preemptible())) 4275 return; 4276 4277 preempt_schedule_common(); 4278 } 4279 NOKPROBE_SYMBOL(preempt_schedule); 4280 EXPORT_SYMBOL(preempt_schedule); 4281 4282 /** 4283 * preempt_schedule_notrace - preempt_schedule called by tracing 4284 * 4285 * The tracing infrastructure uses preempt_enable_notrace to prevent 4286 * recursion and tracing preempt enabling caused by the tracing 4287 * infrastructure itself. But as tracing can happen in areas coming 4288 * from userspace or just about to enter userspace, a preempt enable 4289 * can occur before user_exit() is called. This will cause the scheduler 4290 * to be called when the system is still in usermode. 4291 * 4292 * To prevent this, the preempt_enable_notrace will use this function 4293 * instead of preempt_schedule() to exit user context if needed before 4294 * calling the scheduler. 4295 */ 4296 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 4297 { 4298 enum ctx_state prev_ctx; 4299 4300 if (likely(!preemptible())) 4301 return; 4302 4303 do { 4304 /* 4305 * Because the function tracer can trace preempt_count_sub() 4306 * and it also uses preempt_enable/disable_notrace(), if 4307 * NEED_RESCHED is set, the preempt_enable_notrace() called 4308 * by the function tracer will call this function again and 4309 * cause infinite recursion. 4310 * 4311 * Preemption must be disabled here before the function 4312 * tracer can trace. Break up preempt_disable() into two 4313 * calls. One to disable preemption without fear of being 4314 * traced. The other to still record the preemption latency, 4315 * which can also be traced by the function tracer. 4316 */ 4317 preempt_disable_notrace(); 4318 preempt_latency_start(1); 4319 /* 4320 * Needs preempt disabled in case user_exit() is traced 4321 * and the tracer calls preempt_enable_notrace() causing 4322 * an infinite recursion. 4323 */ 4324 prev_ctx = exception_enter(); 4325 __schedule(true); 4326 exception_exit(prev_ctx); 4327 4328 preempt_latency_stop(1); 4329 preempt_enable_no_resched_notrace(); 4330 } while (need_resched()); 4331 } 4332 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 4333 4334 #endif /* CONFIG_PREEMPTION */ 4335 4336 /* 4337 * This is the entry point to schedule() from kernel preemption 4338 * off of irq context. 4339 * Note, that this is called and return with irqs disabled. This will 4340 * protect us against recursive calling from irq. 
4341 */ 4342 asmlinkage __visible void __sched preempt_schedule_irq(void) 4343 { 4344 enum ctx_state prev_state; 4345 4346 /* Catch callers which need to be fixed */ 4347 BUG_ON(preempt_count() || !irqs_disabled()); 4348 4349 prev_state = exception_enter(); 4350 4351 do { 4352 preempt_disable(); 4353 local_irq_enable(); 4354 __schedule(true); 4355 local_irq_disable(); 4356 sched_preempt_enable_no_resched(); 4357 } while (need_resched()); 4358 4359 exception_exit(prev_state); 4360 } 4361 4362 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 4363 void *key) 4364 { 4365 return try_to_wake_up(curr->private, mode, wake_flags); 4366 } 4367 EXPORT_SYMBOL(default_wake_function); 4368 4369 #ifdef CONFIG_RT_MUTEXES 4370 4371 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) 4372 { 4373 if (pi_task) 4374 prio = min(prio, pi_task->prio); 4375 4376 return prio; 4377 } 4378 4379 static inline int rt_effective_prio(struct task_struct *p, int prio) 4380 { 4381 struct task_struct *pi_task = rt_mutex_get_top_task(p); 4382 4383 return __rt_effective_prio(pi_task, prio); 4384 } 4385 4386 /* 4387 * rt_mutex_setprio - set the current priority of a task 4388 * @p: task to boost 4389 * @pi_task: donor task 4390 * 4391 * This function changes the 'effective' priority of a task. It does 4392 * not touch ->normal_prio like __setscheduler(). 4393 * 4394 * Used by the rt_mutex code to implement priority inheritance 4395 * logic. Call site only calls if the priority of the task changed. 4396 */ 4397 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) 4398 { 4399 int prio, oldprio, queued, running, queue_flag = 4400 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 4401 const struct sched_class *prev_class; 4402 struct rq_flags rf; 4403 struct rq *rq; 4404 4405 /* XXX used to be waiter->prio, not waiter->task->prio */ 4406 prio = __rt_effective_prio(pi_task, p->normal_prio); 4407 4408 /* 4409 * If nothing changed, bail early. 4410 */ 4411 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) 4412 return; 4413 4414 rq = __task_rq_lock(p, &rf); 4415 update_rq_clock(rq); 4416 /* 4417 * Set under pi_lock && rq->lock, such that the value can be used under 4418 * either lock. 4419 * 4420 * Note that there is a lot of trickiness involved in making this pointer 4421 * cache work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to 4422 * ensure a task is de-boosted (pi_task is set to NULL) before the 4423 * task is allowed to run again (and can exit). This ensures the pointer 4424 * points to a blocked task -- which guarantees the task is present. 4425 */ 4426 p->pi_top_task = pi_task; 4427 4428 /* 4429 * For FIFO/RR we only need to set prio, if that matches we're done. 4430 */ 4431 if (prio == p->prio && !dl_prio(prio)) 4432 goto out_unlock; 4433 4434 /* 4435 * Idle task boosting is a no-no in general. There is one 4436 * exception, when PREEMPT_RT and NOHZ is active: 4437 * 4438 * The idle task calls get_next_timer_interrupt() and holds 4439 * the timer wheel base->lock on the CPU and another CPU wants 4440 * to access the timer (probably to cancel it). We can safely 4441 * ignore the boosting request, as the idle CPU runs this code 4442 * with interrupts disabled and will complete the lock 4443 * protected section without being interrupted. So there is no 4444 * real need to boost.
4445 */ 4446 if (unlikely(p == rq->idle)) { 4447 WARN_ON(p != rq->curr); 4448 WARN_ON(p->pi_blocked_on); 4449 goto out_unlock; 4450 } 4451 4452 trace_sched_pi_setprio(p, pi_task); 4453 oldprio = p->prio; 4454 4455 if (oldprio == prio) 4456 queue_flag &= ~DEQUEUE_MOVE; 4457 4458 prev_class = p->sched_class; 4459 queued = task_on_rq_queued(p); 4460 running = task_current(rq, p); 4461 if (queued) 4462 dequeue_task(rq, p, queue_flag); 4463 if (running) 4464 put_prev_task(rq, p); 4465 4466 /* 4467 * Boosting conditions are: 4468 * 1. -rt task is running and holds mutex A 4469 * --> -dl task blocks on mutex A 4470 * 4471 * 2. -dl task is running and holds mutex A 4472 * --> -dl task blocks on mutex A and could preempt the 4473 * running task 4474 */ 4475 if (dl_prio(prio)) { 4476 if (!dl_prio(p->normal_prio) || 4477 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { 4478 p->dl.dl_boosted = 1; 4479 queue_flag |= ENQUEUE_REPLENISH; 4480 } else 4481 p->dl.dl_boosted = 0; 4482 p->sched_class = &dl_sched_class; 4483 } else if (rt_prio(prio)) { 4484 if (dl_prio(oldprio)) 4485 p->dl.dl_boosted = 0; 4486 if (oldprio < prio) 4487 queue_flag |= ENQUEUE_HEAD; 4488 p->sched_class = &rt_sched_class; 4489 } else { 4490 if (dl_prio(oldprio)) 4491 p->dl.dl_boosted = 0; 4492 if (rt_prio(oldprio)) 4493 p->rt.timeout = 0; 4494 p->sched_class = &fair_sched_class; 4495 } 4496 4497 p->prio = prio; 4498 4499 if (queued) 4500 enqueue_task(rq, p, queue_flag); 4501 if (running) 4502 set_next_task(rq, p); 4503 4504 check_class_changed(rq, p, prev_class, oldprio); 4505 out_unlock: 4506 /* Avoid rq from going away on us: */ 4507 preempt_disable(); 4508 __task_rq_unlock(rq, &rf); 4509 4510 balance_callback(rq); 4511 preempt_enable(); 4512 } 4513 #else 4514 static inline int rt_effective_prio(struct task_struct *p, int prio) 4515 { 4516 return prio; 4517 } 4518 #endif 4519 4520 void set_user_nice(struct task_struct *p, long nice) 4521 { 4522 bool queued, running; 4523 int old_prio; 4524 struct rq_flags rf; 4525 struct rq *rq; 4526 4527 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 4528 return; 4529 /* 4530 * We have to be careful, if called from sys_setpriority(), 4531 * the task might be in the middle of scheduling on another CPU.
4532 */ 4533 rq = task_rq_lock(p, &rf); 4534 update_rq_clock(rq); 4535 4536 /* 4537 * The RT priorities are set via sched_setscheduler(), but we still 4538 * allow the 'normal' nice value to be set - but as expected 4539 * it wont have any effect on scheduling until the task is 4540 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 4541 */ 4542 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 4543 p->static_prio = NICE_TO_PRIO(nice); 4544 goto out_unlock; 4545 } 4546 queued = task_on_rq_queued(p); 4547 running = task_current(rq, p); 4548 if (queued) 4549 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 4550 if (running) 4551 put_prev_task(rq, p); 4552 4553 p->static_prio = NICE_TO_PRIO(nice); 4554 set_load_weight(p, true); 4555 old_prio = p->prio; 4556 p->prio = effective_prio(p); 4557 4558 if (queued) 4559 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 4560 if (running) 4561 set_next_task(rq, p); 4562 4563 /* 4564 * If the task increased its priority or is running and 4565 * lowered its priority, then reschedule its CPU: 4566 */ 4567 p->sched_class->prio_changed(rq, p, old_prio); 4568 4569 out_unlock: 4570 task_rq_unlock(rq, p, &rf); 4571 } 4572 EXPORT_SYMBOL(set_user_nice); 4573 4574 /* 4575 * can_nice - check if a task can reduce its nice value 4576 * @p: task 4577 * @nice: nice value 4578 */ 4579 int can_nice(const struct task_struct *p, const int nice) 4580 { 4581 /* Convert nice value [19,-20] to rlimit style value [1,40]: */ 4582 int nice_rlim = nice_to_rlimit(nice); 4583 4584 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 4585 capable(CAP_SYS_NICE)); 4586 } 4587 4588 #ifdef __ARCH_WANT_SYS_NICE 4589 4590 /* 4591 * sys_nice - change the priority of the current process. 4592 * @increment: priority increment 4593 * 4594 * sys_setpriority is a more generic, but much slower function that 4595 * does similar things. 4596 */ 4597 SYSCALL_DEFINE1(nice, int, increment) 4598 { 4599 long nice, retval; 4600 4601 /* 4602 * Setpriority might change our priority at the same moment. 4603 * We don't have to worry. Conceptually one call occurs first 4604 * and we have a single winner. 4605 */ 4606 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 4607 nice = task_nice(current) + increment; 4608 4609 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 4610 if (increment < 0 && !can_nice(current, nice)) 4611 return -EPERM; 4612 4613 retval = security_task_setnice(current, nice); 4614 if (retval) 4615 return retval; 4616 4617 set_user_nice(current, nice); 4618 return 0; 4619 } 4620 4621 #endif 4622 4623 /** 4624 * task_prio - return the priority value of a given task. 4625 * @p: the task in question. 4626 * 4627 * Return: The priority value as seen by users in /proc. 4628 * RT tasks are offset by -200. Normal tasks are centered 4629 * around 0, value goes from -16 to +15. 4630 */ 4631 int task_prio(const struct task_struct *p) 4632 { 4633 return p->prio - MAX_RT_PRIO; 4634 } 4635 4636 /** 4637 * idle_cpu - is a given CPU idle currently? 4638 * @cpu: the processor in question. 4639 * 4640 * Return: 1 if the CPU is currently idle. 0 otherwise. 4641 */ 4642 int idle_cpu(int cpu) 4643 { 4644 struct rq *rq = cpu_rq(cpu); 4645 4646 if (rq->curr != rq->idle) 4647 return 0; 4648 4649 if (rq->nr_running) 4650 return 0; 4651 4652 #ifdef CONFIG_SMP 4653 if (!llist_empty(&rq->wake_list)) 4654 return 0; 4655 #endif 4656 4657 return 1; 4658 } 4659 4660 /** 4661 * available_idle_cpu - is a given CPU idle for enqueuing work. 4662 * @cpu: the CPU in question. 
4663 * 4664 * Return: 1 if the CPU is currently idle. 0 otherwise. 4665 */ 4666 int available_idle_cpu(int cpu) 4667 { 4668 if (!idle_cpu(cpu)) 4669 return 0; 4670 4671 if (vcpu_is_preempted(cpu)) 4672 return 0; 4673 4674 return 1; 4675 } 4676 4677 /** 4678 * idle_task - return the idle task for a given CPU. 4679 * @cpu: the processor in question. 4680 * 4681 * Return: The idle task for the CPU @cpu. 4682 */ 4683 struct task_struct *idle_task(int cpu) 4684 { 4685 return cpu_rq(cpu)->idle; 4686 } 4687 4688 /** 4689 * find_process_by_pid - find a process with a matching PID value. 4690 * @pid: the pid in question. 4691 * 4692 * The task of @pid, if found. %NULL otherwise. 4693 */ 4694 static struct task_struct *find_process_by_pid(pid_t pid) 4695 { 4696 return pid ? find_task_by_vpid(pid) : current; 4697 } 4698 4699 /* 4700 * sched_setparam() passes in -1 for its policy, to let the functions 4701 * it calls know not to change it. 4702 */ 4703 #define SETPARAM_POLICY -1 4704 4705 static void __setscheduler_params(struct task_struct *p, 4706 const struct sched_attr *attr) 4707 { 4708 int policy = attr->sched_policy; 4709 4710 if (policy == SETPARAM_POLICY) 4711 policy = p->policy; 4712 4713 p->policy = policy; 4714 4715 if (dl_policy(policy)) 4716 __setparam_dl(p, attr); 4717 else if (fair_policy(policy)) 4718 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 4719 4720 /* 4721 * __sched_setscheduler() ensures attr->sched_priority == 0 when 4722 * !rt_policy. Always setting this ensures that things like 4723 * getparam()/getattr() don't report silly values for !rt tasks. 4724 */ 4725 p->rt_priority = attr->sched_priority; 4726 p->normal_prio = normal_prio(p); 4727 set_load_weight(p, true); 4728 } 4729 4730 /* Actually do priority change: must hold pi & rq lock. */ 4731 static void __setscheduler(struct rq *rq, struct task_struct *p, 4732 const struct sched_attr *attr, bool keep_boost) 4733 { 4734 /* 4735 * If params can't change scheduling class changes aren't allowed 4736 * either. 4737 */ 4738 if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS) 4739 return; 4740 4741 __setscheduler_params(p, attr); 4742 4743 /* 4744 * Keep a potential priority boosting if called from 4745 * sched_setscheduler(). 4746 */ 4747 p->prio = normal_prio(p); 4748 if (keep_boost) 4749 p->prio = rt_effective_prio(p, p->prio); 4750 4751 if (dl_prio(p->prio)) 4752 p->sched_class = &dl_sched_class; 4753 else if (rt_prio(p->prio)) 4754 p->sched_class = &rt_sched_class; 4755 else 4756 p->sched_class = &fair_sched_class; 4757 } 4758 4759 /* 4760 * Check the target process has a UID that matches the current process's: 4761 */ 4762 static bool check_same_owner(struct task_struct *p) 4763 { 4764 const struct cred *cred = current_cred(), *pcred; 4765 bool match; 4766 4767 rcu_read_lock(); 4768 pcred = __task_cred(p); 4769 match = (uid_eq(cred->euid, pcred->euid) || 4770 uid_eq(cred->euid, pcred->uid)); 4771 rcu_read_unlock(); 4772 return match; 4773 } 4774 4775 static int __sched_setscheduler(struct task_struct *p, 4776 const struct sched_attr *attr, 4777 bool user, bool pi) 4778 { 4779 int newprio = dl_policy(attr->sched_policy) ? 
MAX_DL_PRIO - 1 : 4780 MAX_RT_PRIO - 1 - attr->sched_priority; 4781 int retval, oldprio, oldpolicy = -1, queued, running; 4782 int new_effective_prio, policy = attr->sched_policy; 4783 const struct sched_class *prev_class; 4784 struct rq_flags rf; 4785 int reset_on_fork; 4786 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 4787 struct rq *rq; 4788 4789 /* The pi code expects interrupts enabled */ 4790 BUG_ON(pi && in_interrupt()); 4791 recheck: 4792 /* Double check policy once rq lock held: */ 4793 if (policy < 0) { 4794 reset_on_fork = p->sched_reset_on_fork; 4795 policy = oldpolicy = p->policy; 4796 } else { 4797 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 4798 4799 if (!valid_policy(policy)) 4800 return -EINVAL; 4801 } 4802 4803 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) 4804 return -EINVAL; 4805 4806 /* 4807 * Valid priorities for SCHED_FIFO and SCHED_RR are 4808 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 4809 * SCHED_BATCH and SCHED_IDLE is 0. 4810 */ 4811 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || 4812 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) 4813 return -EINVAL; 4814 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 4815 (rt_policy(policy) != (attr->sched_priority != 0))) 4816 return -EINVAL; 4817 4818 /* 4819 * Allow unprivileged RT tasks to decrease priority: 4820 */ 4821 if (user && !capable(CAP_SYS_NICE)) { 4822 if (fair_policy(policy)) { 4823 if (attr->sched_nice < task_nice(p) && 4824 !can_nice(p, attr->sched_nice)) 4825 return -EPERM; 4826 } 4827 4828 if (rt_policy(policy)) { 4829 unsigned long rlim_rtprio = 4830 task_rlimit(p, RLIMIT_RTPRIO); 4831 4832 /* Can't set/change the rt policy: */ 4833 if (policy != p->policy && !rlim_rtprio) 4834 return -EPERM; 4835 4836 /* Can't increase priority: */ 4837 if (attr->sched_priority > p->rt_priority && 4838 attr->sched_priority > rlim_rtprio) 4839 return -EPERM; 4840 } 4841 4842 /* 4843 * Can't set/change SCHED_DEADLINE policy at all for now 4844 * (safest behavior); in the future we would like to allow 4845 * unprivileged DL tasks to increase their relative deadline 4846 * or reduce their runtime (both ways reducing utilization) 4847 */ 4848 if (dl_policy(policy)) 4849 return -EPERM; 4850 4851 /* 4852 * Treat SCHED_IDLE as nice 20. Only allow a switch to 4853 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 4854 */ 4855 if (task_has_idle_policy(p) && !idle_policy(policy)) { 4856 if (!can_nice(p, task_nice(p))) 4857 return -EPERM; 4858 } 4859 4860 /* Can't change other user's priorities: */ 4861 if (!check_same_owner(p)) 4862 return -EPERM; 4863 4864 /* Normal users shall not reset the sched_reset_on_fork flag: */ 4865 if (p->sched_reset_on_fork && !reset_on_fork) 4866 return -EPERM; 4867 } 4868 4869 if (user) { 4870 if (attr->sched_flags & SCHED_FLAG_SUGOV) 4871 return -EINVAL; 4872 4873 retval = security_task_setscheduler(p); 4874 if (retval) 4875 return retval; 4876 } 4877 4878 /* Update task specific "requested" clamps */ 4879 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { 4880 retval = uclamp_validate(p, attr); 4881 if (retval) 4882 return retval; 4883 } 4884 4885 if (pi) 4886 cpuset_read_lock(); 4887 4888 /* 4889 * Make sure no PI-waiters arrive (or leave) while we are 4890 * changing the priority of the task: 4891 * 4892 * To be able to change p->policy safely, the appropriate 4893 * runqueue lock must be held. 
4894 */ 4895 rq = task_rq_lock(p, &rf); 4896 update_rq_clock(rq); 4897 4898 /* 4899 * Changing the policy of the stop threads is a very bad idea: 4900 */ 4901 if (p == rq->stop) { 4902 retval = -EINVAL; 4903 goto unlock; 4904 } 4905 4906 /* 4907 * If not changing anything there's no need to proceed further, 4908 * but store a possible modification of reset_on_fork. 4909 */ 4910 if (unlikely(policy == p->policy)) { 4911 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 4912 goto change; 4913 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 4914 goto change; 4915 if (dl_policy(policy) && dl_param_changed(p, attr)) 4916 goto change; 4917 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) 4918 goto change; 4919 4920 p->sched_reset_on_fork = reset_on_fork; 4921 retval = 0; 4922 goto unlock; 4923 } 4924 change: 4925 4926 if (user) { 4927 #ifdef CONFIG_RT_GROUP_SCHED 4928 /* 4929 * Do not allow realtime tasks into groups that have no runtime 4930 * assigned. 4931 */ 4932 if (rt_bandwidth_enabled() && rt_policy(policy) && 4933 task_group(p)->rt_bandwidth.rt_runtime == 0 && 4934 !task_group_is_autogroup(task_group(p))) { 4935 retval = -EPERM; 4936 goto unlock; 4937 } 4938 #endif 4939 #ifdef CONFIG_SMP 4940 if (dl_bandwidth_enabled() && dl_policy(policy) && 4941 !(attr->sched_flags & SCHED_FLAG_SUGOV)) { 4942 cpumask_t *span = rq->rd->span; 4943 4944 /* 4945 * Don't allow tasks with an affinity mask smaller than 4946 * the entire root_domain to become SCHED_DEADLINE. We 4947 * will also fail if there's no bandwidth available. 4948 */ 4949 if (!cpumask_subset(span, p->cpus_ptr) || 4950 rq->rd->dl_bw.bw == 0) { 4951 retval = -EPERM; 4952 goto unlock; 4953 } 4954 } 4955 #endif 4956 } 4957 4958 /* Re-check policy now with rq lock held: */ 4959 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 4960 policy = oldpolicy = -1; 4961 task_rq_unlock(rq, p, &rf); 4962 if (pi) 4963 cpuset_read_unlock(); 4964 goto recheck; 4965 } 4966 4967 /* 4968 * If setscheduling to SCHED_DEADLINE (or changing the parameters 4969 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 4970 * is available. 4971 */ 4972 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { 4973 retval = -EBUSY; 4974 goto unlock; 4975 } 4976 4977 p->sched_reset_on_fork = reset_on_fork; 4978 oldprio = p->prio; 4979 4980 if (pi) { 4981 /* 4982 * Take priority boosted tasks into account. If the new 4983 * effective priority is unchanged, we just store the new 4984 * normal parameters and do not touch the scheduler class and 4985 * the runqueue. This will be done when the task deboosts 4986 * itself. 4987 */ 4988 new_effective_prio = rt_effective_prio(p, newprio); 4989 if (new_effective_prio == oldprio) 4990 queue_flags &= ~DEQUEUE_MOVE; 4991 } 4992 4993 queued = task_on_rq_queued(p); 4994 running = task_current(rq, p); 4995 if (queued) 4996 dequeue_task(rq, p, queue_flags); 4997 if (running) 4998 put_prev_task(rq, p); 4999 5000 prev_class = p->sched_class; 5001 5002 __setscheduler(rq, p, attr, pi); 5003 __setscheduler_uclamp(p, attr); 5004 5005 if (queued) { 5006 /* 5007 * We enqueue to tail when the priority of a task is 5008 * increased (user space view).
5009 */ 5010 if (oldprio < p->prio) 5011 queue_flags |= ENQUEUE_HEAD; 5012 5013 enqueue_task(rq, p, queue_flags); 5014 } 5015 if (running) 5016 set_next_task(rq, p); 5017 5018 check_class_changed(rq, p, prev_class, oldprio); 5019 5020 /* Avoid rq from going away on us: */ 5021 preempt_disable(); 5022 task_rq_unlock(rq, p, &rf); 5023 5024 if (pi) { 5025 cpuset_read_unlock(); 5026 rt_mutex_adjust_pi(p); 5027 } 5028 5029 /* Run balance callbacks after we've adjusted the PI chain: */ 5030 balance_callback(rq); 5031 preempt_enable(); 5032 5033 return 0; 5034 5035 unlock: 5036 task_rq_unlock(rq, p, &rf); 5037 if (pi) 5038 cpuset_read_unlock(); 5039 return retval; 5040 } 5041 5042 static int _sched_setscheduler(struct task_struct *p, int policy, 5043 const struct sched_param *param, bool check) 5044 { 5045 struct sched_attr attr = { 5046 .sched_policy = policy, 5047 .sched_priority = param->sched_priority, 5048 .sched_nice = PRIO_TO_NICE(p->static_prio), 5049 }; 5050 5051 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 5052 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 5053 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 5054 policy &= ~SCHED_RESET_ON_FORK; 5055 attr.sched_policy = policy; 5056 } 5057 5058 return __sched_setscheduler(p, &attr, check, true); 5059 } 5060 /** 5061 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 5062 * @p: the task in question. 5063 * @policy: new policy. 5064 * @param: structure containing the new RT priority. 5065 * 5066 * Return: 0 on success. An error code otherwise. 5067 * 5068 * NOTE that the task may be already dead. 5069 */ 5070 int sched_setscheduler(struct task_struct *p, int policy, 5071 const struct sched_param *param) 5072 { 5073 return _sched_setscheduler(p, policy, param, true); 5074 } 5075 EXPORT_SYMBOL_GPL(sched_setscheduler); 5076 5077 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 5078 { 5079 return __sched_setscheduler(p, attr, true, true); 5080 } 5081 EXPORT_SYMBOL_GPL(sched_setattr); 5082 5083 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) 5084 { 5085 return __sched_setscheduler(p, attr, false, true); 5086 } 5087 5088 /** 5089 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 5090 * @p: the task in question. 5091 * @policy: new policy. 5092 * @param: structure containing the new RT priority. 5093 * 5094 * Just like sched_setscheduler, only don't bother checking if the 5095 * current context has permission. For example, this is needed in 5096 * stop_machine(): we create temporary high priority worker threads, 5097 * but our caller might not have that capability. 5098 * 5099 * Return: 0 on success. An error code otherwise. 
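 *
 * As an illustrative sketch (not a call site taken from this file), an
 * in-kernel user that needs a helper kthread at real-time priority, much
 * like the stop_machine() case above, could do:
 *
 *	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 *
 *	sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 *
 * where @p is the kthread in question and error handling is omitted.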
5100 */ 5101 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 5102 const struct sched_param *param) 5103 { 5104 return _sched_setscheduler(p, policy, param, false); 5105 } 5106 EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); 5107 5108 static int 5109 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 5110 { 5111 struct sched_param lparam; 5112 struct task_struct *p; 5113 int retval; 5114 5115 if (!param || pid < 0) 5116 return -EINVAL; 5117 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 5118 return -EFAULT; 5119 5120 rcu_read_lock(); 5121 retval = -ESRCH; 5122 p = find_process_by_pid(pid); 5123 if (likely(p)) 5124 get_task_struct(p); 5125 rcu_read_unlock(); 5126 5127 if (likely(p)) { 5128 retval = sched_setscheduler(p, policy, &lparam); 5129 put_task_struct(p); 5130 } 5131 5132 return retval; 5133 } 5134 5135 /* 5136 * Mimics kernel/events/core.c perf_copy_attr(). 5137 */ 5138 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) 5139 { 5140 u32 size; 5141 int ret; 5142 5143 /* Zero the full structure, so that a short copy will be nice: */ 5144 memset(attr, 0, sizeof(*attr)); 5145 5146 ret = get_user(size, &uattr->size); 5147 if (ret) 5148 return ret; 5149 5150 /* ABI compatibility quirk: */ 5151 if (!size) 5152 size = SCHED_ATTR_SIZE_VER0; 5153 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) 5154 goto err_size; 5155 5156 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); 5157 if (ret) { 5158 if (ret == -E2BIG) 5159 goto err_size; 5160 return ret; 5161 } 5162 5163 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 5164 size < SCHED_ATTR_SIZE_VER1) 5165 return -EINVAL; 5166 5167 /* 5168 * XXX: Do we want to be lenient like existing syscalls; or do we want 5169 * to be strict and return an error on out-of-bounds values? 5170 */ 5171 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 5172 5173 return 0; 5174 5175 err_size: 5176 put_user(sizeof(*attr), &uattr->size); 5177 return -E2BIG; 5178 } 5179 5180 /** 5181 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 5182 * @pid: the pid in question. 5183 * @policy: new policy. 5184 * @param: structure containing the new RT priority. 5185 * 5186 * Return: 0 on success. An error code otherwise. 5187 */ 5188 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) 5189 { 5190 if (policy < 0) 5191 return -EINVAL; 5192 5193 return do_sched_setscheduler(pid, policy, param); 5194 } 5195 5196 /** 5197 * sys_sched_setparam - set/change the RT priority of a thread 5198 * @pid: the pid in question. 5199 * @param: structure containing the new RT priority. 5200 * 5201 * Return: 0 on success. An error code otherwise. 5202 */ 5203 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 5204 { 5205 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 5206 } 5207 5208 /** 5209 * sys_sched_setattr - same as above, but with extended sched_attr 5210 * @pid: the pid in question. 5211 * @uattr: structure containing the extended parameters. 5212 * @flags: for future extension. 
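 *
 * A rough user-space sketch (illustrative only; it assumes the uapi
 * struct sched_attr layout and omits error handling). glibc provides no
 * wrapper, so the raw syscall is used, here requesting SCHED_DEADLINE
 * with a 10ms runtime out of a 100ms period (values are in nanoseconds,
 * and CAP_SYS_NICE is normally required):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	syscall(SYS_sched_setattr, 0, &attr, 0);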
5213 */ 5214 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 5215 unsigned int, flags) 5216 { 5217 struct sched_attr attr; 5218 struct task_struct *p; 5219 int retval; 5220 5221 if (!uattr || pid < 0 || flags) 5222 return -EINVAL; 5223 5224 retval = sched_copy_attr(uattr, &attr); 5225 if (retval) 5226 return retval; 5227 5228 if ((int)attr.sched_policy < 0) 5229 return -EINVAL; 5230 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) 5231 attr.sched_policy = SETPARAM_POLICY; 5232 5233 rcu_read_lock(); 5234 retval = -ESRCH; 5235 p = find_process_by_pid(pid); 5236 if (likely(p)) 5237 get_task_struct(p); 5238 rcu_read_unlock(); 5239 5240 if (likely(p)) { 5241 retval = sched_setattr(p, &attr); 5242 put_task_struct(p); 5243 } 5244 5245 return retval; 5246 } 5247 5248 /** 5249 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 5250 * @pid: the pid in question. 5251 * 5252 * Return: On success, the policy of the thread. Otherwise, a negative error 5253 * code. 5254 */ 5255 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 5256 { 5257 struct task_struct *p; 5258 int retval; 5259 5260 if (pid < 0) 5261 return -EINVAL; 5262 5263 retval = -ESRCH; 5264 rcu_read_lock(); 5265 p = find_process_by_pid(pid); 5266 if (p) { 5267 retval = security_task_getscheduler(p); 5268 if (!retval) 5269 retval = p->policy 5270 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 5271 } 5272 rcu_read_unlock(); 5273 return retval; 5274 } 5275 5276 /** 5277 * sys_sched_getparam - get the RT priority of a thread 5278 * @pid: the pid in question. 5279 * @param: structure containing the RT priority. 5280 * 5281 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 5282 * code. 5283 */ 5284 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 5285 { 5286 struct sched_param lp = { .sched_priority = 0 }; 5287 struct task_struct *p; 5288 int retval; 5289 5290 if (!param || pid < 0) 5291 return -EINVAL; 5292 5293 rcu_read_lock(); 5294 p = find_process_by_pid(pid); 5295 retval = -ESRCH; 5296 if (!p) 5297 goto out_unlock; 5298 5299 retval = security_task_getscheduler(p); 5300 if (retval) 5301 goto out_unlock; 5302 5303 if (task_has_rt_policy(p)) 5304 lp.sched_priority = p->rt_priority; 5305 rcu_read_unlock(); 5306 5307 /* 5308 * This one might sleep, we cannot do it with a spinlock held ... 5309 */ 5310 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 5311 5312 return retval; 5313 5314 out_unlock: 5315 rcu_read_unlock(); 5316 return retval; 5317 } 5318 5319 /* 5320 * Copy the kernel size attribute structure (which might be larger 5321 * than what user-space knows about) to user-space. 5322 * 5323 * Note that all cases are valid: user-space buffer can be larger or 5324 * smaller than the kernel-space buffer. The usual case is that both 5325 * have the same size. 5326 */ 5327 static int 5328 sched_attr_copy_to_user(struct sched_attr __user *uattr, 5329 struct sched_attr *kattr, 5330 unsigned int usize) 5331 { 5332 unsigned int ksize = sizeof(*kattr); 5333 5334 if (!access_ok(uattr, usize)) 5335 return -EFAULT; 5336 5337 /* 5338 * sched_getattr() ABI forwards and backwards compatibility: 5339 * 5340 * If usize == ksize then we just copy everything to user-space and all is good. 5341 * 5342 * If usize < ksize then we only copy as much as user-space has space for, 5343 * this keeps ABI compatibility as well. We skip the rest. 
5344 * 5345 * If usize > ksize then user-space is using a newer version of the ABI, 5346 * which part the kernel doesn't know about. Just ignore it - tooling can 5347 * detect the kernel's knowledge of attributes from the attr->size value 5348 * which is set to ksize in this case. 5349 */ 5350 kattr->size = min(usize, ksize); 5351 5352 if (copy_to_user(uattr, kattr, kattr->size)) 5353 return -EFAULT; 5354 5355 return 0; 5356 } 5357 5358 /** 5359 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 5360 * @pid: the pid in question. 5361 * @uattr: structure containing the extended parameters. 5362 * @usize: sizeof(attr) for fwd/bwd comp. 5363 * @flags: for future extension. 5364 */ 5365 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 5366 unsigned int, usize, unsigned int, flags) 5367 { 5368 struct sched_attr kattr = { }; 5369 struct task_struct *p; 5370 int retval; 5371 5372 if (!uattr || pid < 0 || usize > PAGE_SIZE || 5373 usize < SCHED_ATTR_SIZE_VER0 || flags) 5374 return -EINVAL; 5375 5376 rcu_read_lock(); 5377 p = find_process_by_pid(pid); 5378 retval = -ESRCH; 5379 if (!p) 5380 goto out_unlock; 5381 5382 retval = security_task_getscheduler(p); 5383 if (retval) 5384 goto out_unlock; 5385 5386 kattr.sched_policy = p->policy; 5387 if (p->sched_reset_on_fork) 5388 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 5389 if (task_has_dl_policy(p)) 5390 __getparam_dl(p, &kattr); 5391 else if (task_has_rt_policy(p)) 5392 kattr.sched_priority = p->rt_priority; 5393 else 5394 kattr.sched_nice = task_nice(p); 5395 5396 #ifdef CONFIG_UCLAMP_TASK 5397 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 5398 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 5399 #endif 5400 5401 rcu_read_unlock(); 5402 5403 return sched_attr_copy_to_user(uattr, &kattr, usize); 5404 5405 out_unlock: 5406 rcu_read_unlock(); 5407 return retval; 5408 } 5409 5410 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 5411 { 5412 cpumask_var_t cpus_allowed, new_mask; 5413 struct task_struct *p; 5414 int retval; 5415 5416 rcu_read_lock(); 5417 5418 p = find_process_by_pid(pid); 5419 if (!p) { 5420 rcu_read_unlock(); 5421 return -ESRCH; 5422 } 5423 5424 /* Prevent p going away */ 5425 get_task_struct(p); 5426 rcu_read_unlock(); 5427 5428 if (p->flags & PF_NO_SETAFFINITY) { 5429 retval = -EINVAL; 5430 goto out_put_task; 5431 } 5432 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 5433 retval = -ENOMEM; 5434 goto out_put_task; 5435 } 5436 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 5437 retval = -ENOMEM; 5438 goto out_free_cpus_allowed; 5439 } 5440 retval = -EPERM; 5441 if (!check_same_owner(p)) { 5442 rcu_read_lock(); 5443 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 5444 rcu_read_unlock(); 5445 goto out_free_new_mask; 5446 } 5447 rcu_read_unlock(); 5448 } 5449 5450 retval = security_task_setscheduler(p); 5451 if (retval) 5452 goto out_free_new_mask; 5453 5454 5455 cpuset_cpus_allowed(p, cpus_allowed); 5456 cpumask_and(new_mask, in_mask, cpus_allowed); 5457 5458 /* 5459 * Since bandwidth control happens on root_domain basis, 5460 * if admission test is enabled, we only admit -deadline 5461 * tasks allowed to run on all the CPUs in the task's 5462 * root_domain. 
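 *
 * For example (illustrative): a SCHED_DEADLINE task whose root_domain
 * spans CPUs 0-3 cannot be restricted to CPUs 0-1; the subset check
 * below rejects that request with -EBUSY, because the task's reserved
 * bandwidth is accounted across the whole root_domain.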
5463 */ 5464 #ifdef CONFIG_SMP 5465 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 5466 rcu_read_lock(); 5467 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { 5468 retval = -EBUSY; 5469 rcu_read_unlock(); 5470 goto out_free_new_mask; 5471 } 5472 rcu_read_unlock(); 5473 } 5474 #endif 5475 again: 5476 retval = __set_cpus_allowed_ptr(p, new_mask, true); 5477 5478 if (!retval) { 5479 cpuset_cpus_allowed(p, cpus_allowed); 5480 if (!cpumask_subset(new_mask, cpus_allowed)) { 5481 /* 5482 * We must have raced with a concurrent cpuset 5483 * update. Just reset the cpus_allowed to the 5484 * cpuset's cpus_allowed 5485 */ 5486 cpumask_copy(new_mask, cpus_allowed); 5487 goto again; 5488 } 5489 } 5490 out_free_new_mask: 5491 free_cpumask_var(new_mask); 5492 out_free_cpus_allowed: 5493 free_cpumask_var(cpus_allowed); 5494 out_put_task: 5495 put_task_struct(p); 5496 return retval; 5497 } 5498 5499 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 5500 struct cpumask *new_mask) 5501 { 5502 if (len < cpumask_size()) 5503 cpumask_clear(new_mask); 5504 else if (len > cpumask_size()) 5505 len = cpumask_size(); 5506 5507 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 5508 } 5509 5510 /** 5511 * sys_sched_setaffinity - set the CPU affinity of a process 5512 * @pid: pid of the process 5513 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 5514 * @user_mask_ptr: user-space pointer to the new CPU mask 5515 * 5516 * Return: 0 on success. An error code otherwise. 5517 */ 5518 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 5519 unsigned long __user *, user_mask_ptr) 5520 { 5521 cpumask_var_t new_mask; 5522 int retval; 5523 5524 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 5525 return -ENOMEM; 5526 5527 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 5528 if (retval == 0) 5529 retval = sched_setaffinity(pid, new_mask); 5530 free_cpumask_var(new_mask); 5531 return retval; 5532 } 5533 5534 long sched_getaffinity(pid_t pid, struct cpumask *mask) 5535 { 5536 struct task_struct *p; 5537 unsigned long flags; 5538 int retval; 5539 5540 rcu_read_lock(); 5541 5542 retval = -ESRCH; 5543 p = find_process_by_pid(pid); 5544 if (!p) 5545 goto out_unlock; 5546 5547 retval = security_task_getscheduler(p); 5548 if (retval) 5549 goto out_unlock; 5550 5551 raw_spin_lock_irqsave(&p->pi_lock, flags); 5552 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); 5553 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 5554 5555 out_unlock: 5556 rcu_read_unlock(); 5557 5558 return retval; 5559 } 5560 5561 /** 5562 * sys_sched_getaffinity - get the CPU affinity of a process 5563 * @pid: pid of the process 5564 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 5565 * @user_mask_ptr: user-space pointer to hold the current CPU mask 5566 * 5567 * Return: size of CPU mask copied to user_mask_ptr on success. An 5568 * error code otherwise. 
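 *
 * Note that this differs from the glibc wrapper: the raw syscall returns
 * the number of bytes copied into @user_mask_ptr, while glibc's
 * sched_getaffinity() converts that to 0 on success. A minimal user-space
 * sketch using the wrapper (illustrative, error handling omitted):
 *
 *	cpu_set_t set;
 *
 *	sched_getaffinity(0, sizeof(set), &set);
 *	if (CPU_ISSET(2, &set))
 *		printf("may run on CPU 2\n");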
5569 */ 5570 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 5571 unsigned long __user *, user_mask_ptr) 5572 { 5573 int ret; 5574 cpumask_var_t mask; 5575 5576 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 5577 return -EINVAL; 5578 if (len & (sizeof(unsigned long)-1)) 5579 return -EINVAL; 5580 5581 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 5582 return -ENOMEM; 5583 5584 ret = sched_getaffinity(pid, mask); 5585 if (ret == 0) { 5586 unsigned int retlen = min(len, cpumask_size()); 5587 5588 if (copy_to_user(user_mask_ptr, mask, retlen)) 5589 ret = -EFAULT; 5590 else 5591 ret = retlen; 5592 } 5593 free_cpumask_var(mask); 5594 5595 return ret; 5596 } 5597 5598 /** 5599 * sys_sched_yield - yield the current processor to other threads. 5600 * 5601 * This function yields the current CPU to other tasks. If there are no 5602 * other threads running on this CPU then this function will return. 5603 * 5604 * Return: 0. 5605 */ 5606 static void do_sched_yield(void) 5607 { 5608 struct rq_flags rf; 5609 struct rq *rq; 5610 5611 rq = this_rq_lock_irq(&rf); 5612 5613 schedstat_inc(rq->yld_count); 5614 current->sched_class->yield_task(rq); 5615 5616 /* 5617 * Since we are going to call schedule() anyway, there's 5618 * no need to preempt or enable interrupts: 5619 */ 5620 preempt_disable(); 5621 rq_unlock(rq, &rf); 5622 sched_preempt_enable_no_resched(); 5623 5624 schedule(); 5625 } 5626 5627 SYSCALL_DEFINE0(sched_yield) 5628 { 5629 do_sched_yield(); 5630 return 0; 5631 } 5632 5633 #ifndef CONFIG_PREEMPTION 5634 int __sched _cond_resched(void) 5635 { 5636 if (should_resched(0)) { 5637 preempt_schedule_common(); 5638 return 1; 5639 } 5640 rcu_all_qs(); 5641 return 0; 5642 } 5643 EXPORT_SYMBOL(_cond_resched); 5644 #endif 5645 5646 /* 5647 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 5648 * call schedule, and on return reacquire the lock. 5649 * 5650 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level 5651 * operations here to prevent schedule() from being called twice (once via 5652 * spin_unlock(), once by hand). 5653 */ 5654 int __cond_resched_lock(spinlock_t *lock) 5655 { 5656 int resched = should_resched(PREEMPT_LOCK_OFFSET); 5657 int ret = 0; 5658 5659 lockdep_assert_held(lock); 5660 5661 if (spin_needbreak(lock) || resched) { 5662 spin_unlock(lock); 5663 if (resched) 5664 preempt_schedule_common(); 5665 else 5666 cpu_relax(); 5667 ret = 1; 5668 spin_lock(lock); 5669 } 5670 return ret; 5671 } 5672 EXPORT_SYMBOL(__cond_resched_lock); 5673 5674 /** 5675 * yield - yield the current processor to other threads. 5676 * 5677 * Do not ever use this function, there's a 99% chance you're doing it wrong. 5678 * 5679 * The scheduler is at all times free to pick the calling task as the most 5680 * eligible task to run, if removing the yield() call from your code breaks 5681 * it, its already broken. 5682 * 5683 * Typical broken usage is: 5684 * 5685 * while (!event) 5686 * yield(); 5687 * 5688 * where one assumes that yield() will let 'the other' process run that will 5689 * make event true. If the current task is a SCHED_FIFO task that will never 5690 * happen. Never use yield() as a progress guarantee!! 5691 * 5692 * If you want to use yield() to wait for something, use wait_event(). 5693 * If you want to use yield() to be 'nice' for others, use cond_resched(). 5694 * If you still want to use yield(), do not! 
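 *
 * As a sketch of the preferred pattern, the broken loop above would
 * instead be written as (assuming a wait queue head @wq guarding @event):
 *
 *	waiter:				waker:
 *	wait_event(wq, event);		event = true;
 *					wake_up(&wq);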
5695 */ 5696 void __sched yield(void) 5697 { 5698 set_current_state(TASK_RUNNING); 5699 do_sched_yield(); 5700 } 5701 EXPORT_SYMBOL(yield); 5702 5703 /** 5704 * yield_to - yield the current processor to another thread in 5705 * your thread group, or accelerate that thread toward the 5706 * processor it's on. 5707 * @p: target task 5708 * @preempt: whether task preemption is allowed or not 5709 * 5710 * It's the caller's job to ensure that the target task struct 5711 * can't go away on us before we can do any checks. 5712 * 5713 * Return: 5714 * true (>0) if we indeed boosted the target task. 5715 * false (0) if we failed to boost the target. 5716 * -ESRCH if there's no task to yield to. 5717 */ 5718 int __sched yield_to(struct task_struct *p, bool preempt) 5719 { 5720 struct task_struct *curr = current; 5721 struct rq *rq, *p_rq; 5722 unsigned long flags; 5723 int yielded = 0; 5724 5725 local_irq_save(flags); 5726 rq = this_rq(); 5727 5728 again: 5729 p_rq = task_rq(p); 5730 /* 5731 * If we're the only runnable task on the rq and target rq also 5732 * has only one task, there's absolutely no point in yielding. 5733 */ 5734 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 5735 yielded = -ESRCH; 5736 goto out_irq; 5737 } 5738 5739 double_rq_lock(rq, p_rq); 5740 if (task_rq(p) != p_rq) { 5741 double_rq_unlock(rq, p_rq); 5742 goto again; 5743 } 5744 5745 if (!curr->sched_class->yield_to_task) 5746 goto out_unlock; 5747 5748 if (curr->sched_class != p->sched_class) 5749 goto out_unlock; 5750 5751 if (task_running(p_rq, p) || p->state) 5752 goto out_unlock; 5753 5754 yielded = curr->sched_class->yield_to_task(rq, p, preempt); 5755 if (yielded) { 5756 schedstat_inc(rq->yld_count); 5757 /* 5758 * Make p's CPU reschedule; pick_next_entity takes care of 5759 * fairness. 5760 */ 5761 if (preempt && rq != p_rq) 5762 resched_curr(p_rq); 5763 } 5764 5765 out_unlock: 5766 double_rq_unlock(rq, p_rq); 5767 out_irq: 5768 local_irq_restore(flags); 5769 5770 if (yielded > 0) 5771 schedule(); 5772 5773 return yielded; 5774 } 5775 EXPORT_SYMBOL_GPL(yield_to); 5776 5777 int io_schedule_prepare(void) 5778 { 5779 int old_iowait = current->in_iowait; 5780 5781 current->in_iowait = 1; 5782 blk_schedule_flush_plug(current); 5783 5784 return old_iowait; 5785 } 5786 5787 void io_schedule_finish(int token) 5788 { 5789 current->in_iowait = token; 5790 } 5791 5792 /* 5793 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 5794 * that process accounting knows that this is a task in IO wait state. 5795 */ 5796 long __sched io_schedule_timeout(long timeout) 5797 { 5798 int token; 5799 long ret; 5800 5801 token = io_schedule_prepare(); 5802 ret = schedule_timeout(timeout); 5803 io_schedule_finish(token); 5804 5805 return ret; 5806 } 5807 EXPORT_SYMBOL(io_schedule_timeout); 5808 5809 void __sched io_schedule(void) 5810 { 5811 int token; 5812 5813 token = io_schedule_prepare(); 5814 schedule(); 5815 io_schedule_finish(token); 5816 } 5817 EXPORT_SYMBOL(io_schedule); 5818 5819 /** 5820 * sys_sched_get_priority_max - return maximum RT priority. 5821 * @policy: scheduling class. 5822 * 5823 * Return: On success, this syscall returns the maximum 5824 * rt_priority that can be used by a given scheduling class. 5825 * On failure, a negative error code is returned. 
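 *
 * For example, with MAX_USER_RT_PRIO being 100 this reports 99 for
 * SCHED_FIFO and SCHED_RR, and 0 for SCHED_DEADLINE, SCHED_NORMAL,
 * SCHED_BATCH and SCHED_IDLE, matching the switch statement below.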
5826 */ 5827 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 5828 { 5829 int ret = -EINVAL; 5830 5831 switch (policy) { 5832 case SCHED_FIFO: 5833 case SCHED_RR: 5834 ret = MAX_USER_RT_PRIO-1; 5835 break; 5836 case SCHED_DEADLINE: 5837 case SCHED_NORMAL: 5838 case SCHED_BATCH: 5839 case SCHED_IDLE: 5840 ret = 0; 5841 break; 5842 } 5843 return ret; 5844 } 5845 5846 /** 5847 * sys_sched_get_priority_min - return minimum RT priority. 5848 * @policy: scheduling class. 5849 * 5850 * Return: On success, this syscall returns the minimum 5851 * rt_priority that can be used by a given scheduling class. 5852 * On failure, a negative error code is returned. 5853 */ 5854 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 5855 { 5856 int ret = -EINVAL; 5857 5858 switch (policy) { 5859 case SCHED_FIFO: 5860 case SCHED_RR: 5861 ret = 1; 5862 break; 5863 case SCHED_DEADLINE: 5864 case SCHED_NORMAL: 5865 case SCHED_BATCH: 5866 case SCHED_IDLE: 5867 ret = 0; 5868 } 5869 return ret; 5870 } 5871 5872 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 5873 { 5874 struct task_struct *p; 5875 unsigned int time_slice; 5876 struct rq_flags rf; 5877 struct rq *rq; 5878 int retval; 5879 5880 if (pid < 0) 5881 return -EINVAL; 5882 5883 retval = -ESRCH; 5884 rcu_read_lock(); 5885 p = find_process_by_pid(pid); 5886 if (!p) 5887 goto out_unlock; 5888 5889 retval = security_task_getscheduler(p); 5890 if (retval) 5891 goto out_unlock; 5892 5893 rq = task_rq_lock(p, &rf); 5894 time_slice = 0; 5895 if (p->sched_class->get_rr_interval) 5896 time_slice = p->sched_class->get_rr_interval(rq, p); 5897 task_rq_unlock(rq, p, &rf); 5898 5899 rcu_read_unlock(); 5900 jiffies_to_timespec64(time_slice, t); 5901 return 0; 5902 5903 out_unlock: 5904 rcu_read_unlock(); 5905 return retval; 5906 } 5907 5908 /** 5909 * sys_sched_rr_get_interval - return the default timeslice of a process. 5910 * @pid: pid of the process. 5911 * @interval: userspace pointer to the timeslice value. 5912 * 5913 * this syscall writes the default timeslice value of a given process 5914 * into the user-space timespec buffer. A value of '0' means infinity. 5915 * 5916 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 5917 * an error code. 
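 *
 * As an illustration: a SCHED_RR task normally reports the round-robin
 * timeslice (100ms by default, tunable via sched_rr_timeslice_ms), while
 * SCHED_FIFO tasks report 0 because they have no timeslice.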
5918 */ 5919 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 5920 struct __kernel_timespec __user *, interval) 5921 { 5922 struct timespec64 t; 5923 int retval = sched_rr_get_interval(pid, &t); 5924 5925 if (retval == 0) 5926 retval = put_timespec64(&t, interval); 5927 5928 return retval; 5929 } 5930 5931 #ifdef CONFIG_COMPAT_32BIT_TIME 5932 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, 5933 struct old_timespec32 __user *, interval) 5934 { 5935 struct timespec64 t; 5936 int retval = sched_rr_get_interval(pid, &t); 5937 5938 if (retval == 0) 5939 retval = put_old_timespec32(&t, interval); 5940 return retval; 5941 } 5942 #endif 5943 5944 void sched_show_task(struct task_struct *p) 5945 { 5946 unsigned long free = 0; 5947 int ppid; 5948 5949 if (!try_get_task_stack(p)) 5950 return; 5951 5952 printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p)); 5953 5954 if (p->state == TASK_RUNNING) 5955 printk(KERN_CONT " running task "); 5956 #ifdef CONFIG_DEBUG_STACK_USAGE 5957 free = stack_not_used(p); 5958 #endif 5959 ppid = 0; 5960 rcu_read_lock(); 5961 if (pid_alive(p)) 5962 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 5963 rcu_read_unlock(); 5964 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 5965 task_pid_nr(p), ppid, 5966 (unsigned long)task_thread_info(p)->flags); 5967 5968 print_worker_info(KERN_INFO, p); 5969 show_stack(p, NULL); 5970 put_task_stack(p); 5971 } 5972 EXPORT_SYMBOL_GPL(sched_show_task); 5973 5974 static inline bool 5975 state_filter_match(unsigned long state_filter, struct task_struct *p) 5976 { 5977 /* no filter, everything matches */ 5978 if (!state_filter) 5979 return true; 5980 5981 /* filter, but doesn't match */ 5982 if (!(p->state & state_filter)) 5983 return false; 5984 5985 /* 5986 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows 5987 * TASK_KILLABLE). 5988 */ 5989 if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) 5990 return false; 5991 5992 return true; 5993 } 5994 5995 5996 void show_state_filter(unsigned long state_filter) 5997 { 5998 struct task_struct *g, *p; 5999 6000 #if BITS_PER_LONG == 32 6001 printk(KERN_INFO 6002 " task PC stack pid father\n"); 6003 #else 6004 printk(KERN_INFO 6005 " task PC stack pid father\n"); 6006 #endif 6007 rcu_read_lock(); 6008 for_each_process_thread(g, p) { 6009 /* 6010 * reset the NMI-timeout, listing all files on a slow 6011 * console might take a lot of time: 6012 * Also, reset softlockup watchdogs on all CPUs, because 6013 * another CPU might be blocked waiting for us to process 6014 * an IPI. 6015 */ 6016 touch_nmi_watchdog(); 6017 touch_all_softlockup_watchdogs(); 6018 if (state_filter_match(state_filter, p)) 6019 sched_show_task(p); 6020 } 6021 6022 #ifdef CONFIG_SCHED_DEBUG 6023 if (!state_filter) 6024 sysrq_sched_debug_show(); 6025 #endif 6026 rcu_read_unlock(); 6027 /* 6028 * Only show locks if all tasks are dumped: 6029 */ 6030 if (!state_filter) 6031 debug_show_all_locks(); 6032 } 6033 6034 /** 6035 * init_idle - set up an idle thread for a given CPU 6036 * @idle: task in question 6037 * @cpu: CPU the idle task belongs to 6038 * 6039 * NOTE: this function does not set the idle thread's NEED_RESCHED 6040 * flag, to make booting more robust. 
6041 */ 6042 void init_idle(struct task_struct *idle, int cpu) 6043 { 6044 struct rq *rq = cpu_rq(cpu); 6045 unsigned long flags; 6046 6047 __sched_fork(0, idle); 6048 6049 raw_spin_lock_irqsave(&idle->pi_lock, flags); 6050 raw_spin_lock(&rq->lock); 6051 6052 idle->state = TASK_RUNNING; 6053 idle->se.exec_start = sched_clock(); 6054 idle->flags |= PF_IDLE; 6055 6056 kasan_unpoison_task_stack(idle); 6057 6058 #ifdef CONFIG_SMP 6059 /* 6060 * Its possible that init_idle() gets called multiple times on a task, 6061 * in that case do_set_cpus_allowed() will not do the right thing. 6062 * 6063 * And since this is boot we can forgo the serialization. 6064 */ 6065 set_cpus_allowed_common(idle, cpumask_of(cpu)); 6066 #endif 6067 /* 6068 * We're having a chicken and egg problem, even though we are 6069 * holding rq->lock, the CPU isn't yet set to this CPU so the 6070 * lockdep check in task_group() will fail. 6071 * 6072 * Similar case to sched_fork(). / Alternatively we could 6073 * use task_rq_lock() here and obtain the other rq->lock. 6074 * 6075 * Silence PROVE_RCU 6076 */ 6077 rcu_read_lock(); 6078 __set_task_cpu(idle, cpu); 6079 rcu_read_unlock(); 6080 6081 rq->idle = idle; 6082 rcu_assign_pointer(rq->curr, idle); 6083 idle->on_rq = TASK_ON_RQ_QUEUED; 6084 #ifdef CONFIG_SMP 6085 idle->on_cpu = 1; 6086 #endif 6087 raw_spin_unlock(&rq->lock); 6088 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 6089 6090 /* Set the preempt count _outside_ the spinlocks! */ 6091 init_idle_preempt_count(idle, cpu); 6092 6093 /* 6094 * The idle tasks have their own, simple scheduling class: 6095 */ 6096 idle->sched_class = &idle_sched_class; 6097 ftrace_graph_init_idle_task(idle, cpu); 6098 vtime_init_idle(idle, cpu); 6099 #ifdef CONFIG_SMP 6100 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 6101 #endif 6102 } 6103 6104 #ifdef CONFIG_SMP 6105 6106 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 6107 const struct cpumask *trial) 6108 { 6109 int ret = 1; 6110 6111 if (!cpumask_weight(cur)) 6112 return ret; 6113 6114 ret = dl_cpuset_cpumask_can_shrink(cur, trial); 6115 6116 return ret; 6117 } 6118 6119 int task_can_attach(struct task_struct *p, 6120 const struct cpumask *cs_cpus_allowed) 6121 { 6122 int ret = 0; 6123 6124 /* 6125 * Kthreads which disallow setaffinity shouldn't be moved 6126 * to a new cpuset; we don't want to change their CPU 6127 * affinity and isolating such threads by their set of 6128 * allowed nodes is unnecessary. Thus, cpusets are not 6129 * applicable for such threads. This prevents checking for 6130 * success of set_cpus_allowed_ptr() on all attached tasks 6131 * before cpus_mask may be changed. 
6132 */ 6133 if (p->flags & PF_NO_SETAFFINITY) { 6134 ret = -EINVAL; 6135 goto out; 6136 } 6137 6138 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 6139 cs_cpus_allowed)) 6140 ret = dl_task_can_attach(p, cs_cpus_allowed); 6141 6142 out: 6143 return ret; 6144 } 6145 6146 bool sched_smp_initialized __read_mostly; 6147 6148 #ifdef CONFIG_NUMA_BALANCING 6149 /* Migrate current task p to target_cpu */ 6150 int migrate_task_to(struct task_struct *p, int target_cpu) 6151 { 6152 struct migration_arg arg = { p, target_cpu }; 6153 int curr_cpu = task_cpu(p); 6154 6155 if (curr_cpu == target_cpu) 6156 return 0; 6157 6158 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 6159 return -EINVAL; 6160 6161 /* TODO: This is not properly updating schedstats */ 6162 6163 trace_sched_move_numa(p, curr_cpu, target_cpu); 6164 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 6165 } 6166 6167 /* 6168 * Requeue a task on a given node and accurately track the number of NUMA 6169 * tasks on the runqueues 6170 */ 6171 void sched_setnuma(struct task_struct *p, int nid) 6172 { 6173 bool queued, running; 6174 struct rq_flags rf; 6175 struct rq *rq; 6176 6177 rq = task_rq_lock(p, &rf); 6178 queued = task_on_rq_queued(p); 6179 running = task_current(rq, p); 6180 6181 if (queued) 6182 dequeue_task(rq, p, DEQUEUE_SAVE); 6183 if (running) 6184 put_prev_task(rq, p); 6185 6186 p->numa_preferred_nid = nid; 6187 6188 if (queued) 6189 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 6190 if (running) 6191 set_next_task(rq, p); 6192 task_rq_unlock(rq, p, &rf); 6193 } 6194 #endif /* CONFIG_NUMA_BALANCING */ 6195 6196 #ifdef CONFIG_HOTPLUG_CPU 6197 /* 6198 * Ensure that the idle task is using init_mm right before its CPU goes 6199 * offline. 6200 */ 6201 void idle_task_exit(void) 6202 { 6203 struct mm_struct *mm = current->active_mm; 6204 6205 BUG_ON(cpu_online(smp_processor_id())); 6206 6207 if (mm != &init_mm) { 6208 switch_mm(mm, &init_mm, current); 6209 current->active_mm = &init_mm; 6210 finish_arch_post_lock_switch(); 6211 } 6212 mmdrop(mm); 6213 } 6214 6215 /* 6216 * Since this CPU is going 'away' for a while, fold any nr_active delta 6217 * we might have. Assumes we're called after migrate_tasks() so that the 6218 * nr_active count is stable. We need to take the teardown thread which 6219 * is calling this into account, so we hand in adjust = 1 to the load 6220 * calculation. 6221 * 6222 * Also see the comment "Global load-average calculations". 6223 */ 6224 static void calc_load_migrate(struct rq *rq) 6225 { 6226 long delta = calc_load_fold_active(rq, 1); 6227 if (delta) 6228 atomic_long_add(delta, &calc_load_tasks); 6229 } 6230 6231 static struct task_struct *__pick_migrate_task(struct rq *rq) 6232 { 6233 const struct sched_class *class; 6234 struct task_struct *next; 6235 6236 for_each_class(class) { 6237 next = class->pick_next_task(rq); 6238 if (next) { 6239 next->sched_class->put_prev_task(rq, next); 6240 return next; 6241 } 6242 } 6243 6244 /* The idle class should always have a runnable task */ 6245 BUG(); 6246 } 6247 6248 /* 6249 * Migrate all tasks from the rq, sleeping tasks will be migrated by 6250 * try_to_wake_up()->select_task_rq(). 6251 * 6252 * Called with rq->lock held even though we'er in stop_machine() and 6253 * there's no concurrency possible, we hold the required locks anyway 6254 * because of lock validation efforts. 
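 *
 * Rough flow, as implemented below: __pick_migrate_task() pops the
 * highest-class runnable task, select_fallback_rq() picks a still
 * usable destination CPU, and __migrate_task() moves it; the loop
 * repeats until only the current thread remains on the dead rq.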
6255 */ 6256 static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) 6257 { 6258 struct rq *rq = dead_rq; 6259 struct task_struct *next, *stop = rq->stop; 6260 struct rq_flags orf = *rf; 6261 int dest_cpu; 6262 6263 /* 6264 * Fudge the rq selection such that the below task selection loop 6265 * doesn't get stuck on the currently eligible stop task. 6266 * 6267 * We're currently inside stop_machine() and the rq is either stuck 6268 * in the stop_machine_cpu_stop() loop, or we're executing this code, 6269 * either way we should never end up calling schedule() until we're 6270 * done here. 6271 */ 6272 rq->stop = NULL; 6273 6274 /* 6275 * put_prev_task() and pick_next_task() sched 6276 * class method both need to have an up-to-date 6277 * value of rq->clock[_task] 6278 */ 6279 update_rq_clock(rq); 6280 6281 for (;;) { 6282 /* 6283 * There's this thread running, bail when that's the only 6284 * remaining thread: 6285 */ 6286 if (rq->nr_running == 1) 6287 break; 6288 6289 next = __pick_migrate_task(rq); 6290 6291 /* 6292 * Rules for changing task_struct::cpus_mask are holding 6293 * both pi_lock and rq->lock, such that holding either 6294 * stabilizes the mask. 6295 * 6296 * Drop rq->lock is not quite as disastrous as it usually is 6297 * because !cpu_active at this point, which means load-balance 6298 * will not interfere. Also, stop-machine. 6299 */ 6300 rq_unlock(rq, rf); 6301 raw_spin_lock(&next->pi_lock); 6302 rq_relock(rq, rf); 6303 6304 /* 6305 * Since we're inside stop-machine, _nothing_ should have 6306 * changed the task, WARN if weird stuff happened, because in 6307 * that case the above rq->lock drop is a fail too. 6308 */ 6309 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { 6310 raw_spin_unlock(&next->pi_lock); 6311 continue; 6312 } 6313 6314 /* Find suitable destination for @next, with force if needed. */ 6315 dest_cpu = select_fallback_rq(dead_rq->cpu, next); 6316 rq = __migrate_task(rq, rf, next, dest_cpu); 6317 if (rq != dead_rq) { 6318 rq_unlock(rq, rf); 6319 rq = dead_rq; 6320 *rf = orf; 6321 rq_relock(rq, rf); 6322 } 6323 raw_spin_unlock(&next->pi_lock); 6324 } 6325 6326 rq->stop = stop; 6327 } 6328 #endif /* CONFIG_HOTPLUG_CPU */ 6329 6330 void set_rq_online(struct rq *rq) 6331 { 6332 if (!rq->online) { 6333 const struct sched_class *class; 6334 6335 cpumask_set_cpu(rq->cpu, rq->rd->online); 6336 rq->online = 1; 6337 6338 for_each_class(class) { 6339 if (class->rq_online) 6340 class->rq_online(rq); 6341 } 6342 } 6343 } 6344 6345 void set_rq_offline(struct rq *rq) 6346 { 6347 if (rq->online) { 6348 const struct sched_class *class; 6349 6350 for_each_class(class) { 6351 if (class->rq_offline) 6352 class->rq_offline(rq); 6353 } 6354 6355 cpumask_clear_cpu(rq->cpu, rq->rd->online); 6356 rq->online = 0; 6357 } 6358 } 6359 6360 /* 6361 * used to mark begin/end of suspend/resume: 6362 */ 6363 static int num_cpus_frozen; 6364 6365 /* 6366 * Update cpusets according to cpu_active mask. If cpusets are 6367 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 6368 * around partition_sched_domains(). 6369 * 6370 * If we come here as part of a suspend/resume, don't touch cpusets because we 6371 * want to restore it back to its original state upon resume anyway. 6372 */ 6373 static void cpuset_cpu_active(void) 6374 { 6375 if (cpuhp_tasks_frozen) { 6376 /* 6377 * num_cpus_frozen tracks how many CPUs are involved in suspend 6378 * resume sequence. 
As long as this is not the last online 6379 * operation in the resume sequence, just build a single sched 6380 * domain, ignoring cpusets. 6381 */ 6382 partition_sched_domains(1, NULL, NULL); 6383 if (--num_cpus_frozen) 6384 return; 6385 /* 6386 * This is the last CPU online operation. So fall through and 6387 * restore the original sched domains by considering the 6388 * cpuset configurations. 6389 */ 6390 cpuset_force_rebuild(); 6391 } 6392 cpuset_update_active_cpus(); 6393 } 6394 6395 static int cpuset_cpu_inactive(unsigned int cpu) 6396 { 6397 if (!cpuhp_tasks_frozen) { 6398 if (dl_cpu_busy(cpu)) 6399 return -EBUSY; 6400 cpuset_update_active_cpus(); 6401 } else { 6402 num_cpus_frozen++; 6403 partition_sched_domains(1, NULL, NULL); 6404 } 6405 return 0; 6406 } 6407 6408 int sched_cpu_activate(unsigned int cpu) 6409 { 6410 struct rq *rq = cpu_rq(cpu); 6411 struct rq_flags rf; 6412 6413 #ifdef CONFIG_SCHED_SMT 6414 /* 6415 * When going up, increment the number of cores with SMT present. 6416 */ 6417 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 6418 static_branch_inc_cpuslocked(&sched_smt_present); 6419 #endif 6420 set_cpu_active(cpu, true); 6421 6422 if (sched_smp_initialized) { 6423 sched_domains_numa_masks_set(cpu); 6424 cpuset_cpu_active(); 6425 } 6426 6427 /* 6428 * Put the rq online, if not already. This happens: 6429 * 6430 * 1) In the early boot process, because we build the real domains 6431 * after all CPUs have been brought up. 6432 * 6433 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the 6434 * domains. 6435 */ 6436 rq_lock_irqsave(rq, &rf); 6437 if (rq->rd) { 6438 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 6439 set_rq_online(rq); 6440 } 6441 rq_unlock_irqrestore(rq, &rf); 6442 6443 return 0; 6444 } 6445 6446 int sched_cpu_deactivate(unsigned int cpu) 6447 { 6448 int ret; 6449 6450 set_cpu_active(cpu, false); 6451 /* 6452 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU 6453 * users of this state to go away such that all new such users will 6454 * observe it. 6455 * 6456 * Do sync before park smpboot threads to take care the rcu boost case. 6457 */ 6458 synchronize_rcu(); 6459 6460 #ifdef CONFIG_SCHED_SMT 6461 /* 6462 * When going down, decrement the number of cores with SMT present. 
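 *
 * This pairs with the increment in sched_cpu_activate() above; the
 * sched_smt_present static branch stays enabled as long as any core
 * still has two or more SMT siblings online.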
6463 */ 6464 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 6465 static_branch_dec_cpuslocked(&sched_smt_present); 6466 #endif 6467 6468 if (!sched_smp_initialized) 6469 return 0; 6470 6471 ret = cpuset_cpu_inactive(cpu); 6472 if (ret) { 6473 set_cpu_active(cpu, true); 6474 return ret; 6475 } 6476 sched_domains_numa_masks_clear(cpu); 6477 return 0; 6478 } 6479 6480 static void sched_rq_cpu_starting(unsigned int cpu) 6481 { 6482 struct rq *rq = cpu_rq(cpu); 6483 6484 rq->calc_load_update = calc_load_update; 6485 update_max_interval(); 6486 } 6487 6488 int sched_cpu_starting(unsigned int cpu) 6489 { 6490 sched_rq_cpu_starting(cpu); 6491 sched_tick_start(cpu); 6492 return 0; 6493 } 6494 6495 #ifdef CONFIG_HOTPLUG_CPU 6496 int sched_cpu_dying(unsigned int cpu) 6497 { 6498 struct rq *rq = cpu_rq(cpu); 6499 struct rq_flags rf; 6500 6501 /* Handle pending wakeups and then migrate everything off */ 6502 sched_ttwu_pending(); 6503 sched_tick_stop(cpu); 6504 6505 rq_lock_irqsave(rq, &rf); 6506 if (rq->rd) { 6507 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 6508 set_rq_offline(rq); 6509 } 6510 migrate_tasks(rq, &rf); 6511 BUG_ON(rq->nr_running != 1); 6512 rq_unlock_irqrestore(rq, &rf); 6513 6514 calc_load_migrate(rq); 6515 update_max_interval(); 6516 nohz_balance_exit_idle(rq); 6517 hrtick_clear(rq); 6518 return 0; 6519 } 6520 #endif 6521 6522 void __init sched_init_smp(void) 6523 { 6524 sched_init_numa(); 6525 6526 /* 6527 * There's no userspace yet to cause hotplug operations; hence all the 6528 * CPU masks are stable and all blatant races in the below code cannot 6529 * happen. 6530 */ 6531 mutex_lock(&sched_domains_mutex); 6532 sched_init_domains(cpu_active_mask); 6533 mutex_unlock(&sched_domains_mutex); 6534 6535 /* Move init over to a non-isolated CPU */ 6536 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) 6537 BUG(); 6538 sched_init_granularity(); 6539 6540 init_sched_rt_class(); 6541 init_sched_dl_class(); 6542 6543 sched_smp_initialized = true; 6544 } 6545 6546 static int __init migration_init(void) 6547 { 6548 sched_cpu_starting(smp_processor_id()); 6549 return 0; 6550 } 6551 early_initcall(migration_init); 6552 6553 #else 6554 void __init sched_init_smp(void) 6555 { 6556 sched_init_granularity(); 6557 } 6558 #endif /* CONFIG_SMP */ 6559 6560 int in_sched_functions(unsigned long addr) 6561 { 6562 return in_lock_functions(addr) || 6563 (addr >= (unsigned long)__sched_text_start 6564 && addr < (unsigned long)__sched_text_end); 6565 } 6566 6567 #ifdef CONFIG_CGROUP_SCHED 6568 /* 6569 * Default task group. 6570 * Every task in system belongs to this group at bootup. 
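 *
 * (This group also backs the root cpu cgroup: cpu_cgroup_css_alloc()
 * below returns &root_task_group.css for the top-level cgroup.)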
6571 */ 6572 struct task_group root_task_group; 6573 LIST_HEAD(task_groups); 6574 6575 /* Cacheline aligned slab cache for task_group */ 6576 static struct kmem_cache *task_group_cache __read_mostly; 6577 #endif 6578 6579 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 6580 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask); 6581 6582 void __init sched_init(void) 6583 { 6584 unsigned long ptr = 0; 6585 int i; 6586 6587 wait_bit_init(); 6588 6589 #ifdef CONFIG_FAIR_GROUP_SCHED 6590 ptr += 2 * nr_cpu_ids * sizeof(void **); 6591 #endif 6592 #ifdef CONFIG_RT_GROUP_SCHED 6593 ptr += 2 * nr_cpu_ids * sizeof(void **); 6594 #endif 6595 if (ptr) { 6596 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); 6597 6598 #ifdef CONFIG_FAIR_GROUP_SCHED 6599 root_task_group.se = (struct sched_entity **)ptr; 6600 ptr += nr_cpu_ids * sizeof(void **); 6601 6602 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 6603 ptr += nr_cpu_ids * sizeof(void **); 6604 6605 #endif /* CONFIG_FAIR_GROUP_SCHED */ 6606 #ifdef CONFIG_RT_GROUP_SCHED 6607 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 6608 ptr += nr_cpu_ids * sizeof(void **); 6609 6610 root_task_group.rt_rq = (struct rt_rq **)ptr; 6611 ptr += nr_cpu_ids * sizeof(void **); 6612 6613 #endif /* CONFIG_RT_GROUP_SCHED */ 6614 } 6615 #ifdef CONFIG_CPUMASK_OFFSTACK 6616 for_each_possible_cpu(i) { 6617 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 6618 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 6619 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node( 6620 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 6621 } 6622 #endif /* CONFIG_CPUMASK_OFFSTACK */ 6623 6624 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); 6625 init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime()); 6626 6627 #ifdef CONFIG_SMP 6628 init_defrootdomain(); 6629 #endif 6630 6631 #ifdef CONFIG_RT_GROUP_SCHED 6632 init_rt_bandwidth(&root_task_group.rt_bandwidth, 6633 global_rt_period(), global_rt_runtime()); 6634 #endif /* CONFIG_RT_GROUP_SCHED */ 6635 6636 #ifdef CONFIG_CGROUP_SCHED 6637 task_group_cache = KMEM_CACHE(task_group, 0); 6638 6639 list_add(&root_task_group.list, &task_groups); 6640 INIT_LIST_HEAD(&root_task_group.children); 6641 INIT_LIST_HEAD(&root_task_group.siblings); 6642 autogroup_init(&init_task); 6643 #endif /* CONFIG_CGROUP_SCHED */ 6644 6645 for_each_possible_cpu(i) { 6646 struct rq *rq; 6647 6648 rq = cpu_rq(i); 6649 raw_spin_lock_init(&rq->lock); 6650 rq->nr_running = 0; 6651 rq->calc_load_active = 0; 6652 rq->calc_load_update = jiffies + LOAD_FREQ; 6653 init_cfs_rq(&rq->cfs); 6654 init_rt_rq(&rq->rt); 6655 init_dl_rq(&rq->dl); 6656 #ifdef CONFIG_FAIR_GROUP_SCHED 6657 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 6658 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 6659 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 6660 /* 6661 * How much CPU bandwidth does root_task_group get? 6662 * 6663 * In case of task-groups formed thr' the cgroup filesystem, it 6664 * gets 100% of the CPU resources in the system. This overall 6665 * system CPU resource is divided among the tasks of 6666 * root_task_group and its child task-groups in a fair manner, 6667 * based on each entity's (task or task-group's) weight 6668 * (se->load.weight). 
6669 * 6670 * In other words, if root_task_group has 10 tasks of weight 6671 * 1024) and two child groups A0 and A1 (of weight 1024 each), 6672 * then A0's share of the CPU resource is: 6673 * 6674 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 6675 * 6676 * We achieve this by letting root_task_group's tasks sit 6677 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 6678 */ 6679 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 6680 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 6681 #endif /* CONFIG_FAIR_GROUP_SCHED */ 6682 6683 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 6684 #ifdef CONFIG_RT_GROUP_SCHED 6685 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 6686 #endif 6687 #ifdef CONFIG_SMP 6688 rq->sd = NULL; 6689 rq->rd = NULL; 6690 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 6691 rq->balance_callback = NULL; 6692 rq->active_balance = 0; 6693 rq->next_balance = jiffies; 6694 rq->push_cpu = 0; 6695 rq->cpu = i; 6696 rq->online = 0; 6697 rq->idle_stamp = 0; 6698 rq->avg_idle = 2*sysctl_sched_migration_cost; 6699 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 6700 6701 INIT_LIST_HEAD(&rq->cfs_tasks); 6702 6703 rq_attach_root(rq, &def_root_domain); 6704 #ifdef CONFIG_NO_HZ_COMMON 6705 rq->last_load_update_tick = jiffies; 6706 rq->last_blocked_load_update_tick = jiffies; 6707 atomic_set(&rq->nohz_flags, 0); 6708 #endif 6709 #endif /* CONFIG_SMP */ 6710 hrtick_rq_init(rq); 6711 atomic_set(&rq->nr_iowait, 0); 6712 } 6713 6714 set_load_weight(&init_task, false); 6715 6716 /* 6717 * The boot idle thread does lazy MMU switching as well: 6718 */ 6719 mmgrab(&init_mm); 6720 enter_lazy_tlb(&init_mm, current); 6721 6722 /* 6723 * Make us the idle thread. Technically, schedule() should not be 6724 * called from this thread, however somewhere below it might be, 6725 * but because we are the idle thread, we just pick up running again 6726 * when this runqueue becomes "idle". 6727 */ 6728 init_idle(current, smp_processor_id()); 6729 6730 calc_load_update = jiffies + LOAD_FREQ; 6731 6732 #ifdef CONFIG_SMP 6733 idle_thread_set_boot_cpu(); 6734 #endif 6735 init_sched_fair_class(); 6736 6737 init_schedstats(); 6738 6739 psi_init(); 6740 6741 init_uclamp(); 6742 6743 scheduler_running = 1; 6744 } 6745 6746 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 6747 static inline int preempt_count_equals(int preempt_offset) 6748 { 6749 int nested = preempt_count() + rcu_preempt_depth(); 6750 6751 return (nested == preempt_offset); 6752 } 6753 6754 void __might_sleep(const char *file, int line, int preempt_offset) 6755 { 6756 /* 6757 * Blocking primitives will set (and therefore destroy) current->state, 6758 * since we will exit with TASK_RUNNING make sure we enter with it, 6759 * otherwise we will destroy state. 
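 *
 * For example, calling a blocking primitive between
 * set_current_state(TASK_UNINTERRUPTIBLE) and schedule() would clobber
 * the state that was just set; the WARN_ONCE() below catches exactly
 * that pattern, reporting the task_state_change site recorded by
 * set_current_state().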
6760 */ 6761 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 6762 "do not call blocking ops when !TASK_RUNNING; " 6763 "state=%lx set at [<%p>] %pS\n", 6764 current->state, 6765 (void *)current->task_state_change, 6766 (void *)current->task_state_change); 6767 6768 ___might_sleep(file, line, preempt_offset); 6769 } 6770 EXPORT_SYMBOL(__might_sleep); 6771 6772 void ___might_sleep(const char *file, int line, int preempt_offset) 6773 { 6774 /* Ratelimiting timestamp: */ 6775 static unsigned long prev_jiffy; 6776 6777 unsigned long preempt_disable_ip; 6778 6779 /* WARN_ON_ONCE() by default, no rate limit required: */ 6780 rcu_sleep_check(); 6781 6782 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 6783 !is_idle_task(current) && !current->non_block_count) || 6784 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 6785 oops_in_progress) 6786 return; 6787 6788 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 6789 return; 6790 prev_jiffy = jiffies; 6791 6792 /* Save this before calling printk(), since that will clobber it: */ 6793 preempt_disable_ip = get_preempt_disable_ip(current); 6794 6795 printk(KERN_ERR 6796 "BUG: sleeping function called from invalid context at %s:%d\n", 6797 file, line); 6798 printk(KERN_ERR 6799 "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", 6800 in_atomic(), irqs_disabled(), current->non_block_count, 6801 current->pid, current->comm); 6802 6803 if (task_stack_end_corrupted(current)) 6804 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 6805 6806 debug_show_held_locks(current); 6807 if (irqs_disabled()) 6808 print_irqtrace_events(current); 6809 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 6810 && !preempt_count_equals(preempt_offset)) { 6811 pr_err("Preemption disabled at:"); 6812 print_ip_sym(preempt_disable_ip); 6813 pr_cont("\n"); 6814 } 6815 dump_stack(); 6816 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 6817 } 6818 EXPORT_SYMBOL(___might_sleep); 6819 6820 void __cant_sleep(const char *file, int line, int preempt_offset) 6821 { 6822 static unsigned long prev_jiffy; 6823 6824 if (irqs_disabled()) 6825 return; 6826 6827 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 6828 return; 6829 6830 if (preempt_count() > preempt_offset) 6831 return; 6832 6833 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 6834 return; 6835 prev_jiffy = jiffies; 6836 6837 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 6838 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 6839 in_atomic(), irqs_disabled(), 6840 current->pid, current->comm); 6841 6842 debug_show_held_locks(current); 6843 dump_stack(); 6844 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 6845 } 6846 EXPORT_SYMBOL_GPL(__cant_sleep); 6847 #endif 6848 6849 #ifdef CONFIG_MAGIC_SYSRQ 6850 void normalize_rt_tasks(void) 6851 { 6852 struct task_struct *g, *p; 6853 struct sched_attr attr = { 6854 .sched_policy = SCHED_NORMAL, 6855 }; 6856 6857 read_lock(&tasklist_lock); 6858 for_each_process_thread(g, p) { 6859 /* 6860 * Only normalize user tasks: 6861 */ 6862 if (p->flags & PF_KTHREAD) 6863 continue; 6864 6865 p->se.exec_start = 0; 6866 schedstat_set(p->se.statistics.wait_start, 0); 6867 schedstat_set(p->se.statistics.sleep_start, 0); 6868 schedstat_set(p->se.statistics.block_start, 0); 6869 6870 if (!dl_task(p) && !rt_task(p)) { 6871 /* 6872 * Renice negative nice level userspace 6873 * tasks back to 0: 6874 */ 6875 if (task_nice(p) < 0) 6876 set_user_nice(p, 0); 6877 continue; 6878 } 6879 6880 
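		/*
		 * Drop the remaining RT/DL task back to SCHED_NORMAL,
		 * skipping the permission checks (user == false) and the
		 * rt_mutex PI chain update (pi == false).
		 */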
__sched_setscheduler(p, &attr, false, false); 6881 } 6882 read_unlock(&tasklist_lock); 6883 } 6884 6885 #endif /* CONFIG_MAGIC_SYSRQ */ 6886 6887 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 6888 /* 6889 * These functions are only useful for the IA64 MCA handling, or kdb. 6890 * 6891 * They can only be called when the whole system has been 6892 * stopped - every CPU needs to be quiescent, and no scheduling 6893 * activity can take place. Using them for anything else would 6894 * be a serious bug, and as a result, they aren't even visible 6895 * under any other configuration. 6896 */ 6897 6898 /** 6899 * curr_task - return the current task for a given CPU. 6900 * @cpu: the processor in question. 6901 * 6902 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6903 * 6904 * Return: The current task for @cpu. 6905 */ 6906 struct task_struct *curr_task(int cpu) 6907 { 6908 return cpu_curr(cpu); 6909 } 6910 6911 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 6912 6913 #ifdef CONFIG_IA64 6914 /** 6915 * ia64_set_curr_task - set the current task for a given CPU. 6916 * @cpu: the processor in question. 6917 * @p: the task pointer to set. 6918 * 6919 * Description: This function must only be used when non-maskable interrupts 6920 * are serviced on a separate stack. It allows the architecture to switch the 6921 * notion of the current task on a CPU in a non-blocking manner. This function 6922 * must be called with all CPU's synchronized, and interrupts disabled, the 6923 * and caller must save the original value of the current task (see 6924 * curr_task() above) and restore that value before reenabling interrupts and 6925 * re-starting the system. 6926 * 6927 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6928 */ 6929 void ia64_set_curr_task(int cpu, struct task_struct *p) 6930 { 6931 cpu_curr(cpu) = p; 6932 } 6933 6934 #endif 6935 6936 #ifdef CONFIG_CGROUP_SCHED 6937 /* task_group_lock serializes the addition/removal of task groups */ 6938 static DEFINE_SPINLOCK(task_group_lock); 6939 6940 static inline void alloc_uclamp_sched_group(struct task_group *tg, 6941 struct task_group *parent) 6942 { 6943 #ifdef CONFIG_UCLAMP_TASK_GROUP 6944 enum uclamp_id clamp_id; 6945 6946 for_each_clamp_id(clamp_id) { 6947 uclamp_se_set(&tg->uclamp_req[clamp_id], 6948 uclamp_none(clamp_id), false); 6949 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; 6950 } 6951 #endif 6952 } 6953 6954 static void sched_free_group(struct task_group *tg) 6955 { 6956 free_fair_sched_group(tg); 6957 free_rt_sched_group(tg); 6958 autogroup_free(tg); 6959 kmem_cache_free(task_group_cache, tg); 6960 } 6961 6962 /* allocate runqueue etc for a new task group */ 6963 struct task_group *sched_create_group(struct task_group *parent) 6964 { 6965 struct task_group *tg; 6966 6967 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 6968 if (!tg) 6969 return ERR_PTR(-ENOMEM); 6970 6971 if (!alloc_fair_sched_group(tg, parent)) 6972 goto err; 6973 6974 if (!alloc_rt_sched_group(tg, parent)) 6975 goto err; 6976 6977 alloc_uclamp_sched_group(tg, parent); 6978 6979 return tg; 6980 6981 err: 6982 sched_free_group(tg); 6983 return ERR_PTR(-ENOMEM); 6984 } 6985 6986 void sched_online_group(struct task_group *tg, struct task_group *parent) 6987 { 6988 unsigned long flags; 6989 6990 spin_lock_irqsave(&task_group_lock, flags); 6991 list_add_rcu(&tg->list, &task_groups); 6992 6993 /* Root should already exist: */ 6994 WARN_ON(!parent); 6995 6996 tg->parent = parent; 6997 INIT_LIST_HEAD(&tg->children); 6998 
list_add_rcu(&tg->siblings, &parent->children); 6999 spin_unlock_irqrestore(&task_group_lock, flags); 7000 7001 online_fair_sched_group(tg); 7002 } 7003 7004 /* rcu callback to free various structures associated with a task group */ 7005 static void sched_free_group_rcu(struct rcu_head *rhp) 7006 { 7007 /* Now it should be safe to free those cfs_rqs: */ 7008 sched_free_group(container_of(rhp, struct task_group, rcu)); 7009 } 7010 7011 void sched_destroy_group(struct task_group *tg) 7012 { 7013 /* Wait for possible concurrent references to cfs_rqs complete: */ 7014 call_rcu(&tg->rcu, sched_free_group_rcu); 7015 } 7016 7017 void sched_offline_group(struct task_group *tg) 7018 { 7019 unsigned long flags; 7020 7021 /* End participation in shares distribution: */ 7022 unregister_fair_sched_group(tg); 7023 7024 spin_lock_irqsave(&task_group_lock, flags); 7025 list_del_rcu(&tg->list); 7026 list_del_rcu(&tg->siblings); 7027 spin_unlock_irqrestore(&task_group_lock, flags); 7028 } 7029 7030 static void sched_change_group(struct task_struct *tsk, int type) 7031 { 7032 struct task_group *tg; 7033 7034 /* 7035 * All callers are synchronized by task_rq_lock(); we do not use RCU 7036 * which is pointless here. Thus, we pass "true" to task_css_check() 7037 * to prevent lockdep warnings. 7038 */ 7039 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 7040 struct task_group, css); 7041 tg = autogroup_task_group(tsk, tg); 7042 tsk->sched_task_group = tg; 7043 7044 #ifdef CONFIG_FAIR_GROUP_SCHED 7045 if (tsk->sched_class->task_change_group) 7046 tsk->sched_class->task_change_group(tsk, type); 7047 else 7048 #endif 7049 set_task_rq(tsk, task_cpu(tsk)); 7050 } 7051 7052 /* 7053 * Change task's runqueue when it moves between groups. 7054 * 7055 * The caller of this function should have put the task in its new group by 7056 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect 7057 * its new group. 7058 */ 7059 void sched_move_task(struct task_struct *tsk) 7060 { 7061 int queued, running, queue_flags = 7062 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 7063 struct rq_flags rf; 7064 struct rq *rq; 7065 7066 rq = task_rq_lock(tsk, &rf); 7067 update_rq_clock(rq); 7068 7069 running = task_current(rq, tsk); 7070 queued = task_on_rq_queued(tsk); 7071 7072 if (queued) 7073 dequeue_task(rq, tsk, queue_flags); 7074 if (running) 7075 put_prev_task(rq, tsk); 7076 7077 sched_change_group(tsk, TASK_MOVE_GROUP); 7078 7079 if (queued) 7080 enqueue_task(rq, tsk, queue_flags); 7081 if (running) { 7082 set_next_task(rq, tsk); 7083 /* 7084 * After changing group, the running task may have joined a 7085 * throttled one but it's still the running task. Trigger a 7086 * resched to make sure that task can still run. 7087 */ 7088 resched_curr(rq); 7089 } 7090 7091 task_rq_unlock(rq, tsk, &rf); 7092 } 7093 7094 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 7095 { 7096 return css ? 
container_of(css, struct task_group, css) : NULL; 7097 } 7098 7099 static struct cgroup_subsys_state * 7100 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 7101 { 7102 struct task_group *parent = css_tg(parent_css); 7103 struct task_group *tg; 7104 7105 if (!parent) { 7106 /* This is early initialization for the top cgroup */ 7107 return &root_task_group.css; 7108 } 7109 7110 tg = sched_create_group(parent); 7111 if (IS_ERR(tg)) 7112 return ERR_PTR(-ENOMEM); 7113 7114 return &tg->css; 7115 } 7116 7117 /* Expose task group only after completing cgroup initialization */ 7118 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 7119 { 7120 struct task_group *tg = css_tg(css); 7121 struct task_group *parent = css_tg(css->parent); 7122 7123 if (parent) 7124 sched_online_group(tg, parent); 7125 7126 #ifdef CONFIG_UCLAMP_TASK_GROUP 7127 /* Propagate the effective uclamp value for the new group */ 7128 cpu_util_update_eff(css); 7129 #endif 7130 7131 return 0; 7132 } 7133 7134 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 7135 { 7136 struct task_group *tg = css_tg(css); 7137 7138 sched_offline_group(tg); 7139 } 7140 7141 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 7142 { 7143 struct task_group *tg = css_tg(css); 7144 7145 /* 7146 * Relies on the RCU grace period between css_released() and this. 7147 */ 7148 sched_free_group(tg); 7149 } 7150 7151 /* 7152 * This is called before wake_up_new_task(), therefore we really only 7153 * have to set its group bits, all the other stuff does not apply. 7154 */ 7155 static void cpu_cgroup_fork(struct task_struct *task) 7156 { 7157 struct rq_flags rf; 7158 struct rq *rq; 7159 7160 rq = task_rq_lock(task, &rf); 7161 7162 update_rq_clock(rq); 7163 sched_change_group(task, TASK_SET_GROUP); 7164 7165 task_rq_unlock(rq, task, &rf); 7166 } 7167 7168 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 7169 { 7170 struct task_struct *task; 7171 struct cgroup_subsys_state *css; 7172 int ret = 0; 7173 7174 cgroup_taskset_for_each(task, css, tset) { 7175 #ifdef CONFIG_RT_GROUP_SCHED 7176 if (!sched_rt_can_attach(css_tg(css), task)) 7177 return -EINVAL; 7178 #endif 7179 /* 7180 * Serialize against wake_up_new_task() such that if its 7181 * running, we're sure to observe its full state. 7182 */ 7183 raw_spin_lock_irq(&task->pi_lock); 7184 /* 7185 * Avoid calling sched_move_task() before wake_up_new_task() 7186 * has happened. This would lead to problems with PELT, due to 7187 * move wanting to detach+attach while we're not attached yet. 7188 */ 7189 if (task->state == TASK_NEW) 7190 ret = -EINVAL; 7191 raw_spin_unlock_irq(&task->pi_lock); 7192 7193 if (ret) 7194 break; 7195 } 7196 return ret; 7197 } 7198 7199 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 7200 { 7201 struct task_struct *task; 7202 struct cgroup_subsys_state *css; 7203 7204 cgroup_taskset_for_each(task, css, tset) 7205 sched_move_task(task); 7206 } 7207 7208 #ifdef CONFIG_UCLAMP_TASK_GROUP 7209 static void cpu_util_update_eff(struct cgroup_subsys_state *css) 7210 { 7211 struct cgroup_subsys_state *top_css = css; 7212 struct uclamp_se *uc_parent = NULL; 7213 struct uclamp_se *uc_se = NULL; 7214 unsigned int eff[UCLAMP_CNT]; 7215 enum uclamp_id clamp_id; 7216 unsigned int clamps; 7217 7218 css_for_each_descendant_pre(css, top_css) { 7219 uc_parent = css_tg(css)->parent 7220 ? 
css_tg(css)->parent->uclamp : NULL; 7221 7222 for_each_clamp_id(clamp_id) { 7223 /* Assume effective clamps matches requested clamps */ 7224 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; 7225 /* Cap effective clamps with parent's effective clamps */ 7226 if (uc_parent && 7227 eff[clamp_id] > uc_parent[clamp_id].value) { 7228 eff[clamp_id] = uc_parent[clamp_id].value; 7229 } 7230 } 7231 /* Ensure protection is always capped by limit */ 7232 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); 7233 7234 /* Propagate most restrictive effective clamps */ 7235 clamps = 0x0; 7236 uc_se = css_tg(css)->uclamp; 7237 for_each_clamp_id(clamp_id) { 7238 if (eff[clamp_id] == uc_se[clamp_id].value) 7239 continue; 7240 uc_se[clamp_id].value = eff[clamp_id]; 7241 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); 7242 clamps |= (0x1 << clamp_id); 7243 } 7244 if (!clamps) { 7245 css = css_rightmost_descendant(css); 7246 continue; 7247 } 7248 7249 /* Immediately update descendants RUNNABLE tasks */ 7250 uclamp_update_active_tasks(css, clamps); 7251 } 7252 } 7253 7254 /* 7255 * Integer 10^N with a given N exponent by casting to integer the literal "1eN" 7256 * C expression. Since there is no way to convert a macro argument (N) into a 7257 * character constant, use two levels of macros. 7258 */ 7259 #define _POW10(exp) ((unsigned int)1e##exp) 7260 #define POW10(exp) _POW10(exp) 7261 7262 struct uclamp_request { 7263 #define UCLAMP_PERCENT_SHIFT 2 7264 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) 7265 s64 percent; 7266 u64 util; 7267 int ret; 7268 }; 7269 7270 static inline struct uclamp_request 7271 capacity_from_percent(char *buf) 7272 { 7273 struct uclamp_request req = { 7274 .percent = UCLAMP_PERCENT_SCALE, 7275 .util = SCHED_CAPACITY_SCALE, 7276 .ret = 0, 7277 }; 7278 7279 buf = strim(buf); 7280 if (strcmp(buf, "max")) { 7281 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, 7282 &req.percent); 7283 if (req.ret) 7284 return req; 7285 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { 7286 req.ret = -ERANGE; 7287 return req; 7288 } 7289 7290 req.util = req.percent << SCHED_CAPACITY_SHIFT; 7291 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); 7292 } 7293 7294 return req; 7295 } 7296 7297 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, 7298 size_t nbytes, loff_t off, 7299 enum uclamp_id clamp_id) 7300 { 7301 struct uclamp_request req; 7302 struct task_group *tg; 7303 7304 req = capacity_from_percent(buf); 7305 if (req.ret) 7306 return req.ret; 7307 7308 mutex_lock(&uclamp_mutex); 7309 rcu_read_lock(); 7310 7311 tg = css_tg(of_css(of)); 7312 if (tg->uclamp_req[clamp_id].value != req.util) 7313 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); 7314 7315 /* 7316 * Because of not recoverable conversion rounding we keep track of the 7317 * exact requested value 7318 */ 7319 tg->uclamp_pct[clamp_id] = req.percent; 7320 7321 /* Update effective clamps to track the most restrictive value */ 7322 cpu_util_update_eff(of_css(of)); 7323 7324 rcu_read_unlock(); 7325 mutex_unlock(&uclamp_mutex); 7326 7327 return nbytes; 7328 } 7329 7330 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, 7331 char *buf, size_t nbytes, 7332 loff_t off) 7333 { 7334 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); 7335 } 7336 7337 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of, 7338 char *buf, size_t nbytes, 7339 loff_t off) 7340 { 7341 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); 7342 } 7343 
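/*
 * A worked example of the conversion above (illustration only): writing
 * "25.50" to cpu.uclamp.min is parsed by cgroup_parse_float() into
 * percent = 2550 (i.e. 25.50 * POW10(UCLAMP_PERCENT_SHIFT)), and
 * capacity_from_percent() then yields:
 *
 *	util = DIV_ROUND_CLOSEST_ULL(2550 << SCHED_CAPACITY_SHIFT, 10000)
 *	     = DIV_ROUND_CLOSEST_ULL(2550 * 1024, 10000)
 *	     = 261
 *
 * The print path below uses the exact percent stored in uclamp_pct[],
 * so reads are not affected by this rounding.
 */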
7344 static inline void cpu_uclamp_print(struct seq_file *sf, 7345 enum uclamp_id clamp_id) 7346 { 7347 struct task_group *tg; 7348 u64 util_clamp; 7349 u64 percent; 7350 u32 rem; 7351 7352 rcu_read_lock(); 7353 tg = css_tg(seq_css(sf)); 7354 util_clamp = tg->uclamp_req[clamp_id].value; 7355 rcu_read_unlock(); 7356 7357 if (util_clamp == SCHED_CAPACITY_SCALE) { 7358 seq_puts(sf, "max\n"); 7359 return; 7360 } 7361 7362 percent = tg->uclamp_pct[clamp_id]; 7363 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); 7364 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); 7365 } 7366 7367 static int cpu_uclamp_min_show(struct seq_file *sf, void *v) 7368 { 7369 cpu_uclamp_print(sf, UCLAMP_MIN); 7370 return 0; 7371 } 7372 7373 static int cpu_uclamp_max_show(struct seq_file *sf, void *v) 7374 { 7375 cpu_uclamp_print(sf, UCLAMP_MAX); 7376 return 0; 7377 } 7378 #endif /* CONFIG_UCLAMP_TASK_GROUP */ 7379 7380 #ifdef CONFIG_FAIR_GROUP_SCHED 7381 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 7382 struct cftype *cftype, u64 shareval) 7383 { 7384 if (shareval > scale_load_down(ULONG_MAX)) 7385 shareval = MAX_SHARES; 7386 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 7387 } 7388 7389 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 7390 struct cftype *cft) 7391 { 7392 struct task_group *tg = css_tg(css); 7393 7394 return (u64) scale_load_down(tg->shares); 7395 } 7396 7397 #ifdef CONFIG_CFS_BANDWIDTH 7398 static DEFINE_MUTEX(cfs_constraints_mutex); 7399 7400 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 7401 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 7402 7403 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 7404 7405 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 7406 { 7407 int i, ret = 0, runtime_enabled, runtime_was_enabled; 7408 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7409 7410 if (tg == &root_task_group) 7411 return -EINVAL; 7412 7413 /* 7414 * Ensure we have at some amount of bandwidth every period. This is 7415 * to prevent reaching a state of large arrears when throttled via 7416 * entity_tick() resulting in prolonged exit starvation. 7417 */ 7418 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 7419 return -EINVAL; 7420 7421 /* 7422 * Likewise, bound things on the otherside by preventing insane quota 7423 * periods. This also allows us to normalize in computing quota 7424 * feasibility. 7425 */ 7426 if (period > max_cfs_quota_period) 7427 return -EINVAL; 7428 7429 /* 7430 * Prevent race between setting of cfs_rq->runtime_enabled and 7431 * unthrottle_offline_cfs_rqs(). 
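 * The get_online_cpus() section below keeps the online mask stable
 * while each online cfs_rq's runtime_enabled and runtime_remaining
 * are rewritten under its rq lock.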
7432 */ 7433 get_online_cpus(); 7434 mutex_lock(&cfs_constraints_mutex); 7435 ret = __cfs_schedulable(tg, period, quota); 7436 if (ret) 7437 goto out_unlock; 7438 7439 runtime_enabled = quota != RUNTIME_INF; 7440 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 7441 /* 7442 * If we need to toggle cfs_bandwidth_used, off->on must occur 7443 * before making related changes, and on->off must occur afterwards 7444 */ 7445 if (runtime_enabled && !runtime_was_enabled) 7446 cfs_bandwidth_usage_inc(); 7447 raw_spin_lock_irq(&cfs_b->lock); 7448 cfs_b->period = ns_to_ktime(period); 7449 cfs_b->quota = quota; 7450 7451 __refill_cfs_bandwidth_runtime(cfs_b); 7452 7453 /* Restart the period timer (if active) to handle new period expiry: */ 7454 if (runtime_enabled) 7455 start_cfs_bandwidth(cfs_b); 7456 7457 raw_spin_unlock_irq(&cfs_b->lock); 7458 7459 for_each_online_cpu(i) { 7460 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 7461 struct rq *rq = cfs_rq->rq; 7462 struct rq_flags rf; 7463 7464 rq_lock_irq(rq, &rf); 7465 cfs_rq->runtime_enabled = runtime_enabled; 7466 cfs_rq->runtime_remaining = 0; 7467 7468 if (cfs_rq->throttled) 7469 unthrottle_cfs_rq(cfs_rq); 7470 rq_unlock_irq(rq, &rf); 7471 } 7472 if (runtime_was_enabled && !runtime_enabled) 7473 cfs_bandwidth_usage_dec(); 7474 out_unlock: 7475 mutex_unlock(&cfs_constraints_mutex); 7476 put_online_cpus(); 7477 7478 return ret; 7479 } 7480 7481 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 7482 { 7483 u64 quota, period; 7484 7485 period = ktime_to_ns(tg->cfs_bandwidth.period); 7486 if (cfs_quota_us < 0) 7487 quota = RUNTIME_INF; 7488 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) 7489 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 7490 else 7491 return -EINVAL; 7492 7493 return tg_set_cfs_bandwidth(tg, period, quota); 7494 } 7495 7496 static long tg_get_cfs_quota(struct task_group *tg) 7497 { 7498 u64 quota_us; 7499 7500 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 7501 return -1; 7502 7503 quota_us = tg->cfs_bandwidth.quota; 7504 do_div(quota_us, NSEC_PER_USEC); 7505 7506 return quota_us; 7507 } 7508 7509 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 7510 { 7511 u64 quota, period; 7512 7513 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 7514 return -EINVAL; 7515 7516 period = (u64)cfs_period_us * NSEC_PER_USEC; 7517 quota = tg->cfs_bandwidth.quota; 7518 7519 return tg_set_cfs_bandwidth(tg, period, quota); 7520 } 7521 7522 static long tg_get_cfs_period(struct task_group *tg) 7523 { 7524 u64 cfs_period_us; 7525 7526 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 7527 do_div(cfs_period_us, NSEC_PER_USEC); 7528 7529 return cfs_period_us; 7530 } 7531 7532 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 7533 struct cftype *cft) 7534 { 7535 return tg_get_cfs_quota(css_tg(css)); 7536 } 7537 7538 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 7539 struct cftype *cftype, s64 cfs_quota_us) 7540 { 7541 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 7542 } 7543 7544 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 7545 struct cftype *cft) 7546 { 7547 return tg_get_cfs_period(css_tg(css)); 7548 } 7549 7550 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 7551 struct cftype *cftype, u64 cfs_period_us) 7552 { 7553 return tg_set_cfs_period(css_tg(css), cfs_period_us); 7554 } 7555 7556 struct cfs_schedulable_data { 7557 struct task_group *tg; 7558 u64 period, quota; 7559 }; 7560 7561 /* 7562 * normalize group quota/period to 
be quota/max_period 7563 * note: units are usecs 7564 */ 7565 static u64 normalize_cfs_quota(struct task_group *tg, 7566 struct cfs_schedulable_data *d) 7567 { 7568 u64 quota, period; 7569 7570 if (tg == d->tg) { 7571 period = d->period; 7572 quota = d->quota; 7573 } else { 7574 period = tg_get_cfs_period(tg); 7575 quota = tg_get_cfs_quota(tg); 7576 } 7577 7578 /* note: these should typically be equivalent */ 7579 if (quota == RUNTIME_INF || quota == -1) 7580 return RUNTIME_INF; 7581 7582 return to_ratio(period, quota); 7583 } 7584 7585 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 7586 { 7587 struct cfs_schedulable_data *d = data; 7588 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7589 s64 quota = 0, parent_quota = -1; 7590 7591 if (!tg->parent) { 7592 quota = RUNTIME_INF; 7593 } else { 7594 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 7595 7596 quota = normalize_cfs_quota(tg, d); 7597 parent_quota = parent_b->hierarchical_quota; 7598 7599 /* 7600 * Ensure max(child_quota) <= parent_quota. On cgroup2, 7601 * always take the min. On cgroup1, only inherit when no 7602 * limit is set: 7603 */ 7604 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 7605 quota = min(quota, parent_quota); 7606 } else { 7607 if (quota == RUNTIME_INF) 7608 quota = parent_quota; 7609 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 7610 return -EINVAL; 7611 } 7612 } 7613 cfs_b->hierarchical_quota = quota; 7614 7615 return 0; 7616 } 7617 7618 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 7619 { 7620 int ret; 7621 struct cfs_schedulable_data data = { 7622 .tg = tg, 7623 .period = period, 7624 .quota = quota, 7625 }; 7626 7627 if (quota != RUNTIME_INF) { 7628 do_div(data.period, NSEC_PER_USEC); 7629 do_div(data.quota, NSEC_PER_USEC); 7630 } 7631 7632 rcu_read_lock(); 7633 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 7634 rcu_read_unlock(); 7635 7636 return ret; 7637 } 7638 7639 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 7640 { 7641 struct task_group *tg = css_tg(seq_css(sf)); 7642 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7643 7644 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 7645 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 7646 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 7647 7648 if (schedstat_enabled() && tg != &root_task_group) { 7649 u64 ws = 0; 7650 int i; 7651 7652 for_each_possible_cpu(i) 7653 ws += schedstat_val(tg->se[i]->statistics.wait_sum); 7654 7655 seq_printf(sf, "wait_sum %llu\n", ws); 7656 } 7657 7658 return 0; 7659 } 7660 #endif /* CONFIG_CFS_BANDWIDTH */ 7661 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7662 7663 #ifdef CONFIG_RT_GROUP_SCHED 7664 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 7665 struct cftype *cft, s64 val) 7666 { 7667 return sched_group_set_rt_runtime(css_tg(css), val); 7668 } 7669 7670 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 7671 struct cftype *cft) 7672 { 7673 return sched_group_rt_runtime(css_tg(css)); 7674 } 7675 7676 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 7677 struct cftype *cftype, u64 rt_period_us) 7678 { 7679 return sched_group_set_rt_period(css_tg(css), rt_period_us); 7680 } 7681 7682 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 7683 struct cftype *cft) 7684 { 7685 return sched_group_rt_period(css_tg(css)); 7686 } 7687 #endif /* CONFIG_RT_GROUP_SCHED */ 7688 7689 static struct cftype cpu_legacy_files[] = { 7690 #ifdef 
CONFIG_FAIR_GROUP_SCHED 7691 { 7692 .name = "shares", 7693 .read_u64 = cpu_shares_read_u64, 7694 .write_u64 = cpu_shares_write_u64, 7695 }, 7696 #endif 7697 #ifdef CONFIG_CFS_BANDWIDTH 7698 { 7699 .name = "cfs_quota_us", 7700 .read_s64 = cpu_cfs_quota_read_s64, 7701 .write_s64 = cpu_cfs_quota_write_s64, 7702 }, 7703 { 7704 .name = "cfs_period_us", 7705 .read_u64 = cpu_cfs_period_read_u64, 7706 .write_u64 = cpu_cfs_period_write_u64, 7707 }, 7708 { 7709 .name = "stat", 7710 .seq_show = cpu_cfs_stat_show, 7711 }, 7712 #endif 7713 #ifdef CONFIG_RT_GROUP_SCHED 7714 { 7715 .name = "rt_runtime_us", 7716 .read_s64 = cpu_rt_runtime_read, 7717 .write_s64 = cpu_rt_runtime_write, 7718 }, 7719 { 7720 .name = "rt_period_us", 7721 .read_u64 = cpu_rt_period_read_uint, 7722 .write_u64 = cpu_rt_period_write_uint, 7723 }, 7724 #endif 7725 #ifdef CONFIG_UCLAMP_TASK_GROUP 7726 { 7727 .name = "uclamp.min", 7728 .flags = CFTYPE_NOT_ON_ROOT, 7729 .seq_show = cpu_uclamp_min_show, 7730 .write = cpu_uclamp_min_write, 7731 }, 7732 { 7733 .name = "uclamp.max", 7734 .flags = CFTYPE_NOT_ON_ROOT, 7735 .seq_show = cpu_uclamp_max_show, 7736 .write = cpu_uclamp_max_write, 7737 }, 7738 #endif 7739 { } /* Terminate */ 7740 }; 7741 7742 static int cpu_extra_stat_show(struct seq_file *sf, 7743 struct cgroup_subsys_state *css) 7744 { 7745 #ifdef CONFIG_CFS_BANDWIDTH 7746 { 7747 struct task_group *tg = css_tg(css); 7748 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7749 u64 throttled_usec; 7750 7751 throttled_usec = cfs_b->throttled_time; 7752 do_div(throttled_usec, NSEC_PER_USEC); 7753 7754 seq_printf(sf, "nr_periods %d\n" 7755 "nr_throttled %d\n" 7756 "throttled_usec %llu\n", 7757 cfs_b->nr_periods, cfs_b->nr_throttled, 7758 throttled_usec); 7759 } 7760 #endif 7761 return 0; 7762 } 7763 7764 #ifdef CONFIG_FAIR_GROUP_SCHED 7765 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 7766 struct cftype *cft) 7767 { 7768 struct task_group *tg = css_tg(css); 7769 u64 weight = scale_load_down(tg->shares); 7770 7771 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); 7772 } 7773 7774 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 7775 struct cftype *cft, u64 weight) 7776 { 7777 /* 7778 * cgroup weight knobs should use the common MIN, DFL and MAX 7779 * values which are 1, 100 and 10000 respectively. While it loses 7780 * a bit of range on both ends, it maps pretty well onto the shares 7781 * value used by scheduler and the round-trip conversions preserve 7782 * the original value over the entire range. 
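 *
 * For example, the default weight 100 maps to shares
 * DIV_ROUND_CLOSEST_ULL(100 * 1024, 100) = 1024, and reading it back
 * through cpu_weight_read_u64() gives
 * DIV_ROUND_CLOSEST_ULL(1024 * 100, 1024) = 100 again.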
7783 */ 7784 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) 7785 return -ERANGE; 7786 7787 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); 7788 7789 return sched_group_set_shares(css_tg(css), scale_load(weight)); 7790 } 7791 7792 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 7793 struct cftype *cft) 7794 { 7795 unsigned long weight = scale_load_down(css_tg(css)->shares); 7796 int last_delta = INT_MAX; 7797 int prio, delta; 7798 7799 /* find the closest nice value to the current weight */ 7800 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 7801 delta = abs(sched_prio_to_weight[prio] - weight); 7802 if (delta >= last_delta) 7803 break; 7804 last_delta = delta; 7805 } 7806 7807 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 7808 } 7809 7810 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 7811 struct cftype *cft, s64 nice) 7812 { 7813 unsigned long weight; 7814 int idx; 7815 7816 if (nice < MIN_NICE || nice > MAX_NICE) 7817 return -ERANGE; 7818 7819 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 7820 idx = array_index_nospec(idx, 40); 7821 weight = sched_prio_to_weight[idx]; 7822 7823 return sched_group_set_shares(css_tg(css), scale_load(weight)); 7824 } 7825 #endif 7826 7827 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 7828 long period, long quota) 7829 { 7830 if (quota < 0) 7831 seq_puts(sf, "max"); 7832 else 7833 seq_printf(sf, "%ld", quota); 7834 7835 seq_printf(sf, " %ld\n", period); 7836 } 7837 7838 /* caller should put the current value in *@periodp before calling */ 7839 static int __maybe_unused cpu_period_quota_parse(char *buf, 7840 u64 *periodp, u64 *quotap) 7841 { 7842 char tok[21]; /* U64_MAX */ 7843 7844 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 7845 return -EINVAL; 7846 7847 *periodp *= NSEC_PER_USEC; 7848 7849 if (sscanf(tok, "%llu", quotap)) 7850 *quotap *= NSEC_PER_USEC; 7851 else if (!strcmp(tok, "max")) 7852 *quotap = RUNTIME_INF; 7853 else 7854 return -EINVAL; 7855 7856 return 0; 7857 } 7858 7859 #ifdef CONFIG_CFS_BANDWIDTH 7860 static int cpu_max_show(struct seq_file *sf, void *v) 7861 { 7862 struct task_group *tg = css_tg(seq_css(sf)); 7863 7864 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); 7865 return 0; 7866 } 7867 7868 static ssize_t cpu_max_write(struct kernfs_open_file *of, 7869 char *buf, size_t nbytes, loff_t off) 7870 { 7871 struct task_group *tg = css_tg(of_css(of)); 7872 u64 period = tg_get_cfs_period(tg); 7873 u64 quota; 7874 int ret; 7875 7876 ret = cpu_period_quota_parse(buf, &period, "a); 7877 if (!ret) 7878 ret = tg_set_cfs_bandwidth(tg, period, quota); 7879 return ret ?: nbytes; 7880 } 7881 #endif 7882 7883 static struct cftype cpu_files[] = { 7884 #ifdef CONFIG_FAIR_GROUP_SCHED 7885 { 7886 .name = "weight", 7887 .flags = CFTYPE_NOT_ON_ROOT, 7888 .read_u64 = cpu_weight_read_u64, 7889 .write_u64 = cpu_weight_write_u64, 7890 }, 7891 { 7892 .name = "weight.nice", 7893 .flags = CFTYPE_NOT_ON_ROOT, 7894 .read_s64 = cpu_weight_nice_read_s64, 7895 .write_s64 = cpu_weight_nice_write_s64, 7896 }, 7897 #endif 7898 #ifdef CONFIG_CFS_BANDWIDTH 7899 { 7900 .name = "max", 7901 .flags = CFTYPE_NOT_ON_ROOT, 7902 .seq_show = cpu_max_show, 7903 .write = cpu_max_write, 7904 }, 7905 #endif 7906 #ifdef CONFIG_UCLAMP_TASK_GROUP 7907 { 7908 .name = "uclamp.min", 7909 .flags = CFTYPE_NOT_ON_ROOT, 7910 .seq_show = cpu_uclamp_min_show, 7911 .write = cpu_uclamp_min_write, 7912 }, 7913 { 7914 .name = "uclamp.max", 7915 .flags = 
CFTYPE_NOT_ON_ROOT, 7916 .seq_show = cpu_uclamp_max_show, 7917 .write = cpu_uclamp_max_write, 7918 }, 7919 #endif 7920 { } /* terminate */ 7921 }; 7922 7923 struct cgroup_subsys cpu_cgrp_subsys = { 7924 .css_alloc = cpu_cgroup_css_alloc, 7925 .css_online = cpu_cgroup_css_online, 7926 .css_released = cpu_cgroup_css_released, 7927 .css_free = cpu_cgroup_css_free, 7928 .css_extra_stat_show = cpu_extra_stat_show, 7929 .fork = cpu_cgroup_fork, 7930 .can_attach = cpu_cgroup_can_attach, 7931 .attach = cpu_cgroup_attach, 7932 .legacy_cftypes = cpu_legacy_files, 7933 .dfl_cftypes = cpu_files, 7934 .early_init = true, 7935 .threaded = true, 7936 }; 7937 7938 #endif /* CONFIG_CGROUP_SCHED */ 7939 7940 void dump_cpu_task(int cpu) 7941 { 7942 pr_info("Task dump for CPU %d:\n", cpu); 7943 sched_show_task(cpu_curr(cpu)); 7944 } 7945 7946 /* 7947 * Nice levels are multiplicative, with a gentle 10% change for every 7948 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 7949 * nice 1, it will get ~10% less CPU time than another CPU-bound task 7950 * that remained on nice 0. 7951 * 7952 * The "10% effect" is relative and cumulative: from _any_ nice level, 7953 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 7954 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 7955 * If a task goes up by ~10% and another task goes down by ~10% then 7956 * the relative distance between them is ~25%.) 7957 */ 7958 const int sched_prio_to_weight[40] = { 7959 /* -20 */ 88761, 71755, 56483, 46273, 36291, 7960 /* -15 */ 29154, 23254, 18705, 14949, 11916, 7961 /* -10 */ 9548, 7620, 6100, 4904, 3906, 7962 /* -5 */ 3121, 2501, 1991, 1586, 1277, 7963 /* 0 */ 1024, 820, 655, 526, 423, 7964 /* 5 */ 335, 272, 215, 172, 137, 7965 /* 10 */ 110, 87, 70, 56, 45, 7966 /* 15 */ 36, 29, 23, 18, 15, 7967 }; 7968 7969 /* 7970 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. 7971 * 7972 * In cases where the weight does not change often, we can use the 7973 * precalculated inverse to speed up arithmetics by turning divisions 7974 * into multiplications: 7975 */ 7976 const u32 sched_prio_to_wmult[40] = { 7977 /* -20 */ 48388, 59856, 76040, 92818, 118348, 7978 /* -15 */ 147320, 184698, 229616, 287308, 360437, 7979 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 7980 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 7981 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 7982 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 7983 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 7984 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 7985 }; 7986 7987 #undef CREATE_TRACE_POINTS 7988
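
/*
 * A worked example of the table above (illustration only): a CPU-bound
 * nice-0 task (weight 1024) competing with a nice-1 task (weight 820)
 * receives 1024 / (1024 + 820) ~= 55.5% of the CPU and the nice-1 task
 * ~44.5%, which is about the intended 10% swing per nice level, with a
 * weight ratio of 1024/820 ~= 1.25.
 */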