1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * kernel/sched/core.c 4 * 5 * Core kernel scheduler code and related syscalls 6 * 7 * Copyright (C) 1991-2002 Linus Torvalds 8 */ 9 #include "sched.h" 10 11 #include <linux/nospec.h> 12 13 #include <linux/kcov.h> 14 #include <linux/scs.h> 15 16 #include <asm/switch_to.h> 17 #include <asm/tlb.h> 18 19 #include "../workqueue_internal.h" 20 #include "../../fs/io-wq.h" 21 #include "../smpboot.h" 22 23 #include "pelt.h" 24 #include "smp.h" 25 26 #define CREATE_TRACE_POINTS 27 #include <trace/events/sched.h> 28 29 /* 30 * Export tracepoints that act as a bare tracehook (ie: have no trace event 31 * associated with them) to allow external modules to probe them. 32 */ 33 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp); 34 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp); 35 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp); 36 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp); 37 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp); 38 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp); 39 40 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 41 42 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) 43 /* 44 * Debugging: various feature bits 45 * 46 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of 47 * sysctl_sched_features, defined in sched.h, to allow constants propagation 48 * at compile time and compiler optimization based on features default. 49 */ 50 #define SCHED_FEAT(name, enabled) \ 51 (1UL << __SCHED_FEAT_##name) * enabled | 52 const_debug unsigned int sysctl_sched_features = 53 #include "features.h" 54 0; 55 #undef SCHED_FEAT 56 #endif 57 58 /* 59 * Number of tasks to iterate in a single balance run. 60 * Limited because this is done with IRQs disabled. 61 */ 62 const_debug unsigned int sysctl_sched_nr_migrate = 32; 63 64 /* 65 * period over which we measure -rt task CPU usage in us. 66 * default: 1s 67 */ 68 unsigned int sysctl_sched_rt_period = 1000000; 69 70 __read_mostly int scheduler_running; 71 72 /* 73 * part of the period that we allow rt tasks to run in us. 74 * default: 0.95s 75 */ 76 int sysctl_sched_rt_runtime = 950000; 77 78 /* 79 * __task_rq_lock - lock the rq @p resides on. 80 */ 81 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 82 __acquires(rq->lock) 83 { 84 struct rq *rq; 85 86 lockdep_assert_held(&p->pi_lock); 87 88 for (;;) { 89 rq = task_rq(p); 90 raw_spin_lock(&rq->lock); 91 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { 92 rq_pin_lock(rq, rf); 93 return rq; 94 } 95 raw_spin_unlock(&rq->lock); 96 97 while (unlikely(task_on_rq_migrating(p))) 98 cpu_relax(); 99 } 100 } 101 102 /* 103 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. 104 */ 105 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 106 __acquires(p->pi_lock) 107 __acquires(rq->lock) 108 { 109 struct rq *rq; 110 111 for (;;) { 112 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); 113 rq = task_rq(p); 114 raw_spin_lock(&rq->lock); 115 /* 116 * move_queued_task() task_rq_lock() 117 * 118 * ACQUIRE (rq->lock) 119 * [S] ->on_rq = MIGRATING [L] rq = task_rq() 120 * WMB (__set_task_cpu()) ACQUIRE (rq->lock); 121 * [S] ->cpu = new_cpu [L] task_rq() 122 * [L] ->on_rq 123 * RELEASE (rq->lock) 124 * 125 * If we observe the old CPU in task_rq_lock(), the acquire of 126 * the old rq->lock will fully serialize against the stores. 
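 *
 * As an aside, an illustrative caller sketch for this API (hypothetical,
 * not from this file; update_some_stat() is a stand-in for whatever must
 * be done while @p is pinned to its runqueue):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	// takes p->pi_lock + rq->lock
 *	update_some_stat(rq, p);	// p cannot migrate while both are held
 *	task_rq_unlock(rq, p, &rf);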
127 * 128 * If we observe the new CPU in task_rq_lock(), the address 129 * dependency headed by '[L] rq = task_rq()' and the acquire 130 * will pair with the WMB to ensure we then also see migrating. 131 */ 132 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { 133 rq_pin_lock(rq, rf); 134 return rq; 135 } 136 raw_spin_unlock(&rq->lock); 137 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 138 139 while (unlikely(task_on_rq_migrating(p))) 140 cpu_relax(); 141 } 142 } 143 144 /* 145 * RQ-clock updating methods: 146 */ 147 148 static void update_rq_clock_task(struct rq *rq, s64 delta) 149 { 150 /* 151 * In theory, the compiler should just see 0 here, and optimize out the call 152 * to sched_rt_avg_update. But I don't trust it... 153 */ 154 s64 __maybe_unused steal = 0, irq_delta = 0; 155 156 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 157 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; 158 159 /* 160 * Since irq_time is only updated on {soft,}irq_exit, we might run into 161 * this case when a previous update_rq_clock() happened inside a 162 * {soft,}irq region. 163 * 164 * When this happens, we stop ->clock_task and only update the 165 * prev_irq_time stamp to account for the part that fit, so that a next 166 * update will consume the rest. This ensures ->clock_task is 167 * monotonic. 168 * 169 * It does however cause some slight misattribution of {soft,}irq 170 * time, a more accurate solution would be to update the irq_time using 171 * the current rq->clock timestamp, except that would require using 172 * atomic ops. 173 */ 174 if (irq_delta > delta) 175 irq_delta = delta; 176 177 rq->prev_irq_time += irq_delta; 178 delta -= irq_delta; 179 #endif 180 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 181 if (static_key_false((&paravirt_steal_rq_enabled))) { 182 steal = paravirt_steal_clock(cpu_of(rq)); 183 steal -= rq->prev_steal_time_rq; 184 185 if (unlikely(steal > delta)) 186 steal = delta; 187 188 rq->prev_steal_time_rq += steal; 189 delta -= steal; 190 } 191 #endif 192 193 rq->clock_task += delta; 194 195 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 196 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) 197 update_irq_load_avg(rq, irq_delta + steal); 198 #endif 199 update_rq_clock_pelt(rq, delta); 200 } 201 202 void update_rq_clock(struct rq *rq) 203 { 204 s64 delta; 205 206 lockdep_assert_held(&rq->lock); 207 208 if (rq->clock_update_flags & RQCF_ACT_SKIP) 209 return; 210 211 #ifdef CONFIG_SCHED_DEBUG 212 if (sched_feat(WARN_DOUBLE_CLOCK)) 213 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); 214 rq->clock_update_flags |= RQCF_UPDATED; 215 #endif 216 217 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; 218 if (delta < 0) 219 return; 220 rq->clock += delta; 221 update_rq_clock_task(rq, delta); 222 } 223 224 static inline void 225 rq_csd_init(struct rq *rq, call_single_data_t *csd, smp_call_func_t func) 226 { 227 csd->flags = 0; 228 csd->func = func; 229 csd->info = rq; 230 } 231 232 #ifdef CONFIG_SCHED_HRTICK 233 /* 234 * Use HR-timers to deliver accurate preemption points. 235 */ 236 237 static void hrtick_clear(struct rq *rq) 238 { 239 if (hrtimer_active(&rq->hrtick_timer)) 240 hrtimer_cancel(&rq->hrtick_timer); 241 } 242 243 /* 244 * High-resolution timer tick. 245 * Runs from hardirq context with interrupts disabled.
246 */ 247 static enum hrtimer_restart hrtick(struct hrtimer *timer) 248 { 249 struct rq *rq = container_of(timer, struct rq, hrtick_timer); 250 struct rq_flags rf; 251 252 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 253 254 rq_lock(rq, &rf); 255 update_rq_clock(rq); 256 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 257 rq_unlock(rq, &rf); 258 259 return HRTIMER_NORESTART; 260 } 261 262 #ifdef CONFIG_SMP 263 264 static void __hrtick_restart(struct rq *rq) 265 { 266 struct hrtimer *timer = &rq->hrtick_timer; 267 268 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD); 269 } 270 271 /* 272 * called from hardirq (IPI) context 273 */ 274 static void __hrtick_start(void *arg) 275 { 276 struct rq *rq = arg; 277 struct rq_flags rf; 278 279 rq_lock(rq, &rf); 280 __hrtick_restart(rq); 281 rq_unlock(rq, &rf); 282 } 283 284 /* 285 * Called to set the hrtick timer state. 286 * 287 * called with rq->lock held and irqs disabled 288 */ 289 void hrtick_start(struct rq *rq, u64 delay) 290 { 291 struct hrtimer *timer = &rq->hrtick_timer; 292 ktime_t time; 293 s64 delta; 294 295 /* 296 * Don't schedule slices shorter than 10000ns, that just 297 * doesn't make sense and can cause timer DoS. 298 */ 299 delta = max_t(s64, delay, 10000LL); 300 time = ktime_add_ns(timer->base->get_time(), delta); 301 302 hrtimer_set_expires(timer, time); 303 304 if (rq == this_rq()) 305 __hrtick_restart(rq); 306 else 307 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); 308 } 309 310 #else 311 /* 312 * Called to set the hrtick timer state. 313 * 314 * called with rq->lock held and irqs disabled 315 */ 316 void hrtick_start(struct rq *rq, u64 delay) 317 { 318 /* 319 * Don't schedule slices shorter than 10000ns, that just 320 * doesn't make sense. Rely on vruntime for fairness. 321 */ 322 delay = max_t(u64, delay, 10000LL); 323 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), 324 HRTIMER_MODE_REL_PINNED_HARD); 325 } 326 327 #endif /* CONFIG_SMP */ 328 329 static void hrtick_rq_init(struct rq *rq) 330 { 331 #ifdef CONFIG_SMP 332 rq_csd_init(rq, &rq->hrtick_csd, __hrtick_start); 333 #endif 334 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); 335 rq->hrtick_timer.function = hrtick; 336 } 337 #else /* CONFIG_SCHED_HRTICK */ 338 static inline void hrtick_clear(struct rq *rq) 339 { 340 } 341 342 static inline void hrtick_rq_init(struct rq *rq) 343 { 344 } 345 #endif /* CONFIG_SCHED_HRTICK */ 346 347 /* 348 * cmpxchg based fetch_or, macro so it works for different integer types 349 */ 350 #define fetch_or(ptr, mask) \ 351 ({ \ 352 typeof(ptr) _ptr = (ptr); \ 353 typeof(mask) _mask = (mask); \ 354 typeof(*_ptr) _old, _val = *_ptr; \ 355 \ 356 for (;;) { \ 357 _old = cmpxchg(_ptr, _val, _val | _mask); \ 358 if (_old == _val) \ 359 break; \ 360 _val = _old; \ 361 } \ 362 _old; \ 363 }) 364 365 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) 366 /* 367 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, 368 * this avoids any races wrt polling state changes and thereby avoids 369 * spurious IPIs. 370 */ 371 static bool set_nr_and_not_polling(struct task_struct *p) 372 { 373 struct thread_info *ti = task_thread_info(p); 374 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); 375 } 376 377 /* 378 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. 379 * 380 * If this returns true, then the idle task promises to call 381 * sched_ttwu_pending() and reschedule soon. 
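 *
 * For reference, an illustrative sketch (not from this file) of the
 * fetch_or() primitive that set_nr_and_not_polling() above relies on:
 * it atomically ORs the mask in and returns the value seen *before*
 * the update, so the caller can tell what it raced against:
 *
 *	unsigned long flags = _TIF_POLLING_NRFLAG;
 *	unsigned long old;
 *
 *	old = fetch_or(&flags, _TIF_NEED_RESCHED);
 *	// old   == _TIF_POLLING_NRFLAG
 *	// flags == _TIF_POLLING_NRFLAG | _TIF_NEED_RESCHED
 *
 * set_nr_and_not_polling() requests an IPI only when the old value did
 * not include _TIF_POLLING_NRFLAG.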
382 */ 383 static bool set_nr_if_polling(struct task_struct *p) 384 { 385 struct thread_info *ti = task_thread_info(p); 386 typeof(ti->flags) old, val = READ_ONCE(ti->flags); 387 388 for (;;) { 389 if (!(val & _TIF_POLLING_NRFLAG)) 390 return false; 391 if (val & _TIF_NEED_RESCHED) 392 return true; 393 old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED); 394 if (old == val) 395 break; 396 val = old; 397 } 398 return true; 399 } 400 401 #else 402 static bool set_nr_and_not_polling(struct task_struct *p) 403 { 404 set_tsk_need_resched(p); 405 return true; 406 } 407 408 #ifdef CONFIG_SMP 409 static bool set_nr_if_polling(struct task_struct *p) 410 { 411 return false; 412 } 413 #endif 414 #endif 415 416 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) 417 { 418 struct wake_q_node *node = &task->wake_q; 419 420 /* 421 * Atomically grab the task, if ->wake_q is !nil already it means 422 * its already queued (either by us or someone else) and will get the 423 * wakeup due to that. 424 * 425 * In order to ensure that a pending wakeup will observe our pending 426 * state, even in the failed case, an explicit smp_mb() must be used. 427 */ 428 smp_mb__before_atomic(); 429 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) 430 return false; 431 432 /* 433 * The head is context local, there can be no concurrency. 434 */ 435 *head->lastp = node; 436 head->lastp = &node->next; 437 return true; 438 } 439 440 /** 441 * wake_q_add() - queue a wakeup for 'later' waking. 442 * @head: the wake_q_head to add @task to 443 * @task: the task to queue for 'later' wakeup 444 * 445 * Queue a task for later wakeup, most likely by the wake_up_q() call in the 446 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come 447 * instantly. 448 * 449 * This function must be used as-if it were wake_up_process(); IOW the task 450 * must be ready to be woken at this location. 451 */ 452 void wake_q_add(struct wake_q_head *head, struct task_struct *task) 453 { 454 if (__wake_q_add(head, task)) 455 get_task_struct(task); 456 } 457 458 /** 459 * wake_q_add_safe() - safely queue a wakeup for 'later' waking. 460 * @head: the wake_q_head to add @task to 461 * @task: the task to queue for 'later' wakeup 462 * 463 * Queue a task for later wakeup, most likely by the wake_up_q() call in the 464 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come 465 * instantly. 466 * 467 * This function must be used as-if it were wake_up_process(); IOW the task 468 * must be ready to be woken at this location. 469 * 470 * This function is essentially a task-safe equivalent to wake_q_add(). Callers 471 * that already hold reference to @task can call the 'safe' version and trust 472 * wake_q to do the right thing depending whether or not the @task is already 473 * queued for wakeup. 474 */ 475 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) 476 { 477 if (!__wake_q_add(head, task)) 478 put_task_struct(task); 479 } 480 481 void wake_up_q(struct wake_q_head *head) 482 { 483 struct wake_q_node *node = head->first; 484 485 while (node != WAKE_Q_TAIL) { 486 struct task_struct *task; 487 488 task = container_of(node, struct task_struct, wake_q); 489 BUG_ON(!task); 490 /* Task can safely be re-inserted now: */ 491 node = node->next; 492 task->wake_q.next = NULL; 493 494 /* 495 * wake_up_process() executes a full barrier, which pairs with 496 * the queueing in wake_q_add() so as not to miss wakeups. 
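 *
 * For reference, a typical caller of this API batches wakeups under a
 * lock and issues them after dropping it; an illustrative sketch (the
 * waiter list and its lock are hypothetical, not from this file):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *	struct my_waiter *w;
 *
 *	spin_lock(&my_lock);
 *	list_for_each_entry(w, &my_waiters, list)
 *		wake_q_add(&wake_q, w->task);	// grabs a task reference
 *	spin_unlock(&my_lock);
 *
 *	wake_up_q(&wake_q);			// wakes and drops the references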
497 */ 498 wake_up_process(task); 499 put_task_struct(task); 500 } 501 } 502 503 /* 504 * resched_curr - mark rq's current task 'to be rescheduled now'. 505 * 506 * On UP this means the setting of the need_resched flag, on SMP it 507 * might also involve a cross-CPU call to trigger the scheduler on 508 * the target CPU. 509 */ 510 void resched_curr(struct rq *rq) 511 { 512 struct task_struct *curr = rq->curr; 513 int cpu; 514 515 lockdep_assert_held(&rq->lock); 516 517 if (test_tsk_need_resched(curr)) 518 return; 519 520 cpu = cpu_of(rq); 521 522 if (cpu == smp_processor_id()) { 523 set_tsk_need_resched(curr); 524 set_preempt_need_resched(); 525 return; 526 } 527 528 if (set_nr_and_not_polling(curr)) 529 smp_send_reschedule(cpu); 530 else 531 trace_sched_wake_idle_without_ipi(cpu); 532 } 533 534 void resched_cpu(int cpu) 535 { 536 struct rq *rq = cpu_rq(cpu); 537 unsigned long flags; 538 539 raw_spin_lock_irqsave(&rq->lock, flags); 540 if (cpu_online(cpu) || cpu == smp_processor_id()) 541 resched_curr(rq); 542 raw_spin_unlock_irqrestore(&rq->lock, flags); 543 } 544 545 #ifdef CONFIG_SMP 546 #ifdef CONFIG_NO_HZ_COMMON 547 /* 548 * In the semi idle case, use the nearest busy CPU for migrating timers 549 * from an idle CPU. This is good for power-savings. 550 * 551 * We don't do similar optimization for completely idle system, as 552 * selecting an idle CPU will add more delays to the timers than intended 553 * (as that CPU's timer base may not be uptodate wrt jiffies etc). 554 */ 555 int get_nohz_timer_target(void) 556 { 557 int i, cpu = smp_processor_id(), default_cpu = -1; 558 struct sched_domain *sd; 559 560 if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) { 561 if (!idle_cpu(cpu)) 562 return cpu; 563 default_cpu = cpu; 564 } 565 566 rcu_read_lock(); 567 for_each_domain(cpu, sd) { 568 for_each_cpu_and(i, sched_domain_span(sd), 569 housekeeping_cpumask(HK_FLAG_TIMER)) { 570 if (cpu == i) 571 continue; 572 573 if (!idle_cpu(i)) { 574 cpu = i; 575 goto unlock; 576 } 577 } 578 } 579 580 if (default_cpu == -1) 581 default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER); 582 cpu = default_cpu; 583 unlock: 584 rcu_read_unlock(); 585 return cpu; 586 } 587 588 /* 589 * When add_timer_on() enqueues a timer into the timer wheel of an 590 * idle CPU then this timer might expire before the next timer event 591 * which is scheduled to wake up that CPU. In case of a completely 592 * idle system the next event might even be infinite time into the 593 * future. wake_up_idle_cpu() ensures that the CPU is woken up and 594 * leaves the inner idle loop so the newly added timer is taken into 595 * account when the CPU goes back to idle and evaluates the timer 596 * wheel for the next timer event. 597 */ 598 static void wake_up_idle_cpu(int cpu) 599 { 600 struct rq *rq = cpu_rq(cpu); 601 602 if (cpu == smp_processor_id()) 603 return; 604 605 if (set_nr_and_not_polling(rq->idle)) 606 smp_send_reschedule(cpu); 607 else 608 trace_sched_wake_idle_without_ipi(cpu); 609 } 610 611 static bool wake_up_full_nohz_cpu(int cpu) 612 { 613 /* 614 * We just need the target to call irq_exit() and re-evaluate 615 * the next tick. The nohz full kick at least implies that. 616 * If needed we can still optimize that later with an 617 * empty IRQ. 618 */ 619 if (cpu_is_offline(cpu)) 620 return true; /* Don't try to wake offline CPUs. 
*/ 621 if (tick_nohz_full_cpu(cpu)) { 622 if (cpu != smp_processor_id() || 623 tick_nohz_tick_stopped()) 624 tick_nohz_full_kick_cpu(cpu); 625 return true; 626 } 627 628 return false; 629 } 630 631 /* 632 * Wake up the specified CPU. If the CPU is going offline, it is the 633 * caller's responsibility to deal with the lost wakeup, for example, 634 * by hooking into the CPU_DEAD notifier like timers and hrtimers do. 635 */ 636 void wake_up_nohz_cpu(int cpu) 637 { 638 if (!wake_up_full_nohz_cpu(cpu)) 639 wake_up_idle_cpu(cpu); 640 } 641 642 static void nohz_csd_func(void *info) 643 { 644 struct rq *rq = info; 645 int cpu = cpu_of(rq); 646 unsigned int flags; 647 648 /* 649 * Release the rq::nohz_csd. 650 */ 651 flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu)); 652 WARN_ON(!(flags & NOHZ_KICK_MASK)); 653 654 rq->idle_balance = idle_cpu(cpu); 655 if (rq->idle_balance && !need_resched()) { 656 rq->nohz_idle_balance = flags; 657 raise_softirq_irqoff(SCHED_SOFTIRQ); 658 } 659 } 660 661 #endif /* CONFIG_NO_HZ_COMMON */ 662 663 #ifdef CONFIG_NO_HZ_FULL 664 bool sched_can_stop_tick(struct rq *rq) 665 { 666 int fifo_nr_running; 667 668 /* Deadline tasks, even if single, need the tick */ 669 if (rq->dl.dl_nr_running) 670 return false; 671 672 /* 673 * If there are more than one RR tasks, we need the tick to effect the 674 * actual RR behaviour. 675 */ 676 if (rq->rt.rr_nr_running) { 677 if (rq->rt.rr_nr_running == 1) 678 return true; 679 else 680 return false; 681 } 682 683 /* 684 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no 685 * forced preemption between FIFO tasks. 686 */ 687 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; 688 if (fifo_nr_running) 689 return true; 690 691 /* 692 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left; 693 * if there's more than one we need the tick for involuntary 694 * preemption. 695 */ 696 if (rq->nr_running > 1) 697 return false; 698 699 return true; 700 } 701 #endif /* CONFIG_NO_HZ_FULL */ 702 #endif /* CONFIG_SMP */ 703 704 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ 705 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) 706 /* 707 * Iterate task_group tree rooted at *from, calling @down when first entering a 708 * node and @up when leaving it for the final time. 709 * 710 * Caller must hold rcu_lock or sufficient equivalent. 
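 *
 * An illustrative use (hypothetical visitor, not from this file); tg_nop()
 * below serves as the no-op visitor for the direction a caller does not
 * care about:
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;		// non-zero aborts the walk
 *	}
 *
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, count_tg, tg_nop, &nr);
 *	rcu_read_unlock();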
711 */ 712 int walk_tg_tree_from(struct task_group *from, 713 tg_visitor down, tg_visitor up, void *data) 714 { 715 struct task_group *parent, *child; 716 int ret; 717 718 parent = from; 719 720 down: 721 ret = (*down)(parent, data); 722 if (ret) 723 goto out; 724 list_for_each_entry_rcu(child, &parent->children, siblings) { 725 parent = child; 726 goto down; 727 728 up: 729 continue; 730 } 731 ret = (*up)(parent, data); 732 if (ret || parent == from) 733 goto out; 734 735 child = parent; 736 parent = parent->parent; 737 if (parent) 738 goto up; 739 out: 740 return ret; 741 } 742 743 int tg_nop(struct task_group *tg, void *data) 744 { 745 return 0; 746 } 747 #endif 748 749 static void set_load_weight(struct task_struct *p, bool update_load) 750 { 751 int prio = p->static_prio - MAX_RT_PRIO; 752 struct load_weight *load = &p->se.load; 753 754 /* 755 * SCHED_IDLE tasks get minimal weight: 756 */ 757 if (task_has_idle_policy(p)) { 758 load->weight = scale_load(WEIGHT_IDLEPRIO); 759 load->inv_weight = WMULT_IDLEPRIO; 760 return; 761 } 762 763 /* 764 * SCHED_OTHER tasks have to update their load when changing their 765 * weight 766 */ 767 if (update_load && p->sched_class == &fair_sched_class) { 768 reweight_task(p, prio); 769 } else { 770 load->weight = scale_load(sched_prio_to_weight[prio]); 771 load->inv_weight = sched_prio_to_wmult[prio]; 772 } 773 } 774 775 #ifdef CONFIG_UCLAMP_TASK 776 /* 777 * Serializes updates of utilization clamp values 778 * 779 * The (slow-path) user-space triggers utilization clamp value updates which 780 * can require updates on (fast-path) scheduler's data structures used to 781 * support enqueue/dequeue operations. 782 * While the per-CPU rq lock protects fast-path update operations, user-space 783 * requests are serialized using a mutex to reduce the risk of conflicting 784 * updates or API abuses. 785 */ 786 static DEFINE_MUTEX(uclamp_mutex); 787 788 /* Max allowed minimum utilization */ 789 unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE; 790 791 /* Max allowed maximum utilization */ 792 unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE; 793 794 /* All clamps are required to be less or equal than these values */ 795 static struct uclamp_se uclamp_default[UCLAMP_CNT]; 796 797 /* Integer rounded range for each bucket */ 798 #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS) 799 800 #define for_each_clamp_id(clamp_id) \ 801 for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++) 802 803 static inline unsigned int uclamp_bucket_id(unsigned int clamp_value) 804 { 805 return clamp_value / UCLAMP_BUCKET_DELTA; 806 } 807 808 static inline unsigned int uclamp_bucket_base_value(unsigned int clamp_value) 809 { 810 return UCLAMP_BUCKET_DELTA * uclamp_bucket_id(clamp_value); 811 } 812 813 static inline unsigned int uclamp_none(enum uclamp_id clamp_id) 814 { 815 if (clamp_id == UCLAMP_MIN) 816 return 0; 817 return SCHED_CAPACITY_SCALE; 818 } 819 820 static inline void uclamp_se_set(struct uclamp_se *uc_se, 821 unsigned int value, bool user_defined) 822 { 823 uc_se->value = value; 824 uc_se->bucket_id = uclamp_bucket_id(value); 825 uc_se->user_defined = user_defined; 826 } 827 828 static inline unsigned int 829 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id, 830 unsigned int clamp_value) 831 { 832 /* 833 * Avoid blocked utilization pushing up the frequency when we go 834 * idle (which drops the max-clamp) by retaining the last known 835 * max-clamp. 
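 *
 * As a worked example of the bucketing defined above (a sketch assuming
 * the default CONFIG_UCLAMP_BUCKETS_COUNT of 5 and SCHED_CAPACITY_SCALE
 * of 1024):
 *
 *	UCLAMP_BUCKET_DELTA           = DIV_ROUND_CLOSEST(1024, 5) = 205
 *	uclamp_bucket_id(800)         = 800 / 205 = 3
 *	uclamp_bucket_base_value(800) = 205 * 3   = 615
 *
 * i.e. a requested clamp of 800 is refcounted in bucket 3, whose base
 * value is 615.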
836 */ 837 if (clamp_id == UCLAMP_MAX) { 838 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; 839 return clamp_value; 840 } 841 842 return uclamp_none(UCLAMP_MIN); 843 } 844 845 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id, 846 unsigned int clamp_value) 847 { 848 /* Reset max-clamp retention only on idle exit */ 849 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) 850 return; 851 852 WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value); 853 } 854 855 static inline 856 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, 857 unsigned int clamp_value) 858 { 859 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; 860 int bucket_id = UCLAMP_BUCKETS - 1; 861 862 /* 863 * Since both min and max clamps are max aggregated, find the 864 * top most bucket with tasks in. 865 */ 866 for ( ; bucket_id >= 0; bucket_id--) { 867 if (!bucket[bucket_id].tasks) 868 continue; 869 return bucket[bucket_id].value; 870 } 871 872 /* No tasks -- default clamp values */ 873 return uclamp_idle_value(rq, clamp_id, clamp_value); 874 } 875 876 static inline struct uclamp_se 877 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id) 878 { 879 struct uclamp_se uc_req = p->uclamp_req[clamp_id]; 880 #ifdef CONFIG_UCLAMP_TASK_GROUP 881 struct uclamp_se uc_max; 882 883 /* 884 * Tasks in autogroups or root task group will be 885 * restricted by system defaults. 886 */ 887 if (task_group_is_autogroup(task_group(p))) 888 return uc_req; 889 if (task_group(p) == &root_task_group) 890 return uc_req; 891 892 uc_max = task_group(p)->uclamp[clamp_id]; 893 if (uc_req.value > uc_max.value || !uc_req.user_defined) 894 return uc_max; 895 #endif 896 897 return uc_req; 898 } 899 900 /* 901 * The effective clamp bucket index of a task depends on, by increasing 902 * priority: 903 * - the task specific clamp value, when explicitly requested from userspace 904 * - the task group effective clamp value, for tasks not either in the root 905 * group or in an autogroup 906 * - the system default clamp value, defined by the sysadmin 907 */ 908 static inline struct uclamp_se 909 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id) 910 { 911 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id); 912 struct uclamp_se uc_max = uclamp_default[clamp_id]; 913 914 /* System default restrictions always apply */ 915 if (unlikely(uc_req.value > uc_max.value)) 916 return uc_max; 917 918 return uc_req; 919 } 920 921 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) 922 { 923 struct uclamp_se uc_eff; 924 925 /* Task currently refcounted: use back-annotated (effective) value */ 926 if (p->uclamp[clamp_id].active) 927 return (unsigned long)p->uclamp[clamp_id].value; 928 929 uc_eff = uclamp_eff_get(p, clamp_id); 930 931 return (unsigned long)uc_eff.value; 932 } 933 934 /* 935 * When a task is enqueued on a rq, the clamp bucket currently defined by the 936 * task's uclamp::bucket_id is refcounted on that rq. This also immediately 937 * updates the rq's clamp value if required. 938 * 939 * Tasks can have a task-specific value requested from user-space, track 940 * within each bucket the maximum value for tasks refcounted in it. 941 * This "local max aggregation" allows to track the exact "requested" value 942 * for each bucket when all its RUNNABLE tasks require the same clamp. 
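 *
 * An illustrative scenario (values are arbitrary): two RUNNABLE tasks
 * whose requests fall in the same bucket, A at 700 and B at 740:
 *
 *	enqueue(A, 700):  bucket->tasks == 1, bucket->value == 700
 *	enqueue(B, 740):  bucket->tasks == 2, bucket->value == 740
 *	dequeue(B):       bucket->tasks == 1, bucket->value stays 740
 *
 * A is thus "overboosted" to 740 until the bucket empties and its value
 * is refreshed, the accepted trade-off spelled out in uclamp_rq_dec_id().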
943 */ 944 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, 945 enum uclamp_id clamp_id) 946 { 947 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; 948 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; 949 struct uclamp_bucket *bucket; 950 951 lockdep_assert_held(&rq->lock); 952 953 /* Update task effective clamp */ 954 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); 955 956 bucket = &uc_rq->bucket[uc_se->bucket_id]; 957 bucket->tasks++; 958 uc_se->active = true; 959 960 uclamp_idle_reset(rq, clamp_id, uc_se->value); 961 962 /* 963 * Local max aggregation: rq buckets always track the max 964 * "requested" clamp value of its RUNNABLE tasks. 965 */ 966 if (bucket->tasks == 1 || uc_se->value > bucket->value) 967 bucket->value = uc_se->value; 968 969 if (uc_se->value > READ_ONCE(uc_rq->value)) 970 WRITE_ONCE(uc_rq->value, uc_se->value); 971 } 972 973 /* 974 * When a task is dequeued from a rq, the clamp bucket refcounted by the task 975 * is released. If this is the last task reference counting the rq's max 976 * active clamp value, then the rq's clamp value is updated. 977 * 978 * Both refcounted tasks and rq's cached clamp values are expected to be 979 * always valid. If it's detected they are not, as defensive programming, 980 * enforce the expected state and warn. 981 */ 982 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, 983 enum uclamp_id clamp_id) 984 { 985 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; 986 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; 987 struct uclamp_bucket *bucket; 988 unsigned int bkt_clamp; 989 unsigned int rq_clamp; 990 991 lockdep_assert_held(&rq->lock); 992 993 bucket = &uc_rq->bucket[uc_se->bucket_id]; 994 SCHED_WARN_ON(!bucket->tasks); 995 if (likely(bucket->tasks)) 996 bucket->tasks--; 997 uc_se->active = false; 998 999 /* 1000 * Keep "local max aggregation" simple and accept to (possibly) 1001 * overboost some RUNNABLE tasks in the same bucket. 1002 * The rq clamp bucket value is reset to its base value whenever 1003 * there are no more RUNNABLE tasks refcounting it. 1004 */ 1005 if (likely(bucket->tasks)) 1006 return; 1007 1008 rq_clamp = READ_ONCE(uc_rq->value); 1009 /* 1010 * Defensive programming: this should never happen. If it happens, 1011 * e.g. due to future modification, warn and fixup the expected value. 1012 */ 1013 SCHED_WARN_ON(bucket->value > rq_clamp); 1014 if (bucket->value >= rq_clamp) { 1015 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); 1016 WRITE_ONCE(uc_rq->value, bkt_clamp); 1017 } 1018 } 1019 1020 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) 1021 { 1022 enum uclamp_id clamp_id; 1023 1024 if (unlikely(!p->sched_class->uclamp_enabled)) 1025 return; 1026 1027 for_each_clamp_id(clamp_id) 1028 uclamp_rq_inc_id(rq, p, clamp_id); 1029 1030 /* Reset clamp idle holding when there is one RUNNABLE task */ 1031 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) 1032 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; 1033 } 1034 1035 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) 1036 { 1037 enum uclamp_id clamp_id; 1038 1039 if (unlikely(!p->sched_class->uclamp_enabled)) 1040 return; 1041 1042 for_each_clamp_id(clamp_id) 1043 uclamp_rq_dec_id(rq, p, clamp_id); 1044 } 1045 1046 static inline void 1047 uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id) 1048 { 1049 struct rq_flags rf; 1050 struct rq *rq; 1051 1052 /* 1053 * Lock the task and the rq where the task is (or was) queued. 
1054 * 1055 * We might lock the (previous) rq of a !RUNNABLE task, but that's the 1056 * price to pay to safely serialize util_{min,max} updates with 1057 * enqueues, dequeues and migration operations. 1058 * This is the same locking schema used by __set_cpus_allowed_ptr(). 1059 */ 1060 rq = task_rq_lock(p, &rf); 1061 1062 /* 1063 * Setting the clamp bucket is serialized by task_rq_lock(). 1064 * If the task is not yet RUNNABLE and its task_struct is not 1065 * affecting a valid clamp bucket, the next time it's enqueued, 1066 * it will already see the updated clamp bucket value. 1067 */ 1068 if (p->uclamp[clamp_id].active) { 1069 uclamp_rq_dec_id(rq, p, clamp_id); 1070 uclamp_rq_inc_id(rq, p, clamp_id); 1071 } 1072 1073 task_rq_unlock(rq, p, &rf); 1074 } 1075 1076 #ifdef CONFIG_UCLAMP_TASK_GROUP 1077 static inline void 1078 uclamp_update_active_tasks(struct cgroup_subsys_state *css, 1079 unsigned int clamps) 1080 { 1081 enum uclamp_id clamp_id; 1082 struct css_task_iter it; 1083 struct task_struct *p; 1084 1085 css_task_iter_start(css, 0, &it); 1086 while ((p = css_task_iter_next(&it))) { 1087 for_each_clamp_id(clamp_id) { 1088 if ((0x1 << clamp_id) & clamps) 1089 uclamp_update_active(p, clamp_id); 1090 } 1091 } 1092 css_task_iter_end(&it); 1093 } 1094 1095 static void cpu_util_update_eff(struct cgroup_subsys_state *css); 1096 static void uclamp_update_root_tg(void) 1097 { 1098 struct task_group *tg = &root_task_group; 1099 1100 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], 1101 sysctl_sched_uclamp_util_min, false); 1102 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], 1103 sysctl_sched_uclamp_util_max, false); 1104 1105 rcu_read_lock(); 1106 cpu_util_update_eff(&root_task_group.css); 1107 rcu_read_unlock(); 1108 } 1109 #else 1110 static void uclamp_update_root_tg(void) { } 1111 #endif 1112 1113 int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, 1114 void *buffer, size_t *lenp, loff_t *ppos) 1115 { 1116 bool update_root_tg = false; 1117 int old_min, old_max; 1118 int result; 1119 1120 mutex_lock(&uclamp_mutex); 1121 old_min = sysctl_sched_uclamp_util_min; 1122 old_max = sysctl_sched_uclamp_util_max; 1123 1124 result = proc_dointvec(table, write, buffer, lenp, ppos); 1125 if (result) 1126 goto undo; 1127 if (!write) 1128 goto done; 1129 1130 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max || 1131 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE) { 1132 result = -EINVAL; 1133 goto undo; 1134 } 1135 1136 if (old_min != sysctl_sched_uclamp_util_min) { 1137 uclamp_se_set(&uclamp_default[UCLAMP_MIN], 1138 sysctl_sched_uclamp_util_min, false); 1139 update_root_tg = true; 1140 } 1141 if (old_max != sysctl_sched_uclamp_util_max) { 1142 uclamp_se_set(&uclamp_default[UCLAMP_MAX], 1143 sysctl_sched_uclamp_util_max, false); 1144 update_root_tg = true; 1145 } 1146 1147 if (update_root_tg) 1148 uclamp_update_root_tg(); 1149 1150 /* 1151 * We update all RUNNABLE tasks only when task groups are in use. 1152 * Otherwise, keep it simple and do just a lazy update at each next 1153 * task enqueue time. 
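 *
 * For reference, the per-task requests that these system-wide defaults
 * restrict are issued from user space with the sched_setattr() syscall;
 * an illustrative sketch (not from this file, error handling and the raw
 * syscall plumbing omitted):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_flags	= SCHED_FLAG_UTIL_CLAMP_MIN |
 *				  SCHED_FLAG_UTIL_CLAMP_MAX,
 *		.sched_util_min	= 128,
 *		.sched_util_max	= 512,
 *	};
 *
 *	sched_setattr(pid, &attr, 0);
 *
 * uclamp_validate() below rejects requests with min > max or with max
 * above SCHED_CAPACITY_SCALE.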
1154 */ 1155 1156 goto done; 1157 1158 undo: 1159 sysctl_sched_uclamp_util_min = old_min; 1160 sysctl_sched_uclamp_util_max = old_max; 1161 done: 1162 mutex_unlock(&uclamp_mutex); 1163 1164 return result; 1165 } 1166 1167 static int uclamp_validate(struct task_struct *p, 1168 const struct sched_attr *attr) 1169 { 1170 unsigned int lower_bound = p->uclamp_req[UCLAMP_MIN].value; 1171 unsigned int upper_bound = p->uclamp_req[UCLAMP_MAX].value; 1172 1173 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) 1174 lower_bound = attr->sched_util_min; 1175 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) 1176 upper_bound = attr->sched_util_max; 1177 1178 if (lower_bound > upper_bound) 1179 return -EINVAL; 1180 if (upper_bound > SCHED_CAPACITY_SCALE) 1181 return -EINVAL; 1182 1183 return 0; 1184 } 1185 1186 static void __setscheduler_uclamp(struct task_struct *p, 1187 const struct sched_attr *attr) 1188 { 1189 enum uclamp_id clamp_id; 1190 1191 /* 1192 * On scheduling class change, reset to default clamps for tasks 1193 * without a task-specific value. 1194 */ 1195 for_each_clamp_id(clamp_id) { 1196 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id]; 1197 unsigned int clamp_value = uclamp_none(clamp_id); 1198 1199 /* Keep using defined clamps across class changes */ 1200 if (uc_se->user_defined) 1201 continue; 1202 1203 /* By default, RT tasks always get 100% boost */ 1204 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN)) 1205 clamp_value = uclamp_none(UCLAMP_MAX); 1206 1207 uclamp_se_set(uc_se, clamp_value, false); 1208 } 1209 1210 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP))) 1211 return; 1212 1213 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) { 1214 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN], 1215 attr->sched_util_min, true); 1216 } 1217 1218 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) { 1219 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX], 1220 attr->sched_util_max, true); 1221 } 1222 } 1223 1224 static void uclamp_fork(struct task_struct *p) 1225 { 1226 enum uclamp_id clamp_id; 1227 1228 for_each_clamp_id(clamp_id) 1229 p->uclamp[clamp_id].active = false; 1230 1231 if (likely(!p->sched_reset_on_fork)) 1232 return; 1233 1234 for_each_clamp_id(clamp_id) { 1235 uclamp_se_set(&p->uclamp_req[clamp_id], 1236 uclamp_none(clamp_id), false); 1237 } 1238 } 1239 1240 static void __init init_uclamp(void) 1241 { 1242 struct uclamp_se uc_max = {}; 1243 enum uclamp_id clamp_id; 1244 int cpu; 1245 1246 mutex_init(&uclamp_mutex); 1247 1248 for_each_possible_cpu(cpu) { 1249 memset(&cpu_rq(cpu)->uclamp, 0, 1250 sizeof(struct uclamp_rq)*UCLAMP_CNT); 1251 cpu_rq(cpu)->uclamp_flags = 0; 1252 } 1253 1254 for_each_clamp_id(clamp_id) { 1255 uclamp_se_set(&init_task.uclamp_req[clamp_id], 1256 uclamp_none(clamp_id), false); 1257 } 1258 1259 /* System defaults allow max clamp values for both indexes */ 1260 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false); 1261 for_each_clamp_id(clamp_id) { 1262 uclamp_default[clamp_id] = uc_max; 1263 #ifdef CONFIG_UCLAMP_TASK_GROUP 1264 root_task_group.uclamp_req[clamp_id] = uc_max; 1265 root_task_group.uclamp[clamp_id] = uc_max; 1266 #endif 1267 } 1268 } 1269 1270 #else /* CONFIG_UCLAMP_TASK */ 1271 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } 1272 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } 1273 static inline int uclamp_validate(struct task_struct *p, 1274 const struct sched_attr *attr) 1275 { 1276 return -EOPNOTSUPP; 1277 } 1278 static void __setscheduler_uclamp(struct task_struct *p, 1279 const struct 
sched_attr *attr) { } 1280 static inline void uclamp_fork(struct task_struct *p) { } 1281 static inline void init_uclamp(void) { } 1282 #endif /* CONFIG_UCLAMP_TASK */ 1283 1284 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 1285 { 1286 if (!(flags & ENQUEUE_NOCLOCK)) 1287 update_rq_clock(rq); 1288 1289 if (!(flags & ENQUEUE_RESTORE)) { 1290 sched_info_queued(rq, p); 1291 psi_enqueue(p, flags & ENQUEUE_WAKEUP); 1292 } 1293 1294 uclamp_rq_inc(rq, p); 1295 p->sched_class->enqueue_task(rq, p, flags); 1296 } 1297 1298 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 1299 { 1300 if (!(flags & DEQUEUE_NOCLOCK)) 1301 update_rq_clock(rq); 1302 1303 if (!(flags & DEQUEUE_SAVE)) { 1304 sched_info_dequeued(rq, p); 1305 psi_dequeue(p, flags & DEQUEUE_SLEEP); 1306 } 1307 1308 uclamp_rq_dec(rq, p); 1309 p->sched_class->dequeue_task(rq, p, flags); 1310 } 1311 1312 void activate_task(struct rq *rq, struct task_struct *p, int flags) 1313 { 1314 if (task_contributes_to_load(p)) 1315 rq->nr_uninterruptible--; 1316 1317 enqueue_task(rq, p, flags); 1318 1319 p->on_rq = TASK_ON_RQ_QUEUED; 1320 } 1321 1322 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 1323 { 1324 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING; 1325 1326 if (task_contributes_to_load(p)) 1327 rq->nr_uninterruptible++; 1328 1329 dequeue_task(rq, p, flags); 1330 } 1331 1332 /* 1333 * __normal_prio - return the priority that is based on the static prio 1334 */ 1335 static inline int __normal_prio(struct task_struct *p) 1336 { 1337 return p->static_prio; 1338 } 1339 1340 /* 1341 * Calculate the expected normal priority: i.e. priority 1342 * without taking RT-inheritance into account. Might be 1343 * boosted by interactivity modifiers. Changes upon fork, 1344 * setprio syscalls, and whenever the interactivity 1345 * estimator recalculates. 1346 */ 1347 static inline int normal_prio(struct task_struct *p) 1348 { 1349 int prio; 1350 1351 if (task_has_dl_policy(p)) 1352 prio = MAX_DL_PRIO-1; 1353 else if (task_has_rt_policy(p)) 1354 prio = MAX_RT_PRIO-1 - p->rt_priority; 1355 else 1356 prio = __normal_prio(p); 1357 return prio; 1358 } 1359 1360 /* 1361 * Calculate the current priority, i.e. the priority 1362 * taken into account by the scheduler. This value might 1363 * be boosted by RT tasks, or might be boosted by 1364 * interactivity modifiers. Will be RT if the task got 1365 * RT-boosted. If not then it returns p->normal_prio. 1366 */ 1367 static int effective_prio(struct task_struct *p) 1368 { 1369 p->normal_prio = normal_prio(p); 1370 /* 1371 * If we are RT tasks or we were boosted to RT priority, 1372 * keep the priority unchanged. Otherwise, update priority 1373 * to the normal priority: 1374 */ 1375 if (!rt_prio(p->prio)) 1376 return p->normal_prio; 1377 return p->prio; 1378 } 1379 1380 /** 1381 * task_curr - is this task currently executing on a CPU? 1382 * @p: the task in question. 1383 * 1384 * Return: 1 if the task is currently executing. 0 otherwise. 1385 */ 1386 inline int task_curr(const struct task_struct *p) 1387 { 1388 return cpu_curr(task_cpu(p)) == p; 1389 } 1390 1391 /* 1392 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, 1393 * use the balance_callback list if you want balancing. 1394 * 1395 * this means any call to check_class_changed() must be followed by a call to 1396 * balance_callback(). 
1397 */ 1398 static inline void check_class_changed(struct rq *rq, struct task_struct *p, 1399 const struct sched_class *prev_class, 1400 int oldprio) 1401 { 1402 if (prev_class != p->sched_class) { 1403 if (prev_class->switched_from) 1404 prev_class->switched_from(rq, p); 1405 1406 p->sched_class->switched_to(rq, p); 1407 } else if (oldprio != p->prio || dl_task(p)) 1408 p->sched_class->prio_changed(rq, p, oldprio); 1409 } 1410 1411 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) 1412 { 1413 const struct sched_class *class; 1414 1415 if (p->sched_class == rq->curr->sched_class) { 1416 rq->curr->sched_class->check_preempt_curr(rq, p, flags); 1417 } else { 1418 for_each_class(class) { 1419 if (class == rq->curr->sched_class) 1420 break; 1421 if (class == p->sched_class) { 1422 resched_curr(rq); 1423 break; 1424 } 1425 } 1426 } 1427 1428 /* 1429 * A queue event has occurred, and we're going to schedule. In 1430 * this case, we can save a useless back to back clock update. 1431 */ 1432 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) 1433 rq_clock_skip_update(rq); 1434 } 1435 1436 #ifdef CONFIG_SMP 1437 1438 /* 1439 * Per-CPU kthreads are allowed to run on !active && online CPUs, see 1440 * __set_cpus_allowed_ptr() and select_fallback_rq(). 1441 */ 1442 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) 1443 { 1444 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 1445 return false; 1446 1447 if (is_per_cpu_kthread(p)) 1448 return cpu_online(cpu); 1449 1450 return cpu_active(cpu); 1451 } 1452 1453 /* 1454 * This is how migration works: 1455 * 1456 * 1) we invoke migration_cpu_stop() on the target CPU using 1457 * stop_one_cpu(). 1458 * 2) stopper starts to run (implicitly forcing the migrated thread 1459 * off the CPU) 1460 * 3) it checks whether the migrated task is still in the wrong runqueue. 1461 * 4) if it's in the wrong runqueue then the migration thread removes 1462 * it and puts it into the right queue. 1463 * 5) stopper completes and stop_one_cpu() returns and the migration 1464 * is done. 1465 */ 1466 1467 /* 1468 * move_queued_task - move a queued task to new rq. 1469 * 1470 * Returns (locked) new rq. Old rq's lock is released. 1471 */ 1472 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, 1473 struct task_struct *p, int new_cpu) 1474 { 1475 lockdep_assert_held(&rq->lock); 1476 1477 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); 1478 dequeue_task(rq, p, DEQUEUE_NOCLOCK); 1479 set_task_cpu(p, new_cpu); 1480 rq_unlock(rq, rf); 1481 1482 rq = cpu_rq(new_cpu); 1483 1484 rq_lock(rq, rf); 1485 BUG_ON(task_cpu(p) != new_cpu); 1486 enqueue_task(rq, p, 0); 1487 p->on_rq = TASK_ON_RQ_QUEUED; 1488 check_preempt_curr(rq, p, 0); 1489 1490 return rq; 1491 } 1492 1493 struct migration_arg { 1494 struct task_struct *task; 1495 int dest_cpu; 1496 }; 1497 1498 /* 1499 * Move (not current) task off this CPU, onto the destination CPU. We're doing 1500 * this because either it can't run here any more (set_cpus_allowed() 1501 * away from this CPU, or CPU going down), or because we're 1502 * attempting to rebalance this task on exec (sched_exec). 1503 * 1504 * So we race with normal scheduler movements, but that's OK, as long 1505 * as the task is no longer on this CPU. 1506 */ 1507 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, 1508 struct task_struct *p, int dest_cpu) 1509 { 1510 /* Affinity changed (again). 
*/ 1511 if (!is_cpu_allowed(p, dest_cpu)) 1512 return rq; 1513 1514 update_rq_clock(rq); 1515 rq = move_queued_task(rq, rf, p, dest_cpu); 1516 1517 return rq; 1518 } 1519 1520 /* 1521 * migration_cpu_stop - this will be executed by a highprio stopper thread 1522 * and performs thread migration by bumping thread off CPU then 1523 * 'pushing' onto another runqueue. 1524 */ 1525 static int migration_cpu_stop(void *data) 1526 { 1527 struct migration_arg *arg = data; 1528 struct task_struct *p = arg->task; 1529 struct rq *rq = this_rq(); 1530 struct rq_flags rf; 1531 1532 /* 1533 * The original target CPU might have gone down and we might 1534 * be on another CPU but it doesn't matter. 1535 */ 1536 local_irq_disable(); 1537 /* 1538 * We need to explicitly wake pending tasks before running 1539 * __migrate_task() such that we will not miss enforcing cpus_ptr 1540 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 1541 */ 1542 flush_smp_call_function_from_idle(); 1543 1544 raw_spin_lock(&p->pi_lock); 1545 rq_lock(rq, &rf); 1546 /* 1547 * If task_rq(p) != rq, it cannot be migrated here, because we're 1548 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because 1549 * we're holding p->pi_lock. 1550 */ 1551 if (task_rq(p) == rq) { 1552 if (task_on_rq_queued(p)) 1553 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); 1554 else 1555 p->wake_cpu = arg->dest_cpu; 1556 } 1557 rq_unlock(rq, &rf); 1558 raw_spin_unlock(&p->pi_lock); 1559 1560 local_irq_enable(); 1561 return 0; 1562 } 1563 1564 /* 1565 * sched_class::set_cpus_allowed must do the below, but is not required to 1566 * actually call this function. 1567 */ 1568 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) 1569 { 1570 cpumask_copy(&p->cpus_mask, new_mask); 1571 p->nr_cpus_allowed = cpumask_weight(new_mask); 1572 } 1573 1574 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 1575 { 1576 struct rq *rq = task_rq(p); 1577 bool queued, running; 1578 1579 lockdep_assert_held(&p->pi_lock); 1580 1581 queued = task_on_rq_queued(p); 1582 running = task_current(rq, p); 1583 1584 if (queued) { 1585 /* 1586 * Because __kthread_bind() calls this on blocked tasks without 1587 * holding rq->lock. 1588 */ 1589 lockdep_assert_held(&rq->lock); 1590 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 1591 } 1592 if (running) 1593 put_prev_task(rq, p); 1594 1595 p->sched_class->set_cpus_allowed(p, new_mask); 1596 1597 if (queued) 1598 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 1599 if (running) 1600 set_next_task(rq, p); 1601 } 1602 1603 /* 1604 * Change a given task's CPU affinity. Migrate the thread to a 1605 * proper CPU and schedule it away if the CPU it's executing on 1606 * is removed from the allowed bitmask. 1607 * 1608 * NOTE: the caller must have a valid reference to the task, the 1609 * task must not exit() & deallocate itself prematurely. The 1610 * call is not atomic; no spinlocks may be held. 
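 *
 * An illustrative caller sketch (hypothetical kthread, not from this
 * file): restrict a worker thread the caller holds a reference on to
 * CPU 2:
 *
 *	if (set_cpus_allowed_ptr(worker, cpumask_of(2)))
 *		pr_warn("could not move worker to CPU 2\n");
 *
 * For kernel threads that have not been woken yet, kthread_bind() is the
 * usual front end; set_cpus_allowed_ptr() is the path for tasks that are
 * already running.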
1611 */ 1612 static int __set_cpus_allowed_ptr(struct task_struct *p, 1613 const struct cpumask *new_mask, bool check) 1614 { 1615 const struct cpumask *cpu_valid_mask = cpu_active_mask; 1616 unsigned int dest_cpu; 1617 struct rq_flags rf; 1618 struct rq *rq; 1619 int ret = 0; 1620 1621 rq = task_rq_lock(p, &rf); 1622 update_rq_clock(rq); 1623 1624 if (p->flags & PF_KTHREAD) { 1625 /* 1626 * Kernel threads are allowed on online && !active CPUs 1627 */ 1628 cpu_valid_mask = cpu_online_mask; 1629 } 1630 1631 /* 1632 * Must re-check here, to close a race against __kthread_bind(), 1633 * sched_setaffinity() is not guaranteed to observe the flag. 1634 */ 1635 if (check && (p->flags & PF_NO_SETAFFINITY)) { 1636 ret = -EINVAL; 1637 goto out; 1638 } 1639 1640 if (cpumask_equal(p->cpus_ptr, new_mask)) 1641 goto out; 1642 1643 /* 1644 * Picking a ~random cpu helps in cases where we are changing affinity 1645 * for groups of tasks (ie. cpuset), so that load balancing is not 1646 * immediately required to distribute the tasks within their new mask. 1647 */ 1648 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask); 1649 if (dest_cpu >= nr_cpu_ids) { 1650 ret = -EINVAL; 1651 goto out; 1652 } 1653 1654 do_set_cpus_allowed(p, new_mask); 1655 1656 if (p->flags & PF_KTHREAD) { 1657 /* 1658 * For kernel threads that do indeed end up on online && 1659 * !active we want to ensure they are strict per-CPU threads. 1660 */ 1661 WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) && 1662 !cpumask_intersects(new_mask, cpu_active_mask) && 1663 p->nr_cpus_allowed != 1); 1664 } 1665 1666 /* Can the task run on the task's current CPU? If so, we're done */ 1667 if (cpumask_test_cpu(task_cpu(p), new_mask)) 1668 goto out; 1669 1670 if (task_running(rq, p) || p->state == TASK_WAKING) { 1671 struct migration_arg arg = { p, dest_cpu }; 1672 /* Need help from migration thread: drop lock and wait. */ 1673 task_rq_unlock(rq, p, &rf); 1674 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); 1675 return 0; 1676 } else if (task_on_rq_queued(p)) { 1677 /* 1678 * OK, since we're going to drop the lock immediately 1679 * afterwards anyway. 1680 */ 1681 rq = move_queued_task(rq, &rf, p, dest_cpu); 1682 } 1683 out: 1684 task_rq_unlock(rq, p, &rf); 1685 1686 return ret; 1687 } 1688 1689 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 1690 { 1691 return __set_cpus_allowed_ptr(p, new_mask, false); 1692 } 1693 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 1694 1695 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 1696 { 1697 #ifdef CONFIG_SCHED_DEBUG 1698 /* 1699 * We should never call set_task_cpu() on a blocked task, 1700 * ttwu() will sort out the placement. 1701 */ 1702 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && 1703 !p->on_rq); 1704 1705 /* 1706 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, 1707 * because schedstat_wait_{start,end} rebase migrating task's wait_start 1708 * time relying on p->on_rq. 1709 */ 1710 WARN_ON_ONCE(p->state == TASK_RUNNING && 1711 p->sched_class == &fair_sched_class && 1712 (p->on_rq && !task_on_rq_migrating(p))); 1713 1714 #ifdef CONFIG_LOCKDEP 1715 /* 1716 * The caller should hold either p->pi_lock or rq->lock, when changing 1717 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 1718 * 1719 * sched_move_task() holds both and thus holding either pins the cgroup, 1720 * see task_group(). 1721 * 1722 * Furthermore, all task_rq users should acquire both locks, see 1723 * task_rq_lock(). 
1724 */ 1725 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 1726 lockdep_is_held(&task_rq(p)->lock))); 1727 #endif 1728 /* 1729 * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 1730 */ 1731 WARN_ON_ONCE(!cpu_online(new_cpu)); 1732 #endif 1733 1734 trace_sched_migrate_task(p, new_cpu); 1735 1736 if (task_cpu(p) != new_cpu) { 1737 if (p->sched_class->migrate_task_rq) 1738 p->sched_class->migrate_task_rq(p, new_cpu); 1739 p->se.nr_migrations++; 1740 rseq_migrate(p); 1741 perf_event_task_migrate(p); 1742 } 1743 1744 __set_task_cpu(p, new_cpu); 1745 } 1746 1747 #ifdef CONFIG_NUMA_BALANCING 1748 static void __migrate_swap_task(struct task_struct *p, int cpu) 1749 { 1750 if (task_on_rq_queued(p)) { 1751 struct rq *src_rq, *dst_rq; 1752 struct rq_flags srf, drf; 1753 1754 src_rq = task_rq(p); 1755 dst_rq = cpu_rq(cpu); 1756 1757 rq_pin_lock(src_rq, &srf); 1758 rq_pin_lock(dst_rq, &drf); 1759 1760 deactivate_task(src_rq, p, 0); 1761 set_task_cpu(p, cpu); 1762 activate_task(dst_rq, p, 0); 1763 check_preempt_curr(dst_rq, p, 0); 1764 1765 rq_unpin_lock(dst_rq, &drf); 1766 rq_unpin_lock(src_rq, &srf); 1767 1768 } else { 1769 /* 1770 * Task isn't running anymore; make it appear like we migrated 1771 * it before it went to sleep. This means on wakeup we make the 1772 * previous CPU our target instead of where it really is. 1773 */ 1774 p->wake_cpu = cpu; 1775 } 1776 } 1777 1778 struct migration_swap_arg { 1779 struct task_struct *src_task, *dst_task; 1780 int src_cpu, dst_cpu; 1781 }; 1782 1783 static int migrate_swap_stop(void *data) 1784 { 1785 struct migration_swap_arg *arg = data; 1786 struct rq *src_rq, *dst_rq; 1787 int ret = -EAGAIN; 1788 1789 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) 1790 return -EAGAIN; 1791 1792 src_rq = cpu_rq(arg->src_cpu); 1793 dst_rq = cpu_rq(arg->dst_cpu); 1794 1795 double_raw_lock(&arg->src_task->pi_lock, 1796 &arg->dst_task->pi_lock); 1797 double_rq_lock(src_rq, dst_rq); 1798 1799 if (task_cpu(arg->dst_task) != arg->dst_cpu) 1800 goto unlock; 1801 1802 if (task_cpu(arg->src_task) != arg->src_cpu) 1803 goto unlock; 1804 1805 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) 1806 goto unlock; 1807 1808 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) 1809 goto unlock; 1810 1811 __migrate_swap_task(arg->src_task, arg->dst_cpu); 1812 __migrate_swap_task(arg->dst_task, arg->src_cpu); 1813 1814 ret = 0; 1815 1816 unlock: 1817 double_rq_unlock(src_rq, dst_rq); 1818 raw_spin_unlock(&arg->dst_task->pi_lock); 1819 raw_spin_unlock(&arg->src_task->pi_lock); 1820 1821 return ret; 1822 } 1823 1824 /* 1825 * Cross migrate two tasks 1826 */ 1827 int migrate_swap(struct task_struct *cur, struct task_struct *p, 1828 int target_cpu, int curr_cpu) 1829 { 1830 struct migration_swap_arg arg; 1831 int ret = -EINVAL; 1832 1833 arg = (struct migration_swap_arg){ 1834 .src_task = cur, 1835 .src_cpu = curr_cpu, 1836 .dst_task = p, 1837 .dst_cpu = target_cpu, 1838 }; 1839 1840 if (arg.src_cpu == arg.dst_cpu) 1841 goto out; 1842 1843 /* 1844 * These three tests are all lockless; this is OK since all of them 1845 * will be re-checked with proper locks held further down the line. 
1846 */ 1847 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 1848 goto out; 1849 1850 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) 1851 goto out; 1852 1853 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) 1854 goto out; 1855 1856 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 1857 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 1858 1859 out: 1860 return ret; 1861 } 1862 #endif /* CONFIG_NUMA_BALANCING */ 1863 1864 /* 1865 * wait_task_inactive - wait for a thread to unschedule. 1866 * 1867 * If @match_state is nonzero, it's the @p->state value just checked and 1868 * not expected to change. If it changes, i.e. @p might have woken up, 1869 * then return zero. When we succeed in waiting for @p to be off its CPU, 1870 * we return a positive number (its total switch count). If a second call 1871 * a short while later returns the same number, the caller can be sure that 1872 * @p has remained unscheduled the whole time. 1873 * 1874 * The caller must ensure that the task *will* unschedule sometime soon, 1875 * else this function might spin for a *long* time. This function can't 1876 * be called with interrupts off, or it may introduce deadlock with 1877 * smp_call_function() if an IPI is sent by the same process we are 1878 * waiting to become inactive. 1879 */ 1880 unsigned long wait_task_inactive(struct task_struct *p, long match_state) 1881 { 1882 int running, queued; 1883 struct rq_flags rf; 1884 unsigned long ncsw; 1885 struct rq *rq; 1886 1887 for (;;) { 1888 /* 1889 * We do the initial early heuristics without holding 1890 * any task-queue locks at all. We'll only try to get 1891 * the runqueue lock when things look like they will 1892 * work out! 1893 */ 1894 rq = task_rq(p); 1895 1896 /* 1897 * If the task is actively running on another CPU 1898 * still, just relax and busy-wait without holding 1899 * any locks. 1900 * 1901 * NOTE! Since we don't hold any locks, it's not 1902 * even sure that "rq" stays as the right runqueue! 1903 * But we don't care, since "task_running()" will 1904 * return false if the runqueue has changed and p 1905 * is actually now running somewhere else! 1906 */ 1907 while (task_running(rq, p)) { 1908 if (match_state && unlikely(p->state != match_state)) 1909 return 0; 1910 cpu_relax(); 1911 } 1912 1913 /* 1914 * Ok, time to look more closely! We need the rq 1915 * lock now, to be *sure*. If we're wrong, we'll 1916 * just go back and repeat. 1917 */ 1918 rq = task_rq_lock(p, &rf); 1919 trace_sched_wait_task(p); 1920 running = task_running(rq, p); 1921 queued = task_on_rq_queued(p); 1922 ncsw = 0; 1923 if (!match_state || p->state == match_state) 1924 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 1925 task_rq_unlock(rq, p, &rf); 1926 1927 /* 1928 * If it changed from the expected state, bail out now. 1929 */ 1930 if (unlikely(!ncsw)) 1931 break; 1932 1933 /* 1934 * Was it really running after all now that we 1935 * checked with the proper locks actually held? 1936 * 1937 * Oops. Go back and try again.. 1938 */ 1939 if (unlikely(running)) { 1940 cpu_relax(); 1941 continue; 1942 } 1943 1944 /* 1945 * It's not enough that it's not actively running, 1946 * it must be off the runqueue _entirely_, and not 1947 * preempted! 1948 * 1949 * So if it was still runnable (but just not actively 1950 * running right now), it's preempted, and we should 1951 * yield - it could be a while. 
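 *
 * For reference, the double-call pattern described in the comment above
 * this function looks roughly like this in an illustrative (hypothetical)
 * caller:
 *
 *	unsigned long ncsw;
 *
 *	ncsw = wait_task_inactive(p, TASK_TRACED);
 *	if (!ncsw)
 *		return -ESRCH;		// state changed, @p may have woken
 *
 *	// ... operate on @p while it is off the CPU ...
 *
 *	if (wait_task_inactive(p, TASK_TRACED) != ncsw)
 *		return -EAGAIN;		// @p was scheduled in the meantime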
1952 */ 1953 if (unlikely(queued)) { 1954 ktime_t to = NSEC_PER_SEC / HZ; 1955 1956 set_current_state(TASK_UNINTERRUPTIBLE); 1957 schedule_hrtimeout(&to, HRTIMER_MODE_REL); 1958 continue; 1959 } 1960 1961 /* 1962 * Ahh, all good. It wasn't running, and it wasn't 1963 * runnable, which means that it will never become 1964 * running in the future either. We're all done! 1965 */ 1966 break; 1967 } 1968 1969 return ncsw; 1970 } 1971 1972 /*** 1973 * kick_process - kick a running thread to enter/exit the kernel 1974 * @p: the to-be-kicked thread 1975 * 1976 * Cause a process which is running on another CPU to enter 1977 * kernel-mode, without any delay. (to get signals handled.) 1978 * 1979 * NOTE: this function doesn't have to take the runqueue lock, 1980 * because all it wants to ensure is that the remote task enters 1981 * the kernel. If the IPI races and the task has been migrated 1982 * to another CPU then no harm is done and the purpose has been 1983 * achieved as well. 1984 */ 1985 void kick_process(struct task_struct *p) 1986 { 1987 int cpu; 1988 1989 preempt_disable(); 1990 cpu = task_cpu(p); 1991 if ((cpu != smp_processor_id()) && task_curr(p)) 1992 smp_send_reschedule(cpu); 1993 preempt_enable(); 1994 } 1995 EXPORT_SYMBOL_GPL(kick_process); 1996 1997 /* 1998 * ->cpus_ptr is protected by both rq->lock and p->pi_lock 1999 * 2000 * A few notes on cpu_active vs cpu_online: 2001 * 2002 * - cpu_active must be a subset of cpu_online 2003 * 2004 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, 2005 * see __set_cpus_allowed_ptr(). At this point the newly online 2006 * CPU isn't yet part of the sched domains, and balancing will not 2007 * see it. 2008 * 2009 * - on CPU-down we clear cpu_active() to mask the sched domains and 2010 * avoid the load balancer to place new tasks on the to be removed 2011 * CPU. Existing tasks will remain running there and will be taken 2012 * off. 2013 * 2014 * This means that fallback selection must not select !active CPUs. 2015 * And can assume that any active CPU must be online. Conversely 2016 * select_task_rq() below may allow selection of !active CPUs in order 2017 * to satisfy the above rules. 2018 */ 2019 static int select_fallback_rq(int cpu, struct task_struct *p) 2020 { 2021 int nid = cpu_to_node(cpu); 2022 const struct cpumask *nodemask = NULL; 2023 enum { cpuset, possible, fail } state = cpuset; 2024 int dest_cpu; 2025 2026 /* 2027 * If the node that the CPU is on has been offlined, cpu_to_node() 2028 * will return -1. There is no CPU on the node, and we should 2029 * select the CPU on the other node. 2030 */ 2031 if (nid != -1) { 2032 nodemask = cpumask_of_node(nid); 2033 2034 /* Look for allowed, online CPU in same node. */ 2035 for_each_cpu(dest_cpu, nodemask) { 2036 if (!cpu_active(dest_cpu)) 2037 continue; 2038 if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) 2039 return dest_cpu; 2040 } 2041 } 2042 2043 for (;;) { 2044 /* Any allowed, online CPU? */ 2045 for_each_cpu(dest_cpu, p->cpus_ptr) { 2046 if (!is_cpu_allowed(p, dest_cpu)) 2047 continue; 2048 2049 goto out; 2050 } 2051 2052 /* No more Mr. Nice Guy. 
*/ 2053 switch (state) { 2054 case cpuset: 2055 if (IS_ENABLED(CONFIG_CPUSETS)) { 2056 cpuset_cpus_allowed_fallback(p); 2057 state = possible; 2058 break; 2059 } 2060 /* Fall-through */ 2061 case possible: 2062 do_set_cpus_allowed(p, cpu_possible_mask); 2063 state = fail; 2064 break; 2065 2066 case fail: 2067 BUG(); 2068 break; 2069 } 2070 } 2071 2072 out: 2073 if (state != cpuset) { 2074 /* 2075 * Don't tell them about moving exiting tasks or 2076 * kernel threads (both mm NULL), since they never 2077 * leave kernel. 2078 */ 2079 if (p->mm && printk_ratelimit()) { 2080 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 2081 task_pid_nr(p), p->comm, cpu); 2082 } 2083 } 2084 2085 return dest_cpu; 2086 } 2087 2088 /* 2089 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 2090 */ 2091 static inline 2092 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) 2093 { 2094 lockdep_assert_held(&p->pi_lock); 2095 2096 if (p->nr_cpus_allowed > 1) 2097 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); 2098 else 2099 cpu = cpumask_any(p->cpus_ptr); 2100 2101 /* 2102 * In order not to call set_task_cpu() on a blocking task we need 2103 * to rely on ttwu() to place the task on a valid ->cpus_ptr 2104 * CPU. 2105 * 2106 * Since this is common to all placement strategies, this lives here. 2107 * 2108 * [ this allows ->select_task() to simply return task_cpu(p) and 2109 * not worry about this generic constraint ] 2110 */ 2111 if (unlikely(!is_cpu_allowed(p, cpu))) 2112 cpu = select_fallback_rq(task_cpu(p), p); 2113 2114 return cpu; 2115 } 2116 2117 void sched_set_stop_task(int cpu, struct task_struct *stop) 2118 { 2119 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 2120 struct task_struct *old_stop = cpu_rq(cpu)->stop; 2121 2122 if (stop) { 2123 /* 2124 * Make it appear like a SCHED_FIFO task, its something 2125 * userspace knows about and won't get confused about. 2126 * 2127 * Also, it will make PI more or less work without too 2128 * much confusion -- but then, stop work should not 2129 * rely on PI working anyway. 2130 */ 2131 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 2132 2133 stop->sched_class = &stop_sched_class; 2134 } 2135 2136 cpu_rq(cpu)->stop = stop; 2137 2138 if (old_stop) { 2139 /* 2140 * Reset it back to a normal scheduling class so that 2141 * it can die in pieces. 
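 *
 * (it was dressed up as SCHED_FIFO above, so &rt_sched_class is the
 * class that matches the policy it is left with)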
2142 */ 2143 old_stop->sched_class = &rt_sched_class; 2144 } 2145 } 2146 2147 #else 2148 2149 static inline int __set_cpus_allowed_ptr(struct task_struct *p, 2150 const struct cpumask *new_mask, bool check) 2151 { 2152 return set_cpus_allowed_ptr(p, new_mask); 2153 } 2154 2155 #endif /* CONFIG_SMP */ 2156 2157 static void 2158 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 2159 { 2160 struct rq *rq; 2161 2162 if (!schedstat_enabled()) 2163 return; 2164 2165 rq = this_rq(); 2166 2167 #ifdef CONFIG_SMP 2168 if (cpu == rq->cpu) { 2169 __schedstat_inc(rq->ttwu_local); 2170 __schedstat_inc(p->se.statistics.nr_wakeups_local); 2171 } else { 2172 struct sched_domain *sd; 2173 2174 __schedstat_inc(p->se.statistics.nr_wakeups_remote); 2175 rcu_read_lock(); 2176 for_each_domain(rq->cpu, sd) { 2177 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 2178 __schedstat_inc(sd->ttwu_wake_remote); 2179 break; 2180 } 2181 } 2182 rcu_read_unlock(); 2183 } 2184 2185 if (wake_flags & WF_MIGRATED) 2186 __schedstat_inc(p->se.statistics.nr_wakeups_migrate); 2187 #endif /* CONFIG_SMP */ 2188 2189 __schedstat_inc(rq->ttwu_count); 2190 __schedstat_inc(p->se.statistics.nr_wakeups); 2191 2192 if (wake_flags & WF_SYNC) 2193 __schedstat_inc(p->se.statistics.nr_wakeups_sync); 2194 } 2195 2196 /* 2197 * Mark the task runnable and perform wakeup-preemption. 2198 */ 2199 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, 2200 struct rq_flags *rf) 2201 { 2202 check_preempt_curr(rq, p, wake_flags); 2203 p->state = TASK_RUNNING; 2204 trace_sched_wakeup(p); 2205 2206 #ifdef CONFIG_SMP 2207 if (p->sched_class->task_woken) { 2208 /* 2209 * Our task @p is fully woken up and running; so its safe to 2210 * drop the rq->lock, hereafter rq is only used for statistics. 2211 */ 2212 rq_unpin_lock(rq, rf); 2213 p->sched_class->task_woken(rq, p); 2214 rq_repin_lock(rq, rf); 2215 } 2216 2217 if (rq->idle_stamp) { 2218 u64 delta = rq_clock(rq) - rq->idle_stamp; 2219 u64 max = 2*rq->max_idle_balance_cost; 2220 2221 update_avg(&rq->avg_idle, delta); 2222 2223 if (rq->avg_idle > max) 2224 rq->avg_idle = max; 2225 2226 rq->idle_stamp = 0; 2227 } 2228 #endif 2229 } 2230 2231 static void 2232 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 2233 struct rq_flags *rf) 2234 { 2235 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 2236 2237 lockdep_assert_held(&rq->lock); 2238 2239 #ifdef CONFIG_SMP 2240 if (p->sched_contributes_to_load) 2241 rq->nr_uninterruptible--; 2242 2243 if (wake_flags & WF_MIGRATED) 2244 en_flags |= ENQUEUE_MIGRATED; 2245 #endif 2246 2247 activate_task(rq, p, en_flags); 2248 ttwu_do_wakeup(rq, p, wake_flags, rf); 2249 } 2250 2251 /* 2252 * Called in case the task @p isn't fully descheduled from its runqueue, 2253 * in this case we must do a remote wakeup. Its a 'light' wakeup though, 2254 * since all we need to do is flip p->state to TASK_RUNNING, since 2255 * the task is still ->on_rq. 
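 *
 * Returns 1 if @p was still queued and the light wakeup was performed,
 * 0 if it got fully dequeued in the meantime and the caller must fall
 * back to a full activation.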
2256 */ 2257 static int ttwu_remote(struct task_struct *p, int wake_flags) 2258 { 2259 struct rq_flags rf; 2260 struct rq *rq; 2261 int ret = 0; 2262 2263 rq = __task_rq_lock(p, &rf); 2264 if (task_on_rq_queued(p)) { 2265 /* check_preempt_curr() may use rq clock */ 2266 update_rq_clock(rq); 2267 ttwu_do_wakeup(rq, p, wake_flags, &rf); 2268 ret = 1; 2269 } 2270 __task_rq_unlock(rq, &rf); 2271 2272 return ret; 2273 } 2274 2275 #ifdef CONFIG_SMP 2276 void sched_ttwu_pending(void *arg) 2277 { 2278 struct llist_node *llist = arg; 2279 struct rq *rq = this_rq(); 2280 struct task_struct *p, *t; 2281 struct rq_flags rf; 2282 2283 if (!llist) 2284 return; 2285 2286 /* 2287 * rq::ttwu_pending racy indication of out-standing wakeups. 2288 * Races such that false-negatives are possible, since they 2289 * are shorter lived that false-positives would be. 2290 */ 2291 WRITE_ONCE(rq->ttwu_pending, 0); 2292 2293 rq_lock_irqsave(rq, &rf); 2294 update_rq_clock(rq); 2295 2296 llist_for_each_entry_safe(p, t, llist, wake_entry) 2297 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); 2298 2299 rq_unlock_irqrestore(rq, &rf); 2300 } 2301 2302 void send_call_function_single_ipi(int cpu) 2303 { 2304 struct rq *rq = cpu_rq(cpu); 2305 2306 if (!set_nr_if_polling(rq->idle)) 2307 arch_send_call_function_single_ipi(cpu); 2308 else 2309 trace_sched_wake_idle_without_ipi(cpu); 2310 } 2311 2312 /* 2313 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if 2314 * necessary. The wakee CPU on receipt of the IPI will queue the task 2315 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost 2316 * of the wakeup instead of the waker. 2317 */ 2318 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 2319 { 2320 struct rq *rq = cpu_rq(cpu); 2321 2322 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 2323 2324 WRITE_ONCE(rq->ttwu_pending, 1); 2325 __smp_call_single_queue(cpu, &p->wake_entry); 2326 } 2327 2328 void wake_up_if_idle(int cpu) 2329 { 2330 struct rq *rq = cpu_rq(cpu); 2331 struct rq_flags rf; 2332 2333 rcu_read_lock(); 2334 2335 if (!is_idle_task(rcu_dereference(rq->curr))) 2336 goto out; 2337 2338 if (set_nr_if_polling(rq->idle)) { 2339 trace_sched_wake_idle_without_ipi(cpu); 2340 } else { 2341 rq_lock_irqsave(rq, &rf); 2342 if (is_idle_task(rq->curr)) 2343 smp_send_reschedule(cpu); 2344 /* Else CPU is not idle, do nothing here: */ 2345 rq_unlock_irqrestore(rq, &rf); 2346 } 2347 2348 out: 2349 rcu_read_unlock(); 2350 } 2351 2352 bool cpus_share_cache(int this_cpu, int that_cpu) 2353 { 2354 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 2355 } 2356 2357 static inline bool ttwu_queue_cond(int cpu, int wake_flags) 2358 { 2359 /* 2360 * If the CPU does not share cache, then queue the task on the 2361 * remote rqs wakelist to avoid accessing remote data. 2362 */ 2363 if (!cpus_share_cache(smp_processor_id(), cpu)) 2364 return true; 2365 2366 /* 2367 * If the task is descheduling and the only running task on the 2368 * CPU then use the wakelist to offload the task activation to 2369 * the soon-to-be-idle CPU as the current CPU is likely busy. 2370 * nr_running is checked to avoid unnecessary task stacking. 
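 *
 * (nr_running <= 1 means the descheduling task is the only runnable
 * task left on that CPU, so queueing @p on its wakelist cannot stack
 * it behind other runnable work)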
2371 */ 2372 if ((wake_flags & WF_ON_RQ) && cpu_rq(cpu)->nr_running <= 1) 2373 return true; 2374 2375 return false; 2376 } 2377 2378 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 2379 { 2380 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) { 2381 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ 2382 __ttwu_queue_wakelist(p, cpu, wake_flags); 2383 return true; 2384 } 2385 2386 return false; 2387 } 2388 #endif /* CONFIG_SMP */ 2389 2390 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 2391 { 2392 struct rq *rq = cpu_rq(cpu); 2393 struct rq_flags rf; 2394 2395 #if defined(CONFIG_SMP) 2396 if (ttwu_queue_wakelist(p, cpu, wake_flags)) 2397 return; 2398 #endif 2399 2400 rq_lock(rq, &rf); 2401 update_rq_clock(rq); 2402 ttwu_do_activate(rq, p, wake_flags, &rf); 2403 rq_unlock(rq, &rf); 2404 } 2405 2406 /* 2407 * Notes on Program-Order guarantees on SMP systems. 2408 * 2409 * MIGRATION 2410 * 2411 * The basic program-order guarantee on SMP systems is that when a task [t] 2412 * migrates, all its activity on its old CPU [c0] happens-before any subsequent 2413 * execution on its new CPU [c1]. 2414 * 2415 * For migration (of runnable tasks) this is provided by the following means: 2416 * 2417 * A) UNLOCK of the rq(c0)->lock scheduling out task t 2418 * B) migration for t is required to synchronize *both* rq(c0)->lock and 2419 * rq(c1)->lock (if not at the same time, then in that order). 2420 * C) LOCK of the rq(c1)->lock scheduling in task 2421 * 2422 * Release/acquire chaining guarantees that B happens after A and C after B. 2423 * Note: the CPU doing B need not be c0 or c1 2424 * 2425 * Example: 2426 * 2427 * CPU0 CPU1 CPU2 2428 * 2429 * LOCK rq(0)->lock 2430 * sched-out X 2431 * sched-in Y 2432 * UNLOCK rq(0)->lock 2433 * 2434 * LOCK rq(0)->lock // orders against CPU0 2435 * dequeue X 2436 * UNLOCK rq(0)->lock 2437 * 2438 * LOCK rq(1)->lock 2439 * enqueue X 2440 * UNLOCK rq(1)->lock 2441 * 2442 * LOCK rq(1)->lock // orders against CPU2 2443 * sched-out Z 2444 * sched-in X 2445 * UNLOCK rq(1)->lock 2446 * 2447 * 2448 * BLOCKING -- aka. SLEEP + WAKEUP 2449 * 2450 * For blocking we (obviously) need to provide the same guarantee as for 2451 * migration. However the means are completely different as there is no lock 2452 * chain to provide order. Instead we do: 2453 * 2454 * 1) smp_store_release(X->on_cpu, 0) 2455 * 2) smp_cond_load_acquire(!X->on_cpu) 2456 * 2457 * Example: 2458 * 2459 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 2460 * 2461 * LOCK rq(0)->lock LOCK X->pi_lock 2462 * dequeue X 2463 * sched-out X 2464 * smp_store_release(X->on_cpu, 0); 2465 * 2466 * smp_cond_load_acquire(&X->on_cpu, !VAL); 2467 * X->state = WAKING 2468 * set_task_cpu(X,2) 2469 * 2470 * LOCK rq(2)->lock 2471 * enqueue X 2472 * X->state = RUNNING 2473 * UNLOCK rq(2)->lock 2474 * 2475 * LOCK rq(2)->lock // orders against CPU1 2476 * sched-out Z 2477 * sched-in X 2478 * UNLOCK rq(2)->lock 2479 * 2480 * UNLOCK X->pi_lock 2481 * UNLOCK rq(0)->lock 2482 * 2483 * 2484 * However, for wakeups there is a second guarantee we must provide, namely we 2485 * must ensure that CONDITION=1 done by the caller can not be reordered with 2486 * accesses to the task state; see try_to_wake_up() and set_current_state(). 
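 *
 * For illustration only (a sketch of the canonical pairing described
 * above, not code lifted from this file), a waiter/waker pair looks
 * like:
 *
 *   CPU0 (waiter)                          CPU1 (waker)
 *
 *   for (;;) {
 *           set_current_state(TASK_UNINTERRUPTIBLE);
 *           if (CONDITION)                  CONDITION = 1;
 *                   break;                  wake_up_process(p);
 *           schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * set_current_state() provides the full barrier that orders the ->state
 * store against the subsequent CONDITION load on the waiter side;
 * try_to_wake_up() provides the matching barrier before it loads
 * ->state on the waker side.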
2487 */ 2488 2489 /** 2490 * try_to_wake_up - wake up a thread 2491 * @p: the thread to be awakened 2492 * @state: the mask of task states that can be woken 2493 * @wake_flags: wake modifier flags (WF_*) 2494 * 2495 * If (@state & @p->state) @p->state = TASK_RUNNING. 2496 * 2497 * If the task was not queued/runnable, also place it back on a runqueue. 2498 * 2499 * Atomic against schedule() which would dequeue a task, also see 2500 * set_current_state(). 2501 * 2502 * This function executes a full memory barrier before accessing the task 2503 * state; see set_current_state(). 2504 * 2505 * Return: %true if @p->state changes (an actual wakeup was done), 2506 * %false otherwise. 2507 */ 2508 static int 2509 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 2510 { 2511 unsigned long flags; 2512 int cpu, success = 0; 2513 2514 preempt_disable(); 2515 if (p == current) { 2516 /* 2517 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) 2518 * == smp_processor_id()'. Together this means we can special 2519 * case the whole 'p->on_rq && ttwu_remote()' case below 2520 * without taking any locks. 2521 * 2522 * In particular: 2523 * - we rely on Program-Order guarantees for all the ordering, 2524 * - we're serialized against set_special_state() by virtue of 2525 * it disabling IRQs (this allows not taking ->pi_lock). 2526 */ 2527 if (!(p->state & state)) 2528 goto out; 2529 2530 success = 1; 2531 cpu = task_cpu(p); 2532 trace_sched_waking(p); 2533 p->state = TASK_RUNNING; 2534 trace_sched_wakeup(p); 2535 goto out; 2536 } 2537 2538 /* 2539 * If we are going to wake up a thread waiting for CONDITION we 2540 * need to ensure that CONDITION=1 done by the caller can not be 2541 * reordered with p->state check below. This pairs with mb() in 2542 * set_current_state() the waiting thread does. 2543 */ 2544 raw_spin_lock_irqsave(&p->pi_lock, flags); 2545 smp_mb__after_spinlock(); 2546 if (!(p->state & state)) 2547 goto unlock; 2548 2549 trace_sched_waking(p); 2550 2551 /* We're going to change ->state: */ 2552 success = 1; 2553 cpu = task_cpu(p); 2554 2555 /* 2556 * Ensure we load p->on_rq _after_ p->state, otherwise it would 2557 * be possible to, falsely, observe p->on_rq == 0 and get stuck 2558 * in smp_cond_load_acquire() below. 2559 * 2560 * sched_ttwu_pending() try_to_wake_up() 2561 * STORE p->on_rq = 1 LOAD p->state 2562 * UNLOCK rq->lock 2563 * 2564 * __schedule() (switch to task 'p') 2565 * LOCK rq->lock smp_rmb(); 2566 * smp_mb__after_spinlock(); 2567 * UNLOCK rq->lock 2568 * 2569 * [task p] 2570 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq 2571 * 2572 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 2573 * __schedule(). See the comment for smp_mb__after_spinlock(). 2574 * 2575 * A similar smb_rmb() lives in try_invoke_on_locked_down_task(). 2576 */ 2577 smp_rmb(); 2578 if (p->on_rq && ttwu_remote(p, wake_flags)) 2579 goto unlock; 2580 2581 if (p->in_iowait) { 2582 delayacct_blkio_end(p); 2583 atomic_dec(&task_rq(p)->nr_iowait); 2584 } 2585 2586 #ifdef CONFIG_SMP 2587 p->sched_contributes_to_load = !!task_contributes_to_load(p); 2588 p->state = TASK_WAKING; 2589 2590 /* 2591 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be 2592 * possible to, falsely, observe p->on_cpu == 0. 2593 * 2594 * One must be running (->on_cpu == 1) in order to remove oneself 2595 * from the runqueue. 
2596 * 2597 * __schedule() (switch to task 'p') try_to_wake_up() 2598 * STORE p->on_cpu = 1 LOAD p->on_rq 2599 * UNLOCK rq->lock 2600 * 2601 * __schedule() (put 'p' to sleep) 2602 * LOCK rq->lock smp_rmb(); 2603 * smp_mb__after_spinlock(); 2604 * STORE p->on_rq = 0 LOAD p->on_cpu 2605 * 2606 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 2607 * __schedule(). See the comment for smp_mb__after_spinlock(). 2608 */ 2609 smp_rmb(); 2610 2611 /* 2612 * If the owning (remote) CPU is still in the middle of schedule() with 2613 * this task as prev, considering queueing p on the remote CPUs wake_list 2614 * which potentially sends an IPI instead of spinning on p->on_cpu to 2615 * let the waker make forward progress. This is safe because IRQs are 2616 * disabled and the IPI will deliver after on_cpu is cleared. 2617 */ 2618 if (READ_ONCE(p->on_cpu) && ttwu_queue_wakelist(p, cpu, wake_flags | WF_ON_RQ)) 2619 goto unlock; 2620 2621 /* 2622 * If the owning (remote) CPU is still in the middle of schedule() with 2623 * this task as prev, wait until its done referencing the task. 2624 * 2625 * Pairs with the smp_store_release() in finish_task(). 2626 * 2627 * This ensures that tasks getting woken will be fully ordered against 2628 * their previous state and preserve Program Order. 2629 */ 2630 smp_cond_load_acquire(&p->on_cpu, !VAL); 2631 2632 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); 2633 if (task_cpu(p) != cpu) { 2634 wake_flags |= WF_MIGRATED; 2635 psi_ttwu_dequeue(p); 2636 set_task_cpu(p, cpu); 2637 } 2638 #endif /* CONFIG_SMP */ 2639 2640 ttwu_queue(p, cpu, wake_flags); 2641 unlock: 2642 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2643 out: 2644 if (success) 2645 ttwu_stat(p, cpu, wake_flags); 2646 preempt_enable(); 2647 2648 return success; 2649 } 2650 2651 /** 2652 * try_invoke_on_locked_down_task - Invoke a function on task in fixed state 2653 * @p: Process for which the function is to be invoked. 2654 * @func: Function to invoke. 2655 * @arg: Argument to function. 2656 * 2657 * If the specified task can be quickly locked into a definite state 2658 * (either sleeping or on a given runqueue), arrange to keep it in that 2659 * state while invoking @func(@arg). This function can use ->on_rq and 2660 * task_curr() to work out what the state is, if required. Given that 2661 * @func can be invoked with a runqueue lock held, it had better be quite 2662 * lightweight. 2663 * 2664 * Returns: 2665 * @false if the task slipped out from under the locks. 2666 * @true if the task was locked onto a runqueue or is sleeping. 2667 * However, @func can override this by returning @false. 2668 */ 2669 bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg) 2670 { 2671 bool ret = false; 2672 struct rq_flags rf; 2673 struct rq *rq; 2674 2675 lockdep_assert_irqs_enabled(); 2676 raw_spin_lock_irq(&p->pi_lock); 2677 if (p->on_rq) { 2678 rq = __task_rq_lock(p, &rf); 2679 if (task_rq(p) == rq) 2680 ret = func(p, arg); 2681 rq_unlock(rq, &rf); 2682 } else { 2683 switch (p->state) { 2684 case TASK_RUNNING: 2685 case TASK_WAKING: 2686 break; 2687 default: 2688 smp_rmb(); // See smp_rmb() comment in try_to_wake_up(). 2689 if (!p->on_rq) 2690 ret = func(p, arg); 2691 } 2692 } 2693 raw_spin_unlock_irq(&p->pi_lock); 2694 return ret; 2695 } 2696 2697 /** 2698 * wake_up_process - Wake up a specific process 2699 * @p: The process to be woken up. 
2700 * 2701 * Attempt to wake up the nominated process and move it to the set of runnable 2702 * processes. 2703 * 2704 * Return: 1 if the process was woken up, 0 if it was already running. 2705 * 2706 * This function executes a full memory barrier before accessing the task state. 2707 */ 2708 int wake_up_process(struct task_struct *p) 2709 { 2710 return try_to_wake_up(p, TASK_NORMAL, 0); 2711 } 2712 EXPORT_SYMBOL(wake_up_process); 2713 2714 int wake_up_state(struct task_struct *p, unsigned int state) 2715 { 2716 return try_to_wake_up(p, state, 0); 2717 } 2718 2719 /* 2720 * Perform scheduler related setup for a newly forked process p. 2721 * p is forked by current. 2722 * 2723 * __sched_fork() is basic setup used by init_idle() too: 2724 */ 2725 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 2726 { 2727 p->on_rq = 0; 2728 2729 p->se.on_rq = 0; 2730 p->se.exec_start = 0; 2731 p->se.sum_exec_runtime = 0; 2732 p->se.prev_sum_exec_runtime = 0; 2733 p->se.nr_migrations = 0; 2734 p->se.vruntime = 0; 2735 INIT_LIST_HEAD(&p->se.group_node); 2736 2737 #ifdef CONFIG_FAIR_GROUP_SCHED 2738 p->se.cfs_rq = NULL; 2739 #endif 2740 2741 #ifdef CONFIG_SCHEDSTATS 2742 /* Even if schedstat is disabled, there should not be garbage */ 2743 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 2744 #endif 2745 2746 RB_CLEAR_NODE(&p->dl.rb_node); 2747 init_dl_task_timer(&p->dl); 2748 init_dl_inactive_task_timer(&p->dl); 2749 __dl_clear_params(p); 2750 2751 INIT_LIST_HEAD(&p->rt.run_list); 2752 p->rt.timeout = 0; 2753 p->rt.time_slice = sched_rr_timeslice; 2754 p->rt.on_rq = 0; 2755 p->rt.on_list = 0; 2756 2757 #ifdef CONFIG_PREEMPT_NOTIFIERS 2758 INIT_HLIST_HEAD(&p->preempt_notifiers); 2759 #endif 2760 2761 #ifdef CONFIG_COMPACTION 2762 p->capture_control = NULL; 2763 #endif 2764 init_numa_balancing(clone_flags, p); 2765 #ifdef CONFIG_SMP 2766 p->wake_entry_type = CSD_TYPE_TTWU; 2767 #endif 2768 } 2769 2770 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 2771 2772 #ifdef CONFIG_NUMA_BALANCING 2773 2774 void set_numabalancing_state(bool enabled) 2775 { 2776 if (enabled) 2777 static_branch_enable(&sched_numa_balancing); 2778 else 2779 static_branch_disable(&sched_numa_balancing); 2780 } 2781 2782 #ifdef CONFIG_PROC_SYSCTL 2783 int sysctl_numa_balancing(struct ctl_table *table, int write, 2784 void *buffer, size_t *lenp, loff_t *ppos) 2785 { 2786 struct ctl_table t; 2787 int err; 2788 int state = static_branch_likely(&sched_numa_balancing); 2789 2790 if (write && !capable(CAP_SYS_ADMIN)) 2791 return -EPERM; 2792 2793 t = *table; 2794 t.data = &state; 2795 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2796 if (err < 0) 2797 return err; 2798 if (write) 2799 set_numabalancing_state(state); 2800 return err; 2801 } 2802 #endif 2803 #endif 2804 2805 #ifdef CONFIG_SCHEDSTATS 2806 2807 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 2808 static bool __initdata __sched_schedstats = false; 2809 2810 static void set_schedstats(bool enabled) 2811 { 2812 if (enabled) 2813 static_branch_enable(&sched_schedstats); 2814 else 2815 static_branch_disable(&sched_schedstats); 2816 } 2817 2818 void force_schedstat_enabled(void) 2819 { 2820 if (!schedstat_enabled()) { 2821 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 2822 static_branch_enable(&sched_schedstats); 2823 } 2824 } 2825 2826 static int __init setup_schedstats(char *str) 2827 { 2828 int ret = 0; 2829 if (!str) 2830 goto out; 2831 2832 /* 2833 * This code is called before jump labels have been set 
up, so we can't 2834 * change the static branch directly just yet. Instead set a temporary 2835 * variable so init_schedstats() can do it later. 2836 */ 2837 if (!strcmp(str, "enable")) { 2838 __sched_schedstats = true; 2839 ret = 1; 2840 } else if (!strcmp(str, "disable")) { 2841 __sched_schedstats = false; 2842 ret = 1; 2843 } 2844 out: 2845 if (!ret) 2846 pr_warn("Unable to parse schedstats=\n"); 2847 2848 return ret; 2849 } 2850 __setup("schedstats=", setup_schedstats); 2851 2852 static void __init init_schedstats(void) 2853 { 2854 set_schedstats(__sched_schedstats); 2855 } 2856 2857 #ifdef CONFIG_PROC_SYSCTL 2858 int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, 2859 size_t *lenp, loff_t *ppos) 2860 { 2861 struct ctl_table t; 2862 int err; 2863 int state = static_branch_likely(&sched_schedstats); 2864 2865 if (write && !capable(CAP_SYS_ADMIN)) 2866 return -EPERM; 2867 2868 t = *table; 2869 t.data = &state; 2870 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2871 if (err < 0) 2872 return err; 2873 if (write) 2874 set_schedstats(state); 2875 return err; 2876 } 2877 #endif /* CONFIG_PROC_SYSCTL */ 2878 #else /* !CONFIG_SCHEDSTATS */ 2879 static inline void init_schedstats(void) {} 2880 #endif /* CONFIG_SCHEDSTATS */ 2881 2882 /* 2883 * fork()/clone()-time setup: 2884 */ 2885 int sched_fork(unsigned long clone_flags, struct task_struct *p) 2886 { 2887 unsigned long flags; 2888 2889 __sched_fork(clone_flags, p); 2890 /* 2891 * We mark the process as NEW here. This guarantees that 2892 * nobody will actually run it, and a signal or other external 2893 * event cannot wake it up and insert it on the runqueue either. 2894 */ 2895 p->state = TASK_NEW; 2896 2897 /* 2898 * Make sure we do not leak PI boosting priority to the child. 2899 */ 2900 p->prio = current->normal_prio; 2901 2902 uclamp_fork(p); 2903 2904 /* 2905 * Revert to default priority/policy on fork if requested. 2906 */ 2907 if (unlikely(p->sched_reset_on_fork)) { 2908 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 2909 p->policy = SCHED_NORMAL; 2910 p->static_prio = NICE_TO_PRIO(0); 2911 p->rt_priority = 0; 2912 } else if (PRIO_TO_NICE(p->static_prio) < 0) 2913 p->static_prio = NICE_TO_PRIO(0); 2914 2915 p->prio = p->normal_prio = __normal_prio(p); 2916 set_load_weight(p, false); 2917 2918 /* 2919 * We don't need the reset flag anymore after the fork. It has 2920 * fulfilled its duty: 2921 */ 2922 p->sched_reset_on_fork = 0; 2923 } 2924 2925 if (dl_prio(p->prio)) 2926 return -EAGAIN; 2927 else if (rt_prio(p->prio)) 2928 p->sched_class = &rt_sched_class; 2929 else 2930 p->sched_class = &fair_sched_class; 2931 2932 init_entity_runnable_average(&p->se); 2933 2934 /* 2935 * The child is not yet in the pid-hash so no cgroup attach races, 2936 * and the cgroup is pinned to this child due to cgroup_fork() 2937 * is ran before sched_fork(). 2938 * 2939 * Silence PROVE_RCU. 2940 */ 2941 raw_spin_lock_irqsave(&p->pi_lock, flags); 2942 /* 2943 * We're setting the CPU for the first time, we don't migrate, 2944 * so use __set_task_cpu(). 
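 *
 * (unlike set_task_cpu(), __set_task_cpu() does no migration accounting
 * and does not invoke sched_class::migrate_task_rq(); see the similar
 * comment in wake_up_new_task() below)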
2945 */ 2946 __set_task_cpu(p, smp_processor_id()); 2947 if (p->sched_class->task_fork) 2948 p->sched_class->task_fork(p); 2949 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2950 2951 #ifdef CONFIG_SCHED_INFO 2952 if (likely(sched_info_on())) 2953 memset(&p->sched_info, 0, sizeof(p->sched_info)); 2954 #endif 2955 #if defined(CONFIG_SMP) 2956 p->on_cpu = 0; 2957 #endif 2958 init_task_preempt_count(p); 2959 #ifdef CONFIG_SMP 2960 plist_node_init(&p->pushable_tasks, MAX_PRIO); 2961 RB_CLEAR_NODE(&p->pushable_dl_tasks); 2962 #endif 2963 return 0; 2964 } 2965 2966 unsigned long to_ratio(u64 period, u64 runtime) 2967 { 2968 if (runtime == RUNTIME_INF) 2969 return BW_UNIT; 2970 2971 /* 2972 * Doing this here saves a lot of checks in all 2973 * the calling paths, and returning zero seems 2974 * safe for them anyway. 2975 */ 2976 if (period == 0) 2977 return 0; 2978 2979 return div64_u64(runtime << BW_SHIFT, period); 2980 } 2981 2982 /* 2983 * wake_up_new_task - wake up a newly created task for the first time. 2984 * 2985 * This function will do some initial scheduler statistics housekeeping 2986 * that must be done for every newly created context, then puts the task 2987 * on the runqueue and wakes it. 2988 */ 2989 void wake_up_new_task(struct task_struct *p) 2990 { 2991 struct rq_flags rf; 2992 struct rq *rq; 2993 2994 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 2995 p->state = TASK_RUNNING; 2996 #ifdef CONFIG_SMP 2997 /* 2998 * Fork balancing, do it here and not earlier because: 2999 * - cpus_ptr can change in the fork path 3000 * - any previously selected CPU might disappear through hotplug 3001 * 3002 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 3003 * as we're not fully set-up yet. 3004 */ 3005 p->recent_used_cpu = task_cpu(p); 3006 __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 3007 #endif 3008 rq = __task_rq_lock(p, &rf); 3009 update_rq_clock(rq); 3010 post_init_entity_util_avg(p); 3011 3012 activate_task(rq, p, ENQUEUE_NOCLOCK); 3013 trace_sched_wakeup_new(p); 3014 check_preempt_curr(rq, p, WF_FORK); 3015 #ifdef CONFIG_SMP 3016 if (p->sched_class->task_woken) { 3017 /* 3018 * Nothing relies on rq->lock after this, so its fine to 3019 * drop it. 3020 */ 3021 rq_unpin_lock(rq, &rf); 3022 p->sched_class->task_woken(rq, p); 3023 rq_repin_lock(rq, &rf); 3024 } 3025 #endif 3026 task_rq_unlock(rq, p, &rf); 3027 } 3028 3029 #ifdef CONFIG_PREEMPT_NOTIFIERS 3030 3031 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); 3032 3033 void preempt_notifier_inc(void) 3034 { 3035 static_branch_inc(&preempt_notifier_key); 3036 } 3037 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 3038 3039 void preempt_notifier_dec(void) 3040 { 3041 static_branch_dec(&preempt_notifier_key); 3042 } 3043 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 3044 3045 /** 3046 * preempt_notifier_register - tell me when current is being preempted & rescheduled 3047 * @notifier: notifier struct to register 3048 */ 3049 void preempt_notifier_register(struct preempt_notifier *notifier) 3050 { 3051 if (!static_branch_unlikely(&preempt_notifier_key)) 3052 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 3053 3054 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 3055 } 3056 EXPORT_SYMBOL_GPL(preempt_notifier_register); 3057 3058 /** 3059 * preempt_notifier_unregister - no longer interested in preemption notifications 3060 * @notifier: notifier struct to unregister 3061 * 3062 * This is *not* safe to call from within a preemption notifier. 
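 *
 * Rough usage sketch (illustrative only; 'my_notifier' and
 * 'my_preempt_ops' are made-up names): embed a struct preempt_notifier
 * in your own per-task state, point ->ops at a struct preempt_ops with
 * sched_in()/sched_out() callbacks, and do the register/unregister from
 * the task that wants the notifications:
 *
 *   preempt_notifier_inc();
 *   my_notifier.ops = &my_preempt_ops;
 *   preempt_notifier_register(&my_notifier);
 *   ...
 *   preempt_notifier_unregister(&my_notifier);
 *   preempt_notifier_dec();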
3063 */ 3064 void preempt_notifier_unregister(struct preempt_notifier *notifier) 3065 { 3066 hlist_del(¬ifier->link); 3067 } 3068 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 3069 3070 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 3071 { 3072 struct preempt_notifier *notifier; 3073 3074 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 3075 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 3076 } 3077 3078 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 3079 { 3080 if (static_branch_unlikely(&preempt_notifier_key)) 3081 __fire_sched_in_preempt_notifiers(curr); 3082 } 3083 3084 static void 3085 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 3086 struct task_struct *next) 3087 { 3088 struct preempt_notifier *notifier; 3089 3090 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 3091 notifier->ops->sched_out(notifier, next); 3092 } 3093 3094 static __always_inline void 3095 fire_sched_out_preempt_notifiers(struct task_struct *curr, 3096 struct task_struct *next) 3097 { 3098 if (static_branch_unlikely(&preempt_notifier_key)) 3099 __fire_sched_out_preempt_notifiers(curr, next); 3100 } 3101 3102 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 3103 3104 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 3105 { 3106 } 3107 3108 static inline void 3109 fire_sched_out_preempt_notifiers(struct task_struct *curr, 3110 struct task_struct *next) 3111 { 3112 } 3113 3114 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 3115 3116 static inline void prepare_task(struct task_struct *next) 3117 { 3118 #ifdef CONFIG_SMP 3119 /* 3120 * Claim the task as running, we do this before switching to it 3121 * such that any running task will have this set. 3122 */ 3123 next->on_cpu = 1; 3124 #endif 3125 } 3126 3127 static inline void finish_task(struct task_struct *prev) 3128 { 3129 #ifdef CONFIG_SMP 3130 /* 3131 * After ->on_cpu is cleared, the task can be moved to a different CPU. 3132 * We must ensure this doesn't happen until the switch is completely 3133 * finished. 3134 * 3135 * In particular, the load of prev->state in finish_task_switch() must 3136 * happen before this. 3137 * 3138 * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 
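 *
 * (hence the smp_store_release() below: every access made while 'prev'
 * was current, including the prev->state read in finish_task_switch(),
 * is ordered before the store that publishes ->on_cpu == 0)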
3139 */ 3140 smp_store_release(&prev->on_cpu, 0); 3141 #endif 3142 } 3143 3144 static inline void 3145 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) 3146 { 3147 /* 3148 * Since the runqueue lock will be released by the next 3149 * task (which is an invalid locking op but in the case 3150 * of the scheduler it's an obvious special-case), so we 3151 * do an early lockdep release here: 3152 */ 3153 rq_unpin_lock(rq, rf); 3154 spin_release(&rq->lock.dep_map, _THIS_IP_); 3155 #ifdef CONFIG_DEBUG_SPINLOCK 3156 /* this is a valid case when another task releases the spinlock */ 3157 rq->lock.owner = next; 3158 #endif 3159 } 3160 3161 static inline void finish_lock_switch(struct rq *rq) 3162 { 3163 /* 3164 * If we are tracking spinlock dependencies then we have to 3165 * fix up the runqueue lock - which gets 'carried over' from 3166 * prev into current: 3167 */ 3168 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 3169 raw_spin_unlock_irq(&rq->lock); 3170 } 3171 3172 /* 3173 * NOP if the arch has not defined these: 3174 */ 3175 3176 #ifndef prepare_arch_switch 3177 # define prepare_arch_switch(next) do { } while (0) 3178 #endif 3179 3180 #ifndef finish_arch_post_lock_switch 3181 # define finish_arch_post_lock_switch() do { } while (0) 3182 #endif 3183 3184 /** 3185 * prepare_task_switch - prepare to switch tasks 3186 * @rq: the runqueue preparing to switch 3187 * @prev: the current task that is being switched out 3188 * @next: the task we are going to switch to. 3189 * 3190 * This is called with the rq lock held and interrupts off. It must 3191 * be paired with a subsequent finish_task_switch after the context 3192 * switch. 3193 * 3194 * prepare_task_switch sets up locking and calls architecture specific 3195 * hooks. 3196 */ 3197 static inline void 3198 prepare_task_switch(struct rq *rq, struct task_struct *prev, 3199 struct task_struct *next) 3200 { 3201 kcov_prepare_switch(prev); 3202 sched_info_switch(rq, prev, next); 3203 perf_event_task_sched_out(prev, next); 3204 rseq_preempt(prev); 3205 fire_sched_out_preempt_notifiers(prev, next); 3206 prepare_task(next); 3207 prepare_arch_switch(next); 3208 } 3209 3210 /** 3211 * finish_task_switch - clean up after a task-switch 3212 * @prev: the thread we just switched away from. 3213 * 3214 * finish_task_switch must be called after the context switch, paired 3215 * with a prepare_task_switch call before the context switch. 3216 * finish_task_switch will reconcile locking set up by prepare_task_switch, 3217 * and do any other architecture-specific cleanup actions. 3218 * 3219 * Note that we may have delayed dropping an mm in context_switch(). If 3220 * so, we finish that here outside of the runqueue lock. (Doing it 3221 * with the lock held can cause deadlocks; see schedule() for 3222 * details.) 3223 * 3224 * The context switch have flipped the stack from under us and restored the 3225 * local variables which were saved when this task called schedule() in the 3226 * past. prev == current is still correct but we need to recalculate this_rq 3227 * because prev may have moved to another CPU. 
3228 */ 3229 static struct rq *finish_task_switch(struct task_struct *prev) 3230 __releases(rq->lock) 3231 { 3232 struct rq *rq = this_rq(); 3233 struct mm_struct *mm = rq->prev_mm; 3234 long prev_state; 3235 3236 /* 3237 * The previous task will have left us with a preempt_count of 2 3238 * because it left us after: 3239 * 3240 * schedule() 3241 * preempt_disable(); // 1 3242 * __schedule() 3243 * raw_spin_lock_irq(&rq->lock) // 2 3244 * 3245 * Also, see FORK_PREEMPT_COUNT. 3246 */ 3247 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, 3248 "corrupted preempt_count: %s/%d/0x%x\n", 3249 current->comm, current->pid, preempt_count())) 3250 preempt_count_set(FORK_PREEMPT_COUNT); 3251 3252 rq->prev_mm = NULL; 3253 3254 /* 3255 * A task struct has one reference for the use as "current". 3256 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 3257 * schedule one last time. The schedule call will never return, and 3258 * the scheduled task must drop that reference. 3259 * 3260 * We must observe prev->state before clearing prev->on_cpu (in 3261 * finish_task), otherwise a concurrent wakeup can get prev 3262 * running on another CPU and we could race with its RUNNING -> DEAD 3263 * transition, resulting in a double drop. 3264 */ 3265 prev_state = prev->state; 3266 vtime_task_switch(prev); 3267 perf_event_task_sched_in(prev, current); 3268 finish_task(prev); 3269 finish_lock_switch(rq); 3270 finish_arch_post_lock_switch(); 3271 kcov_finish_switch(current); 3272 3273 fire_sched_in_preempt_notifiers(current); 3274 /* 3275 * When switching through a kernel thread, the loop in 3276 * membarrier_{private,global}_expedited() may have observed that 3277 * kernel thread and not issued an IPI. It is therefore possible to 3278 * schedule between user->kernel->user threads without passing through 3279 * switch_mm(). Membarrier requires a barrier after storing to 3280 * rq->curr, before returning to userspace, so provide them here: 3281 * 3282 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly 3283 * provided by mmdrop(), 3284 * - a sync_core for SYNC_CORE. 3285 */ 3286 if (mm) { 3287 membarrier_mm_sync_core_before_usermode(mm); 3288 mmdrop(mm); 3289 } 3290 if (unlikely(prev_state == TASK_DEAD)) { 3291 if (prev->sched_class->task_dead) 3292 prev->sched_class->task_dead(prev); 3293 3294 /* 3295 * Remove function-return probe instances associated with this 3296 * task and put them back on the free list. 3297 */ 3298 kprobe_flush_task(prev); 3299 3300 /* Task is done with its stack.
*/ 3301 put_task_stack(prev); 3302 3303 put_task_struct_rcu_user(prev); 3304 } 3305 3306 tick_nohz_task_switch(); 3307 return rq; 3308 } 3309 3310 #ifdef CONFIG_SMP 3311 3312 /* rq->lock is NOT held, but preemption is disabled */ 3313 static void __balance_callback(struct rq *rq) 3314 { 3315 struct callback_head *head, *next; 3316 void (*func)(struct rq *rq); 3317 unsigned long flags; 3318 3319 raw_spin_lock_irqsave(&rq->lock, flags); 3320 head = rq->balance_callback; 3321 rq->balance_callback = NULL; 3322 while (head) { 3323 func = (void (*)(struct rq *))head->func; 3324 next = head->next; 3325 head->next = NULL; 3326 head = next; 3327 3328 func(rq); 3329 } 3330 raw_spin_unlock_irqrestore(&rq->lock, flags); 3331 } 3332 3333 static inline void balance_callback(struct rq *rq) 3334 { 3335 if (unlikely(rq->balance_callback)) 3336 __balance_callback(rq); 3337 } 3338 3339 #else 3340 3341 static inline void balance_callback(struct rq *rq) 3342 { 3343 } 3344 3345 #endif 3346 3347 /** 3348 * schedule_tail - first thing a freshly forked thread must call. 3349 * @prev: the thread we just switched away from. 3350 */ 3351 asmlinkage __visible void schedule_tail(struct task_struct *prev) 3352 __releases(rq->lock) 3353 { 3354 struct rq *rq; 3355 3356 /* 3357 * New tasks start with FORK_PREEMPT_COUNT, see there and 3358 * finish_task_switch() for details. 3359 * 3360 * finish_task_switch() will drop rq->lock() and lower preempt_count 3361 * and the preempt_enable() will end up enabling preemption (on 3362 * PREEMPT_COUNT kernels). 3363 */ 3364 3365 rq = finish_task_switch(prev); 3366 balance_callback(rq); 3367 preempt_enable(); 3368 3369 if (current->set_child_tid) 3370 put_user(task_pid_vnr(current), current->set_child_tid); 3371 3372 calculate_sigpending(); 3373 } 3374 3375 /* 3376 * context_switch - switch to the new MM and the new thread's register state. 3377 */ 3378 static __always_inline struct rq * 3379 context_switch(struct rq *rq, struct task_struct *prev, 3380 struct task_struct *next, struct rq_flags *rf) 3381 { 3382 prepare_task_switch(rq, prev, next); 3383 3384 /* 3385 * For paravirt, this is coupled with an exit in switch_to to 3386 * combine the page table reload and the switch backend into 3387 * one hypercall. 3388 */ 3389 arch_start_context_switch(prev); 3390 3391 /* 3392 * kernel -> kernel lazy + transfer active 3393 * user -> kernel lazy + mmgrab() active 3394 * 3395 * kernel -> user switch + mmdrop() active 3396 * user -> user switch 3397 */ 3398 if (!next->mm) { // to kernel 3399 enter_lazy_tlb(prev->active_mm, next); 3400 3401 next->active_mm = prev->active_mm; 3402 if (prev->mm) // from user 3403 mmgrab(prev->active_mm); 3404 else 3405 prev->active_mm = NULL; 3406 } else { // to user 3407 membarrier_switch_mm(rq, prev->active_mm, next->mm); 3408 /* 3409 * sys_membarrier() requires an smp_mb() between setting 3410 * rq->curr / membarrier_switch_mm() and returning to userspace. 3411 * 3412 * The below provides this either through switch_mm(), or in 3413 * case 'prev->active_mm == next->mm' through 3414 * finish_task_switch()'s mmdrop(). 3415 */ 3416 switch_mm_irqs_off(prev->active_mm, next->mm, next); 3417 3418 if (!prev->mm) { // from kernel 3419 /* will mmdrop() in finish_task_switch(). */ 3420 rq->prev_mm = prev->active_mm; 3421 prev->active_mm = NULL; 3422 } 3423 } 3424 3425 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 3426 3427 prepare_lock_switch(rq, next, rf); 3428 3429 /* Here we just switch the register state and the stack. 
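 *
 * Note that when this task is eventually switched back in, execution
 * resumes right here, with 'prev' now naming whatever task ran
 * immediately before us; that is the value the finish_task_switch()
 * call below operates on.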
*/ 3430 switch_to(prev, next, prev); 3431 barrier(); 3432 3433 return finish_task_switch(prev); 3434 } 3435 3436 /* 3437 * nr_running and nr_context_switches: 3438 * 3439 * externally visible scheduler statistics: current number of runnable 3440 * threads, total number of context switches performed since bootup. 3441 */ 3442 unsigned long nr_running(void) 3443 { 3444 unsigned long i, sum = 0; 3445 3446 for_each_online_cpu(i) 3447 sum += cpu_rq(i)->nr_running; 3448 3449 return sum; 3450 } 3451 3452 /* 3453 * Check if only the current task is running on the CPU. 3454 * 3455 * Caution: this function does not check that the caller has disabled 3456 * preemption, thus the result might have a time-of-check-to-time-of-use 3457 * race. The caller is responsible to use it correctly, for example: 3458 * 3459 * - from a non-preemptible section (of course) 3460 * 3461 * - from a thread that is bound to a single CPU 3462 * 3463 * - in a loop with very short iterations (e.g. a polling loop) 3464 */ 3465 bool single_task_running(void) 3466 { 3467 return raw_rq()->nr_running == 1; 3468 } 3469 EXPORT_SYMBOL(single_task_running); 3470 3471 unsigned long long nr_context_switches(void) 3472 { 3473 int i; 3474 unsigned long long sum = 0; 3475 3476 for_each_possible_cpu(i) 3477 sum += cpu_rq(i)->nr_switches; 3478 3479 return sum; 3480 } 3481 3482 /* 3483 * Consumers of these two interfaces, like for example the cpuidle menu 3484 * governor, are using nonsensical data. Preferring shallow idle state selection 3485 * for a CPU that has IO-wait which might not even end up running the task when 3486 * it does become runnable. 3487 */ 3488 3489 unsigned long nr_iowait_cpu(int cpu) 3490 { 3491 return atomic_read(&cpu_rq(cpu)->nr_iowait); 3492 } 3493 3494 /* 3495 * IO-wait accounting, and how its mostly bollocks (on SMP). 3496 * 3497 * The idea behind IO-wait account is to account the idle time that we could 3498 * have spend running if it were not for IO. That is, if we were to improve the 3499 * storage performance, we'd have a proportional reduction in IO-wait time. 3500 * 3501 * This all works nicely on UP, where, when a task blocks on IO, we account 3502 * idle time as IO-wait, because if the storage were faster, it could've been 3503 * running and we'd not be idle. 3504 * 3505 * This has been extended to SMP, by doing the same for each CPU. This however 3506 * is broken. 3507 * 3508 * Imagine for instance the case where two tasks block on one CPU, only the one 3509 * CPU will have IO-wait accounted, while the other has regular idle. Even 3510 * though, if the storage were faster, both could've ran at the same time, 3511 * utilising both CPUs. 3512 * 3513 * This means, that when looking globally, the current IO-wait accounting on 3514 * SMP is a lower bound, by reason of under accounting. 3515 * 3516 * Worse, since the numbers are provided per CPU, they are sometimes 3517 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly 3518 * associated with any one particular CPU, it can wake to another CPU than it 3519 * blocked on. This means the per CPU IO-wait number is meaningless. 3520 * 3521 * Task CPU affinities can make all that even more 'interesting'. 
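 *
 * A concrete illustration of the per-CPU problem: two tasks block on IO
 * after last running on CPU0; nr_iowait_cpu(0) reads 2 while
 * nr_iowait_cpu(1) reads 0, even though with faster storage both tasks
 * could have been running, one on each CPU.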
3522 */ 3523 3524 unsigned long nr_iowait(void) 3525 { 3526 unsigned long i, sum = 0; 3527 3528 for_each_possible_cpu(i) 3529 sum += nr_iowait_cpu(i); 3530 3531 return sum; 3532 } 3533 3534 #ifdef CONFIG_SMP 3535 3536 /* 3537 * sched_exec - execve() is a valuable balancing opportunity, because at 3538 * this point the task has the smallest effective memory and cache footprint. 3539 */ 3540 void sched_exec(void) 3541 { 3542 struct task_struct *p = current; 3543 unsigned long flags; 3544 int dest_cpu; 3545 3546 raw_spin_lock_irqsave(&p->pi_lock, flags); 3547 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); 3548 if (dest_cpu == smp_processor_id()) 3549 goto unlock; 3550 3551 if (likely(cpu_active(dest_cpu))) { 3552 struct migration_arg arg = { p, dest_cpu }; 3553 3554 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3555 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 3556 return; 3557 } 3558 unlock: 3559 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3560 } 3561 3562 #endif 3563 3564 DEFINE_PER_CPU(struct kernel_stat, kstat); 3565 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 3566 3567 EXPORT_PER_CPU_SYMBOL(kstat); 3568 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 3569 3570 /* 3571 * The function fair_sched_class.update_curr accesses the struct curr 3572 * and its field curr->exec_start; when called from task_sched_runtime(), 3573 * we observe a high rate of cache misses in practice. 3574 * Prefetching this data results in improved performance. 3575 */ 3576 static inline void prefetch_curr_exec_start(struct task_struct *p) 3577 { 3578 #ifdef CONFIG_FAIR_GROUP_SCHED 3579 struct sched_entity *curr = (&p->se)->cfs_rq->curr; 3580 #else 3581 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; 3582 #endif 3583 prefetch(curr); 3584 prefetch(&curr->exec_start); 3585 } 3586 3587 /* 3588 * Return accounted runtime for the task. 3589 * In case the task is currently running, return the runtime plus current's 3590 * pending runtime that have not been accounted yet. 3591 */ 3592 unsigned long long task_sched_runtime(struct task_struct *p) 3593 { 3594 struct rq_flags rf; 3595 struct rq *rq; 3596 u64 ns; 3597 3598 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 3599 /* 3600 * 64-bit doesn't need locks to atomically read a 64-bit value. 3601 * So we have a optimization chance when the task's delta_exec is 0. 3602 * Reading ->on_cpu is racy, but this is ok. 3603 * 3604 * If we race with it leaving CPU, we'll take a lock. So we're correct. 3605 * If we race with it entering CPU, unaccounted time is 0. This is 3606 * indistinguishable from the read occurring a few cycles earlier. 3607 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 3608 * been accounted, so we're correct here as well. 3609 */ 3610 if (!p->on_cpu || !task_on_rq_queued(p)) 3611 return p->se.sum_exec_runtime; 3612 #endif 3613 3614 rq = task_rq_lock(p, &rf); 3615 /* 3616 * Must be ->curr _and_ ->on_rq. If dequeued, we would 3617 * project cycles that may never be accounted to this 3618 * thread, breaking clock_gettime(). 
3619 */ 3620 if (task_current(rq, p) && task_on_rq_queued(p)) { 3621 prefetch_curr_exec_start(p); 3622 update_rq_clock(rq); 3623 p->sched_class->update_curr(rq); 3624 } 3625 ns = p->se.sum_exec_runtime; 3626 task_rq_unlock(rq, p, &rf); 3627 3628 return ns; 3629 } 3630 3631 DEFINE_PER_CPU(unsigned long, thermal_pressure); 3632 3633 void arch_set_thermal_pressure(struct cpumask *cpus, 3634 unsigned long th_pressure) 3635 { 3636 int cpu; 3637 3638 for_each_cpu(cpu, cpus) 3639 WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); 3640 } 3641 3642 /* 3643 * This function gets called by the timer code, with HZ frequency. 3644 * We call it with interrupts disabled. 3645 */ 3646 void scheduler_tick(void) 3647 { 3648 int cpu = smp_processor_id(); 3649 struct rq *rq = cpu_rq(cpu); 3650 struct task_struct *curr = rq->curr; 3651 struct rq_flags rf; 3652 unsigned long thermal_pressure; 3653 3654 arch_scale_freq_tick(); 3655 sched_clock_tick(); 3656 3657 rq_lock(rq, &rf); 3658 3659 update_rq_clock(rq); 3660 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); 3661 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); 3662 curr->sched_class->task_tick(rq, curr, 0); 3663 calc_global_load_tick(rq); 3664 psi_task_tick(rq); 3665 3666 rq_unlock(rq, &rf); 3667 3668 perf_event_task_tick(); 3669 3670 #ifdef CONFIG_SMP 3671 rq->idle_balance = idle_cpu(cpu); 3672 trigger_load_balance(rq); 3673 #endif 3674 } 3675 3676 #ifdef CONFIG_NO_HZ_FULL 3677 3678 struct tick_work { 3679 int cpu; 3680 atomic_t state; 3681 struct delayed_work work; 3682 }; 3683 /* Values for ->state, see diagram below. */ 3684 #define TICK_SCHED_REMOTE_OFFLINE 0 3685 #define TICK_SCHED_REMOTE_OFFLINING 1 3686 #define TICK_SCHED_REMOTE_RUNNING 2 3687 3688 /* 3689 * State diagram for ->state: 3690 * 3691 * 3692 * TICK_SCHED_REMOTE_OFFLINE 3693 * | ^ 3694 * | | 3695 * | | sched_tick_remote() 3696 * | | 3697 * | | 3698 * +--TICK_SCHED_REMOTE_OFFLINING 3699 * | ^ 3700 * | | 3701 * sched_tick_start() | | sched_tick_stop() 3702 * | | 3703 * V | 3704 * TICK_SCHED_REMOTE_RUNNING 3705 * 3706 * 3707 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() 3708 * and sched_tick_start() are happy to leave the state in RUNNING. 3709 */ 3710 3711 static struct tick_work __percpu *tick_work_cpu; 3712 3713 static void sched_tick_remote(struct work_struct *work) 3714 { 3715 struct delayed_work *dwork = to_delayed_work(work); 3716 struct tick_work *twork = container_of(dwork, struct tick_work, work); 3717 int cpu = twork->cpu; 3718 struct rq *rq = cpu_rq(cpu); 3719 struct task_struct *curr; 3720 struct rq_flags rf; 3721 u64 delta; 3722 int os; 3723 3724 /* 3725 * Handle the tick only if it appears the remote CPU is running in full 3726 * dynticks mode. The check is racy by nature, but missing a tick or 3727 * having one too much is no big deal because the scheduler tick updates 3728 * statistics and checks timeslices in a time-independent way, regardless 3729 * of when exactly it is running. 3730 */ 3731 if (!tick_nohz_tick_stopped_cpu(cpu)) 3732 goto out_requeue; 3733 3734 rq_lock_irq(rq, &rf); 3735 curr = rq->curr; 3736 if (cpu_is_offline(cpu)) 3737 goto out_unlock; 3738 3739 update_rq_clock(rq); 3740 3741 if (!is_idle_task(curr)) { 3742 /* 3743 * Make sure the next tick runs within a reasonable 3744 * amount of time. 
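 *
 * ('reasonable' meaning the WARN_ON_ONCE() below fires if the current
 * task has gone more than 3 seconds without its runtime being updated)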
3745 */ 3746 delta = rq_clock_task(rq) - curr->se.exec_start; 3747 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); 3748 } 3749 curr->sched_class->task_tick(rq, curr, 0); 3750 3751 calc_load_nohz_remote(rq); 3752 out_unlock: 3753 rq_unlock_irq(rq, &rf); 3754 out_requeue: 3755 3756 /* 3757 * Run the remote tick once per second (1Hz). This arbitrary 3758 * frequency is large enough to avoid overload but short enough 3759 * to keep scheduler internal stats reasonably up to date. But 3760 * first update state to reflect hotplug activity if required. 3761 */ 3762 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); 3763 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); 3764 if (os == TICK_SCHED_REMOTE_RUNNING) 3765 queue_delayed_work(system_unbound_wq, dwork, HZ); 3766 } 3767 3768 static void sched_tick_start(int cpu) 3769 { 3770 int os; 3771 struct tick_work *twork; 3772 3773 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 3774 return; 3775 3776 WARN_ON_ONCE(!tick_work_cpu); 3777 3778 twork = per_cpu_ptr(tick_work_cpu, cpu); 3779 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); 3780 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); 3781 if (os == TICK_SCHED_REMOTE_OFFLINE) { 3782 twork->cpu = cpu; 3783 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); 3784 queue_delayed_work(system_unbound_wq, &twork->work, HZ); 3785 } 3786 } 3787 3788 #ifdef CONFIG_HOTPLUG_CPU 3789 static void sched_tick_stop(int cpu) 3790 { 3791 struct tick_work *twork; 3792 int os; 3793 3794 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 3795 return; 3796 3797 WARN_ON_ONCE(!tick_work_cpu); 3798 3799 twork = per_cpu_ptr(tick_work_cpu, cpu); 3800 /* There cannot be competing actions, but don't rely on stop-machine. */ 3801 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); 3802 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); 3803 /* Don't cancel, as this would mess up the state machine. */ 3804 } 3805 #endif /* CONFIG_HOTPLUG_CPU */ 3806 3807 int __init sched_tick_offload_init(void) 3808 { 3809 tick_work_cpu = alloc_percpu(struct tick_work); 3810 BUG_ON(!tick_work_cpu); 3811 return 0; 3812 } 3813 3814 #else /* !CONFIG_NO_HZ_FULL */ 3815 static inline void sched_tick_start(int cpu) { } 3816 static inline void sched_tick_stop(int cpu) { } 3817 #endif 3818 3819 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ 3820 defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 3821 /* 3822 * If the value passed in is equal to the current preempt count 3823 * then we just disabled preemption. Start timing the latency. 3824 */ 3825 static inline void preempt_latency_start(int val) 3826 { 3827 if (preempt_count() == val) { 3828 unsigned long ip = get_lock_parent_ip(); 3829 #ifdef CONFIG_DEBUG_PREEMPT 3830 current->preempt_disable_ip = ip; 3831 #endif 3832 trace_preempt_off(CALLER_ADDR0, ip); 3833 } 3834 } 3835 3836 void preempt_count_add(int val) 3837 { 3838 #ifdef CONFIG_DEBUG_PREEMPT 3839 /* 3840 * Underflow? 3841 */ 3842 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 3843 return; 3844 #endif 3845 __preempt_count_add(val); 3846 #ifdef CONFIG_DEBUG_PREEMPT 3847 /* 3848 * Spinlock count overflowing soon? 3849 */ 3850 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 3851 PREEMPT_MASK - 10); 3852 #endif 3853 preempt_latency_start(val); 3854 } 3855 EXPORT_SYMBOL(preempt_count_add); 3856 NOKPROBE_SYMBOL(preempt_count_add); 3857 3858 /* 3859 * If the value passed in equals to the current preempt count 3860 * then we just enabled preemption. Stop timing the latency. 
3861 */ 3862 static inline void preempt_latency_stop(int val) 3863 { 3864 if (preempt_count() == val) 3865 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 3866 } 3867 3868 void preempt_count_sub(int val) 3869 { 3870 #ifdef CONFIG_DEBUG_PREEMPT 3871 /* 3872 * Underflow? 3873 */ 3874 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 3875 return; 3876 /* 3877 * Is the spinlock portion underflowing? 3878 */ 3879 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 3880 !(preempt_count() & PREEMPT_MASK))) 3881 return; 3882 #endif 3883 3884 preempt_latency_stop(val); 3885 __preempt_count_sub(val); 3886 } 3887 EXPORT_SYMBOL(preempt_count_sub); 3888 NOKPROBE_SYMBOL(preempt_count_sub); 3889 3890 #else 3891 static inline void preempt_latency_start(int val) { } 3892 static inline void preempt_latency_stop(int val) { } 3893 #endif 3894 3895 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 3896 { 3897 #ifdef CONFIG_DEBUG_PREEMPT 3898 return p->preempt_disable_ip; 3899 #else 3900 return 0; 3901 #endif 3902 } 3903 3904 /* 3905 * Print scheduling while atomic bug: 3906 */ 3907 static noinline void __schedule_bug(struct task_struct *prev) 3908 { 3909 /* Save this before calling printk(), since that will clobber it */ 3910 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 3911 3912 if (oops_in_progress) 3913 return; 3914 3915 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 3916 prev->comm, prev->pid, preempt_count()); 3917 3918 debug_show_held_locks(prev); 3919 print_modules(); 3920 if (irqs_disabled()) 3921 print_irqtrace_events(prev); 3922 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 3923 && in_atomic_preempt_off()) { 3924 pr_err("Preemption disabled at:"); 3925 print_ip_sym(preempt_disable_ip); 3926 pr_cont("\n"); 3927 } 3928 if (panic_on_warn) 3929 panic("scheduling while atomic\n"); 3930 3931 dump_stack(); 3932 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 3933 } 3934 3935 /* 3936 * Various schedule()-time debugging checks and statistics: 3937 */ 3938 static inline void schedule_debug(struct task_struct *prev, bool preempt) 3939 { 3940 #ifdef CONFIG_SCHED_STACK_END_CHECK 3941 if (task_stack_end_corrupted(prev)) 3942 panic("corrupted stack end detected inside scheduler\n"); 3943 3944 if (task_scs_end_corrupted(prev)) 3945 panic("corrupted shadow stack detected inside scheduler\n"); 3946 #endif 3947 3948 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 3949 if (!preempt && prev->state && prev->non_block_count) { 3950 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", 3951 prev->comm, prev->pid, prev->non_block_count); 3952 dump_stack(); 3953 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 3954 } 3955 #endif 3956 3957 if (unlikely(in_atomic_preempt_off())) { 3958 __schedule_bug(prev); 3959 preempt_count_set(PREEMPT_DISABLED); 3960 } 3961 rcu_sleep_check(); 3962 3963 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 3964 3965 schedstat_inc(this_rq()->sched_count); 3966 } 3967 3968 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, 3969 struct rq_flags *rf) 3970 { 3971 #ifdef CONFIG_SMP 3972 const struct sched_class *class; 3973 /* 3974 * We must do the balancing pass before put_prev_task(), such 3975 * that when we release the rq->lock the task is in the same 3976 * state as before we took rq->lock. 3977 * 3978 * We can terminate the balance pass as soon as we know there is 3979 * a runnable task of @class priority or higher. 
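 *
 * (hence the for_class_range() walk below: start at prev->sched_class
 * and stop at the first class whose ->balance() reports such a task)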
3980	 */
3981	for_class_range(class, prev->sched_class, &idle_sched_class) {
3982		if (class->balance(rq, prev, rf))
3983			break;
3984	}
3985 #endif
3986
3987	put_prev_task(rq, prev);
3988 }
3989
3990 /*
3991  * Pick up the highest-prio task:
3992  */
3993 static inline struct task_struct *
3994 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
3995 {
3996	const struct sched_class *class;
3997	struct task_struct *p;
3998
3999	/*
4000	 * Optimization: we know that if all tasks are in the fair class we can
4001	 * call that function directly, but only if the @prev task wasn't of a
4002	 * higher scheduling class, because otherwise those lose the
4003	 * opportunity to pull in more work from other CPUs.
4004	 */
4005	if (likely((prev->sched_class == &idle_sched_class ||
4006		    prev->sched_class == &fair_sched_class) &&
4007		   rq->nr_running == rq->cfs.h_nr_running)) {
4008
4009		p = pick_next_task_fair(rq, prev, rf);
4010		if (unlikely(p == RETRY_TASK))
4011			goto restart;
4012
4013		/* Assumes fair_sched_class->next == idle_sched_class */
4014		if (!p) {
4015			put_prev_task(rq, prev);
4016			p = pick_next_task_idle(rq);
4017		}
4018
4019		return p;
4020	}
4021
4022 restart:
4023	put_prev_task_balance(rq, prev, rf);
4024
4025	for_each_class(class) {
4026		p = class->pick_next_task(rq);
4027		if (p)
4028			return p;
4029	}
4030
4031	/* The idle class should always have a runnable task: */
4032	BUG();
4033 }
4034
4035 /*
4036  * __schedule() is the main scheduler function.
4037  *
4038  * The main means of driving the scheduler and thus entering this function are:
4039  *
4040  *  1. Explicit blocking: mutex, semaphore, waitqueue, etc.
4041  *
4042  *  2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
4043  *     paths. For example, see arch/x86/entry_64.S.
4044  *
4045  *     To drive preemption between tasks, the scheduler sets the flag in the
4046  *     timer interrupt handler scheduler_tick().
4047  *
4048  *  3. Wakeups don't really cause entry into schedule(). They add a
4049  *     task to the run-queue and that's it.
4050  *
4051  *     Now, if the new task added to the run-queue preempts the current
4052  *     task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
4053  *     called on the nearest possible occasion:
4054  *
4055  *    - If the kernel is preemptible (CONFIG_PREEMPTION=y):
4056  *
4057  *      - in syscall or exception context, at the next outermost
4058  *        preempt_enable(). (this might be as soon as the wake_up()'s
4059  *        spin_unlock()!)
4060  *
4061  *      - in IRQ context, return from interrupt-handler to
4062  *        preemptible context
4063  *
4064  *    - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
4065  *      then at the next:
4066  *
4067  *       - cond_resched() call
4068  *       - explicit schedule() call
4069  *       - return from syscall or exception to user-space
4070  *       - return from interrupt-handler to user-space
4071  *
4072  * WARNING: must be called with preemption disabled!
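 *
 * As a minimal illustrative sketch of case 1 (not quoted from any particular
 * caller; CONDITION is a placeholder), the canonical blocking pattern is:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (CONDITION)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The store of TASK_INTERRUPTIBLE before the condition test is the ordering
 * that the smp_mb__after_spinlock() in __schedule() below relies on when it
 * rechecks signal_pending_state(), closing the race with signal_wake_up().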
4073 */ 4074 static void __sched notrace __schedule(bool preempt) 4075 { 4076 struct task_struct *prev, *next; 4077 unsigned long *switch_count; 4078 struct rq_flags rf; 4079 struct rq *rq; 4080 int cpu; 4081 4082 cpu = smp_processor_id(); 4083 rq = cpu_rq(cpu); 4084 prev = rq->curr; 4085 4086 schedule_debug(prev, preempt); 4087 4088 if (sched_feat(HRTICK)) 4089 hrtick_clear(rq); 4090 4091 local_irq_disable(); 4092 rcu_note_context_switch(preempt); 4093 4094 /* 4095 * Make sure that signal_pending_state()->signal_pending() below 4096 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 4097 * done by the caller to avoid the race with signal_wake_up(). 4098 * 4099 * The membarrier system call requires a full memory barrier 4100 * after coming from user-space, before storing to rq->curr. 4101 */ 4102 rq_lock(rq, &rf); 4103 smp_mb__after_spinlock(); 4104 4105 /* Promote REQ to ACT */ 4106 rq->clock_update_flags <<= 1; 4107 update_rq_clock(rq); 4108 4109 switch_count = &prev->nivcsw; 4110 if (!preempt && prev->state) { 4111 if (signal_pending_state(prev->state, prev)) { 4112 prev->state = TASK_RUNNING; 4113 } else { 4114 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 4115 4116 if (prev->in_iowait) { 4117 atomic_inc(&rq->nr_iowait); 4118 delayacct_blkio_start(); 4119 } 4120 } 4121 switch_count = &prev->nvcsw; 4122 } 4123 4124 next = pick_next_task(rq, prev, &rf); 4125 clear_tsk_need_resched(prev); 4126 clear_preempt_need_resched(); 4127 4128 if (likely(prev != next)) { 4129 rq->nr_switches++; 4130 /* 4131 * RCU users of rcu_dereference(rq->curr) may not see 4132 * changes to task_struct made by pick_next_task(). 4133 */ 4134 RCU_INIT_POINTER(rq->curr, next); 4135 /* 4136 * The membarrier system call requires each architecture 4137 * to have a full memory barrier after updating 4138 * rq->curr, before returning to user-space. 4139 * 4140 * Here are the schemes providing that barrier on the 4141 * various architectures: 4142 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. 4143 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 4144 * - finish_lock_switch() for weakly-ordered 4145 * architectures where spin_unlock is a full barrier, 4146 * - switch_to() for arm64 (weakly-ordered, spin_unlock 4147 * is a RELEASE barrier), 4148 */ 4149 ++*switch_count; 4150 4151 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); 4152 4153 trace_sched_switch(preempt, prev, next); 4154 4155 /* Also unlocks the rq: */ 4156 rq = context_switch(rq, prev, next, &rf); 4157 } else { 4158 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 4159 rq_unlock_irq(rq, &rf); 4160 } 4161 4162 balance_callback(rq); 4163 } 4164 4165 void __noreturn do_task_dead(void) 4166 { 4167 /* Causes final put_task_struct in finish_task_switch(): */ 4168 set_special_state(TASK_DEAD); 4169 4170 /* Tell freezer to ignore us: */ 4171 current->flags |= PF_NOFREEZE; 4172 4173 __schedule(false); 4174 BUG(); 4175 4176 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 4177 for (;;) 4178 cpu_relax(); 4179 } 4180 4181 static inline void sched_submit_work(struct task_struct *tsk) 4182 { 4183 if (!tsk->state) 4184 return; 4185 4186 /* 4187 * If a worker went to sleep, notify and ask workqueue whether 4188 * it wants to wake up a task to maintain concurrency. 
4189 * As this function is called inside the schedule() context, 4190 * we disable preemption to avoid it calling schedule() again 4191 * in the possible wakeup of a kworker and because wq_worker_sleeping() 4192 * requires it. 4193 */ 4194 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 4195 preempt_disable(); 4196 if (tsk->flags & PF_WQ_WORKER) 4197 wq_worker_sleeping(tsk); 4198 else 4199 io_wq_worker_sleeping(tsk); 4200 preempt_enable_no_resched(); 4201 } 4202 4203 if (tsk_is_pi_blocked(tsk)) 4204 return; 4205 4206 /* 4207 * If we are going to sleep and we have plugged IO queued, 4208 * make sure to submit it to avoid deadlocks. 4209 */ 4210 if (blk_needs_flush_plug(tsk)) 4211 blk_schedule_flush_plug(tsk); 4212 } 4213 4214 static void sched_update_worker(struct task_struct *tsk) 4215 { 4216 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 4217 if (tsk->flags & PF_WQ_WORKER) 4218 wq_worker_running(tsk); 4219 else 4220 io_wq_worker_running(tsk); 4221 } 4222 } 4223 4224 asmlinkage __visible void __sched schedule(void) 4225 { 4226 struct task_struct *tsk = current; 4227 4228 sched_submit_work(tsk); 4229 do { 4230 preempt_disable(); 4231 __schedule(false); 4232 sched_preempt_enable_no_resched(); 4233 } while (need_resched()); 4234 sched_update_worker(tsk); 4235 } 4236 EXPORT_SYMBOL(schedule); 4237 4238 /* 4239 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted 4240 * state (have scheduled out non-voluntarily) by making sure that all 4241 * tasks have either left the run queue or have gone into user space. 4242 * As idle tasks do not do either, they must not ever be preempted 4243 * (schedule out non-voluntarily). 4244 * 4245 * schedule_idle() is similar to schedule_preempt_disable() except that it 4246 * never enables preemption because it does not call sched_submit_work(). 4247 */ 4248 void __sched schedule_idle(void) 4249 { 4250 /* 4251 * As this skips calling sched_submit_work(), which the idle task does 4252 * regardless because that function is a nop when the task is in a 4253 * TASK_RUNNING state, make sure this isn't used someplace that the 4254 * current task can be in any other state. Note, idle is always in the 4255 * TASK_RUNNING state. 4256 */ 4257 WARN_ON_ONCE(current->state); 4258 do { 4259 __schedule(false); 4260 } while (need_resched()); 4261 } 4262 4263 #ifdef CONFIG_CONTEXT_TRACKING 4264 asmlinkage __visible void __sched schedule_user(void) 4265 { 4266 /* 4267 * If we come here after a random call to set_need_resched(), 4268 * or we have been woken up remotely but the IPI has not yet arrived, 4269 * we haven't yet exited the RCU idle mode. Do it here manually until 4270 * we find a better solution. 4271 * 4272 * NB: There are buggy callers of this function. Ideally we 4273 * should warn if prev_state != CONTEXT_USER, but that will trigger 4274 * too frequently to make sense yet. 4275 */ 4276 enum ctx_state prev_state = exception_enter(); 4277 schedule(); 4278 exception_exit(prev_state); 4279 } 4280 #endif 4281 4282 /** 4283 * schedule_preempt_disabled - called with preemption disabled 4284 * 4285 * Returns with preemption disabled. 
Note: preempt_count must be 1 4286 */ 4287 void __sched schedule_preempt_disabled(void) 4288 { 4289 sched_preempt_enable_no_resched(); 4290 schedule(); 4291 preempt_disable(); 4292 } 4293 4294 static void __sched notrace preempt_schedule_common(void) 4295 { 4296 do { 4297 /* 4298 * Because the function tracer can trace preempt_count_sub() 4299 * and it also uses preempt_enable/disable_notrace(), if 4300 * NEED_RESCHED is set, the preempt_enable_notrace() called 4301 * by the function tracer will call this function again and 4302 * cause infinite recursion. 4303 * 4304 * Preemption must be disabled here before the function 4305 * tracer can trace. Break up preempt_disable() into two 4306 * calls. One to disable preemption without fear of being 4307 * traced. The other to still record the preemption latency, 4308 * which can also be traced by the function tracer. 4309 */ 4310 preempt_disable_notrace(); 4311 preempt_latency_start(1); 4312 __schedule(true); 4313 preempt_latency_stop(1); 4314 preempt_enable_no_resched_notrace(); 4315 4316 /* 4317 * Check again in case we missed a preemption opportunity 4318 * between schedule and now. 4319 */ 4320 } while (need_resched()); 4321 } 4322 4323 #ifdef CONFIG_PREEMPTION 4324 /* 4325 * This is the entry point to schedule() from in-kernel preemption 4326 * off of preempt_enable. 4327 */ 4328 asmlinkage __visible void __sched notrace preempt_schedule(void) 4329 { 4330 /* 4331 * If there is a non-zero preempt_count or interrupts are disabled, 4332 * we do not want to preempt the current task. Just return.. 4333 */ 4334 if (likely(!preemptible())) 4335 return; 4336 4337 preempt_schedule_common(); 4338 } 4339 NOKPROBE_SYMBOL(preempt_schedule); 4340 EXPORT_SYMBOL(preempt_schedule); 4341 4342 /** 4343 * preempt_schedule_notrace - preempt_schedule called by tracing 4344 * 4345 * The tracing infrastructure uses preempt_enable_notrace to prevent 4346 * recursion and tracing preempt enabling caused by the tracing 4347 * infrastructure itself. But as tracing can happen in areas coming 4348 * from userspace or just about to enter userspace, a preempt enable 4349 * can occur before user_exit() is called. This will cause the scheduler 4350 * to be called when the system is still in usermode. 4351 * 4352 * To prevent this, the preempt_enable_notrace will use this function 4353 * instead of preempt_schedule() to exit user context if needed before 4354 * calling the scheduler. 4355 */ 4356 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 4357 { 4358 enum ctx_state prev_ctx; 4359 4360 if (likely(!preemptible())) 4361 return; 4362 4363 do { 4364 /* 4365 * Because the function tracer can trace preempt_count_sub() 4366 * and it also uses preempt_enable/disable_notrace(), if 4367 * NEED_RESCHED is set, the preempt_enable_notrace() called 4368 * by the function tracer will call this function again and 4369 * cause infinite recursion. 4370 * 4371 * Preemption must be disabled here before the function 4372 * tracer can trace. Break up preempt_disable() into two 4373 * calls. One to disable preemption without fear of being 4374 * traced. The other to still record the preemption latency, 4375 * which can also be traced by the function tracer. 4376 */ 4377 preempt_disable_notrace(); 4378 preempt_latency_start(1); 4379 /* 4380 * Needs preempt disabled in case user_exit() is traced 4381 * and the tracer calls preempt_enable_notrace() causing 4382 * an infinite recursion. 
4383		 */
4384		prev_ctx = exception_enter();
4385		__schedule(true);
4386		exception_exit(prev_ctx);
4387
4388		preempt_latency_stop(1);
4389		preempt_enable_no_resched_notrace();
4390	} while (need_resched());
4391 }
4392 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
4393
4394 #endif /* CONFIG_PREEMPTION */
4395
4396 /*
4397  * This is the entry point to schedule() from kernel preemption
4398  * off of irq context.
4399  * Note that this is called and returns with IRQs disabled. This
4400  * protects us against recursive calls from irq context.
4401  */
4402 asmlinkage __visible void __sched preempt_schedule_irq(void)
4403 {
4404	enum ctx_state prev_state;
4405
4406	/* Catch callers which need to be fixed */
4407	BUG_ON(preempt_count() || !irqs_disabled());
4408
4409	prev_state = exception_enter();
4410
4411	do {
4412		preempt_disable();
4413		local_irq_enable();
4414		__schedule(true);
4415		local_irq_disable();
4416		sched_preempt_enable_no_resched();
4417	} while (need_resched());
4418
4419	exception_exit(prev_state);
4420 }
4421
4422 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
4423			  void *key)
4424 {
4425	return try_to_wake_up(curr->private, mode, wake_flags);
4426 }
4427 EXPORT_SYMBOL(default_wake_function);
4428
4429 #ifdef CONFIG_RT_MUTEXES
4430
4431 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
4432 {
4433	if (pi_task)
4434		prio = min(prio, pi_task->prio);
4435
4436	return prio;
4437 }
4438
4439 static inline int rt_effective_prio(struct task_struct *p, int prio)
4440 {
4441	struct task_struct *pi_task = rt_mutex_get_top_task(p);
4442
4443	return __rt_effective_prio(pi_task, prio);
4444 }
4445
4446 /*
4447  * rt_mutex_setprio - set the current priority of a task
4448  * @p: task to boost
4449  * @pi_task: donor task
4450  *
4451  * This function changes the 'effective' priority of a task. It does
4452  * not touch ->normal_prio like __setscheduler().
4453  *
4454  * Used by the rt_mutex code to implement priority inheritance
4455  * logic. The call site only calls this if the priority of the task changed.
4456  */
4457 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
4458 {
4459	int prio, oldprio, queued, running, queue_flag =
4460		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
4461	const struct sched_class *prev_class;
4462	struct rq_flags rf;
4463	struct rq *rq;
4464
4465	/* XXX used to be waiter->prio, not waiter->task->prio */
4466	prio = __rt_effective_prio(pi_task, p->normal_prio);
4467
4468	/*
4469	 * If nothing changed, bail early.
4470	 */
4471	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
4472		return;
4473
4474	rq = __task_rq_lock(p, &rf);
4475	update_rq_clock(rq);
4476	/*
4477	 * Set under pi_lock && rq->lock, such that the value can be used under
4478	 * either lock.
4479	 *
4480	 * Note that there is plenty of trickery to make this pointer cache work
4481	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
4482	 * ensure a task is de-boosted (pi_task is set to NULL) before the
4483	 * task is allowed to run again (and can exit). This ensures the pointer
4484	 * points to a blocked task -- which guarantees the task is present.
4485	 */
4486	p->pi_top_task = pi_task;
4487
4488	/*
4489	 * For FIFO/RR we only need to set prio, if that matches we're done.
4490	 */
4491	if (prio == p->prio && !dl_prio(prio))
4492		goto out_unlock;
4493
4494	/*
4495	 * Idle task boosting is a no-no in general.
There is one 4496 * exception, when PREEMPT_RT and NOHZ is active: 4497 * 4498 * The idle task calls get_next_timer_interrupt() and holds 4499 * the timer wheel base->lock on the CPU and another CPU wants 4500 * to access the timer (probably to cancel it). We can safely 4501 * ignore the boosting request, as the idle CPU runs this code 4502 * with interrupts disabled and will complete the lock 4503 * protected section without being interrupted. So there is no 4504 * real need to boost. 4505 */ 4506 if (unlikely(p == rq->idle)) { 4507 WARN_ON(p != rq->curr); 4508 WARN_ON(p->pi_blocked_on); 4509 goto out_unlock; 4510 } 4511 4512 trace_sched_pi_setprio(p, pi_task); 4513 oldprio = p->prio; 4514 4515 if (oldprio == prio) 4516 queue_flag &= ~DEQUEUE_MOVE; 4517 4518 prev_class = p->sched_class; 4519 queued = task_on_rq_queued(p); 4520 running = task_current(rq, p); 4521 if (queued) 4522 dequeue_task(rq, p, queue_flag); 4523 if (running) 4524 put_prev_task(rq, p); 4525 4526 /* 4527 * Boosting condition are: 4528 * 1. -rt task is running and holds mutex A 4529 * --> -dl task blocks on mutex A 4530 * 4531 * 2. -dl task is running and holds mutex A 4532 * --> -dl task blocks on mutex A and could preempt the 4533 * running task 4534 */ 4535 if (dl_prio(prio)) { 4536 if (!dl_prio(p->normal_prio) || 4537 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { 4538 p->dl.dl_boosted = 1; 4539 queue_flag |= ENQUEUE_REPLENISH; 4540 } else 4541 p->dl.dl_boosted = 0; 4542 p->sched_class = &dl_sched_class; 4543 } else if (rt_prio(prio)) { 4544 if (dl_prio(oldprio)) 4545 p->dl.dl_boosted = 0; 4546 if (oldprio < prio) 4547 queue_flag |= ENQUEUE_HEAD; 4548 p->sched_class = &rt_sched_class; 4549 } else { 4550 if (dl_prio(oldprio)) 4551 p->dl.dl_boosted = 0; 4552 if (rt_prio(oldprio)) 4553 p->rt.timeout = 0; 4554 p->sched_class = &fair_sched_class; 4555 } 4556 4557 p->prio = prio; 4558 4559 if (queued) 4560 enqueue_task(rq, p, queue_flag); 4561 if (running) 4562 set_next_task(rq, p); 4563 4564 check_class_changed(rq, p, prev_class, oldprio); 4565 out_unlock: 4566 /* Avoid rq from going away on us: */ 4567 preempt_disable(); 4568 __task_rq_unlock(rq, &rf); 4569 4570 balance_callback(rq); 4571 preempt_enable(); 4572 } 4573 #else 4574 static inline int rt_effective_prio(struct task_struct *p, int prio) 4575 { 4576 return prio; 4577 } 4578 #endif 4579 4580 void set_user_nice(struct task_struct *p, long nice) 4581 { 4582 bool queued, running; 4583 int old_prio; 4584 struct rq_flags rf; 4585 struct rq *rq; 4586 4587 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 4588 return; 4589 /* 4590 * We have to be careful, if called from sys_setpriority(), 4591 * the task might be in the middle of scheduling on another CPU. 
4592 */ 4593 rq = task_rq_lock(p, &rf); 4594 update_rq_clock(rq); 4595 4596 /* 4597 * The RT priorities are set via sched_setscheduler(), but we still 4598 * allow the 'normal' nice value to be set - but as expected 4599 * it wont have any effect on scheduling until the task is 4600 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 4601 */ 4602 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 4603 p->static_prio = NICE_TO_PRIO(nice); 4604 goto out_unlock; 4605 } 4606 queued = task_on_rq_queued(p); 4607 running = task_current(rq, p); 4608 if (queued) 4609 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 4610 if (running) 4611 put_prev_task(rq, p); 4612 4613 p->static_prio = NICE_TO_PRIO(nice); 4614 set_load_weight(p, true); 4615 old_prio = p->prio; 4616 p->prio = effective_prio(p); 4617 4618 if (queued) 4619 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 4620 if (running) 4621 set_next_task(rq, p); 4622 4623 /* 4624 * If the task increased its priority or is running and 4625 * lowered its priority, then reschedule its CPU: 4626 */ 4627 p->sched_class->prio_changed(rq, p, old_prio); 4628 4629 out_unlock: 4630 task_rq_unlock(rq, p, &rf); 4631 } 4632 EXPORT_SYMBOL(set_user_nice); 4633 4634 /* 4635 * can_nice - check if a task can reduce its nice value 4636 * @p: task 4637 * @nice: nice value 4638 */ 4639 int can_nice(const struct task_struct *p, const int nice) 4640 { 4641 /* Convert nice value [19,-20] to rlimit style value [1,40]: */ 4642 int nice_rlim = nice_to_rlimit(nice); 4643 4644 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 4645 capable(CAP_SYS_NICE)); 4646 } 4647 4648 #ifdef __ARCH_WANT_SYS_NICE 4649 4650 /* 4651 * sys_nice - change the priority of the current process. 4652 * @increment: priority increment 4653 * 4654 * sys_setpriority is a more generic, but much slower function that 4655 * does similar things. 4656 */ 4657 SYSCALL_DEFINE1(nice, int, increment) 4658 { 4659 long nice, retval; 4660 4661 /* 4662 * Setpriority might change our priority at the same moment. 4663 * We don't have to worry. Conceptually one call occurs first 4664 * and we have a single winner. 4665 */ 4666 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 4667 nice = task_nice(current) + increment; 4668 4669 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 4670 if (increment < 0 && !can_nice(current, nice)) 4671 return -EPERM; 4672 4673 retval = security_task_setnice(current, nice); 4674 if (retval) 4675 return retval; 4676 4677 set_user_nice(current, nice); 4678 return 0; 4679 } 4680 4681 #endif 4682 4683 /** 4684 * task_prio - return the priority value of a given task. 4685 * @p: the task in question. 4686 * 4687 * Return: The priority value as seen by users in /proc. 4688 * RT tasks are offset by -200. Normal tasks are centered 4689 * around 0, value goes from -16 to +15. 4690 */ 4691 int task_prio(const struct task_struct *p) 4692 { 4693 return p->prio - MAX_RT_PRIO; 4694 } 4695 4696 /** 4697 * idle_cpu - is a given CPU idle currently? 4698 * @cpu: the processor in question. 4699 * 4700 * Return: 1 if the CPU is currently idle. 0 otherwise. 4701 */ 4702 int idle_cpu(int cpu) 4703 { 4704 struct rq *rq = cpu_rq(cpu); 4705 4706 if (rq->curr != rq->idle) 4707 return 0; 4708 4709 if (rq->nr_running) 4710 return 0; 4711 4712 #ifdef CONFIG_SMP 4713 if (rq->ttwu_pending) 4714 return 0; 4715 #endif 4716 4717 return 1; 4718 } 4719 4720 /** 4721 * available_idle_cpu - is a given CPU idle for enqueuing work. 4722 * @cpu: the CPU in question. 4723 * 4724 * Return: 1 if the CPU is currently idle. 
0 otherwise. 4725 */ 4726 int available_idle_cpu(int cpu) 4727 { 4728 if (!idle_cpu(cpu)) 4729 return 0; 4730 4731 if (vcpu_is_preempted(cpu)) 4732 return 0; 4733 4734 return 1; 4735 } 4736 4737 /** 4738 * idle_task - return the idle task for a given CPU. 4739 * @cpu: the processor in question. 4740 * 4741 * Return: The idle task for the CPU @cpu. 4742 */ 4743 struct task_struct *idle_task(int cpu) 4744 { 4745 return cpu_rq(cpu)->idle; 4746 } 4747 4748 /** 4749 * find_process_by_pid - find a process with a matching PID value. 4750 * @pid: the pid in question. 4751 * 4752 * The task of @pid, if found. %NULL otherwise. 4753 */ 4754 static struct task_struct *find_process_by_pid(pid_t pid) 4755 { 4756 return pid ? find_task_by_vpid(pid) : current; 4757 } 4758 4759 /* 4760 * sched_setparam() passes in -1 for its policy, to let the functions 4761 * it calls know not to change it. 4762 */ 4763 #define SETPARAM_POLICY -1 4764 4765 static void __setscheduler_params(struct task_struct *p, 4766 const struct sched_attr *attr) 4767 { 4768 int policy = attr->sched_policy; 4769 4770 if (policy == SETPARAM_POLICY) 4771 policy = p->policy; 4772 4773 p->policy = policy; 4774 4775 if (dl_policy(policy)) 4776 __setparam_dl(p, attr); 4777 else if (fair_policy(policy)) 4778 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 4779 4780 /* 4781 * __sched_setscheduler() ensures attr->sched_priority == 0 when 4782 * !rt_policy. Always setting this ensures that things like 4783 * getparam()/getattr() don't report silly values for !rt tasks. 4784 */ 4785 p->rt_priority = attr->sched_priority; 4786 p->normal_prio = normal_prio(p); 4787 set_load_weight(p, true); 4788 } 4789 4790 /* Actually do priority change: must hold pi & rq lock. */ 4791 static void __setscheduler(struct rq *rq, struct task_struct *p, 4792 const struct sched_attr *attr, bool keep_boost) 4793 { 4794 /* 4795 * If params can't change scheduling class changes aren't allowed 4796 * either. 4797 */ 4798 if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS) 4799 return; 4800 4801 __setscheduler_params(p, attr); 4802 4803 /* 4804 * Keep a potential priority boosting if called from 4805 * sched_setscheduler(). 4806 */ 4807 p->prio = normal_prio(p); 4808 if (keep_boost) 4809 p->prio = rt_effective_prio(p, p->prio); 4810 4811 if (dl_prio(p->prio)) 4812 p->sched_class = &dl_sched_class; 4813 else if (rt_prio(p->prio)) 4814 p->sched_class = &rt_sched_class; 4815 else 4816 p->sched_class = &fair_sched_class; 4817 } 4818 4819 /* 4820 * Check the target process has a UID that matches the current process's: 4821 */ 4822 static bool check_same_owner(struct task_struct *p) 4823 { 4824 const struct cred *cred = current_cred(), *pcred; 4825 bool match; 4826 4827 rcu_read_lock(); 4828 pcred = __task_cred(p); 4829 match = (uid_eq(cred->euid, pcred->euid) || 4830 uid_eq(cred->euid, pcred->uid)); 4831 rcu_read_unlock(); 4832 return match; 4833 } 4834 4835 static int __sched_setscheduler(struct task_struct *p, 4836 const struct sched_attr *attr, 4837 bool user, bool pi) 4838 { 4839 int newprio = dl_policy(attr->sched_policy) ? 
MAX_DL_PRIO - 1 : 4840 MAX_RT_PRIO - 1 - attr->sched_priority; 4841 int retval, oldprio, oldpolicy = -1, queued, running; 4842 int new_effective_prio, policy = attr->sched_policy; 4843 const struct sched_class *prev_class; 4844 struct rq_flags rf; 4845 int reset_on_fork; 4846 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 4847 struct rq *rq; 4848 4849 /* The pi code expects interrupts enabled */ 4850 BUG_ON(pi && in_interrupt()); 4851 recheck: 4852 /* Double check policy once rq lock held: */ 4853 if (policy < 0) { 4854 reset_on_fork = p->sched_reset_on_fork; 4855 policy = oldpolicy = p->policy; 4856 } else { 4857 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 4858 4859 if (!valid_policy(policy)) 4860 return -EINVAL; 4861 } 4862 4863 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) 4864 return -EINVAL; 4865 4866 /* 4867 * Valid priorities for SCHED_FIFO and SCHED_RR are 4868 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 4869 * SCHED_BATCH and SCHED_IDLE is 0. 4870 */ 4871 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || 4872 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) 4873 return -EINVAL; 4874 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 4875 (rt_policy(policy) != (attr->sched_priority != 0))) 4876 return -EINVAL; 4877 4878 /* 4879 * Allow unprivileged RT tasks to decrease priority: 4880 */ 4881 if (user && !capable(CAP_SYS_NICE)) { 4882 if (fair_policy(policy)) { 4883 if (attr->sched_nice < task_nice(p) && 4884 !can_nice(p, attr->sched_nice)) 4885 return -EPERM; 4886 } 4887 4888 if (rt_policy(policy)) { 4889 unsigned long rlim_rtprio = 4890 task_rlimit(p, RLIMIT_RTPRIO); 4891 4892 /* Can't set/change the rt policy: */ 4893 if (policy != p->policy && !rlim_rtprio) 4894 return -EPERM; 4895 4896 /* Can't increase priority: */ 4897 if (attr->sched_priority > p->rt_priority && 4898 attr->sched_priority > rlim_rtprio) 4899 return -EPERM; 4900 } 4901 4902 /* 4903 * Can't set/change SCHED_DEADLINE policy at all for now 4904 * (safest behavior); in the future we would like to allow 4905 * unprivileged DL tasks to increase their relative deadline 4906 * or reduce their runtime (both ways reducing utilization) 4907 */ 4908 if (dl_policy(policy)) 4909 return -EPERM; 4910 4911 /* 4912 * Treat SCHED_IDLE as nice 20. Only allow a switch to 4913 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 4914 */ 4915 if (task_has_idle_policy(p) && !idle_policy(policy)) { 4916 if (!can_nice(p, task_nice(p))) 4917 return -EPERM; 4918 } 4919 4920 /* Can't change other user's priorities: */ 4921 if (!check_same_owner(p)) 4922 return -EPERM; 4923 4924 /* Normal users shall not reset the sched_reset_on_fork flag: */ 4925 if (p->sched_reset_on_fork && !reset_on_fork) 4926 return -EPERM; 4927 } 4928 4929 if (user) { 4930 if (attr->sched_flags & SCHED_FLAG_SUGOV) 4931 return -EINVAL; 4932 4933 retval = security_task_setscheduler(p); 4934 if (retval) 4935 return retval; 4936 } 4937 4938 /* Update task specific "requested" clamps */ 4939 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { 4940 retval = uclamp_validate(p, attr); 4941 if (retval) 4942 return retval; 4943 } 4944 4945 if (pi) 4946 cpuset_read_lock(); 4947 4948 /* 4949 * Make sure no PI-waiters arrive (or leave) while we are 4950 * changing the priority of the task: 4951 * 4952 * To be able to change p->policy safely, the appropriate 4953 * runqueue lock must be held. 
4954	 */
4955	rq = task_rq_lock(p, &rf);
4956	update_rq_clock(rq);
4957
4958	/*
4959	 * Changing the policy of the stop threads is a very bad idea:
4960	 */
4961	if (p == rq->stop) {
4962		retval = -EINVAL;
4963		goto unlock;
4964	}
4965
4966	/*
4967	 * If not changing anything, there's no need to proceed further,
4968	 * but store a possible modification of reset_on_fork.
4969	 */
4970	if (unlikely(policy == p->policy)) {
4971		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
4972			goto change;
4973		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
4974			goto change;
4975		if (dl_policy(policy) && dl_param_changed(p, attr))
4976			goto change;
4977		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
4978			goto change;
4979
4980		p->sched_reset_on_fork = reset_on_fork;
4981		retval = 0;
4982		goto unlock;
4983	}
4984 change:
4985
4986	if (user) {
4987 #ifdef CONFIG_RT_GROUP_SCHED
4988		/*
4989		 * Do not allow realtime tasks into groups that have no runtime
4990		 * assigned.
4991		 */
4992		if (rt_bandwidth_enabled() && rt_policy(policy) &&
4993				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4994				!task_group_is_autogroup(task_group(p))) {
4995			retval = -EPERM;
4996			goto unlock;
4997		}
4998 #endif
4999 #ifdef CONFIG_SMP
5000		if (dl_bandwidth_enabled() && dl_policy(policy) &&
5001				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
5002			cpumask_t *span = rq->rd->span;
5003
5004			/*
5005			 * Don't allow tasks with an affinity mask smaller than
5006			 * the entire root_domain to become SCHED_DEADLINE. We
5007			 * will also fail if there's no bandwidth available.
5008			 */
5009			if (!cpumask_subset(span, p->cpus_ptr) ||
5010			    rq->rd->dl_bw.bw == 0) {
5011				retval = -EPERM;
5012				goto unlock;
5013			}
5014		}
5015 #endif
5016	}
5017
5018	/* Re-check policy now with rq lock held: */
5019	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5020		policy = oldpolicy = -1;
5021		task_rq_unlock(rq, p, &rf);
5022		if (pi)
5023			cpuset_read_unlock();
5024		goto recheck;
5025	}
5026
5027	/*
5028	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
5029	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
5030	 * is available.
5031	 */
5032	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
5033		retval = -EBUSY;
5034		goto unlock;
5035	}
5036
5037	p->sched_reset_on_fork = reset_on_fork;
5038	oldprio = p->prio;
5039
5040	if (pi) {
5041		/*
5042		 * Take priority boosted tasks into account. If the new
5043		 * effective priority is unchanged, we just store the new
5044		 * normal parameters and do not touch the scheduler class and
5045		 * the runqueue. This will be done when the task deboosts
5046		 * itself.
5047		 */
5048		new_effective_prio = rt_effective_prio(p, newprio);
5049		if (new_effective_prio == oldprio)
5050			queue_flags &= ~DEQUEUE_MOVE;
5051	}
5052
5053	queued = task_on_rq_queued(p);
5054	running = task_current(rq, p);
5055	if (queued)
5056		dequeue_task(rq, p, queue_flags);
5057	if (running)
5058		put_prev_task(rq, p);
5059
5060	prev_class = p->sched_class;
5061
5062	__setscheduler(rq, p, attr, pi);
5063	__setscheduler_uclamp(p, attr);
5064
5065	if (queued) {
5066		/*
5067		 * We enqueue to tail when the priority of a task is
5068		 * increased (user space view).
5069 */ 5070 if (oldprio < p->prio) 5071 queue_flags |= ENQUEUE_HEAD; 5072 5073 enqueue_task(rq, p, queue_flags); 5074 } 5075 if (running) 5076 set_next_task(rq, p); 5077 5078 check_class_changed(rq, p, prev_class, oldprio); 5079 5080 /* Avoid rq from going away on us: */ 5081 preempt_disable(); 5082 task_rq_unlock(rq, p, &rf); 5083 5084 if (pi) { 5085 cpuset_read_unlock(); 5086 rt_mutex_adjust_pi(p); 5087 } 5088 5089 /* Run balance callbacks after we've adjusted the PI chain: */ 5090 balance_callback(rq); 5091 preempt_enable(); 5092 5093 return 0; 5094 5095 unlock: 5096 task_rq_unlock(rq, p, &rf); 5097 if (pi) 5098 cpuset_read_unlock(); 5099 return retval; 5100 } 5101 5102 static int _sched_setscheduler(struct task_struct *p, int policy, 5103 const struct sched_param *param, bool check) 5104 { 5105 struct sched_attr attr = { 5106 .sched_policy = policy, 5107 .sched_priority = param->sched_priority, 5108 .sched_nice = PRIO_TO_NICE(p->static_prio), 5109 }; 5110 5111 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 5112 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 5113 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 5114 policy &= ~SCHED_RESET_ON_FORK; 5115 attr.sched_policy = policy; 5116 } 5117 5118 return __sched_setscheduler(p, &attr, check, true); 5119 } 5120 /** 5121 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 5122 * @p: the task in question. 5123 * @policy: new policy. 5124 * @param: structure containing the new RT priority. 5125 * 5126 * Return: 0 on success. An error code otherwise. 5127 * 5128 * NOTE that the task may be already dead. 5129 */ 5130 int sched_setscheduler(struct task_struct *p, int policy, 5131 const struct sched_param *param) 5132 { 5133 return _sched_setscheduler(p, policy, param, true); 5134 } 5135 EXPORT_SYMBOL_GPL(sched_setscheduler); 5136 5137 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 5138 { 5139 return __sched_setscheduler(p, attr, true, true); 5140 } 5141 EXPORT_SYMBOL_GPL(sched_setattr); 5142 5143 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) 5144 { 5145 return __sched_setscheduler(p, attr, false, true); 5146 } 5147 5148 /** 5149 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 5150 * @p: the task in question. 5151 * @policy: new policy. 5152 * @param: structure containing the new RT priority. 5153 * 5154 * Just like sched_setscheduler, only don't bother checking if the 5155 * current context has permission. For example, this is needed in 5156 * stop_machine(): we create temporary high priority worker threads, 5157 * but our caller might not have that capability. 5158 * 5159 * Return: 0 on success. An error code otherwise. 
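 *
 * Illustrative sketch only (the kthread pointer and the priority value are
 * made up, not taken from a real caller):
 *
 *	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
 *
 *	if (sched_setscheduler_nocheck(worker, SCHED_FIFO, &sp))
 *		pr_warn("failed to move worker to SCHED_FIFO\n");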
5160 */ 5161 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 5162 const struct sched_param *param) 5163 { 5164 return _sched_setscheduler(p, policy, param, false); 5165 } 5166 EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); 5167 5168 static int 5169 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 5170 { 5171 struct sched_param lparam; 5172 struct task_struct *p; 5173 int retval; 5174 5175 if (!param || pid < 0) 5176 return -EINVAL; 5177 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 5178 return -EFAULT; 5179 5180 rcu_read_lock(); 5181 retval = -ESRCH; 5182 p = find_process_by_pid(pid); 5183 if (likely(p)) 5184 get_task_struct(p); 5185 rcu_read_unlock(); 5186 5187 if (likely(p)) { 5188 retval = sched_setscheduler(p, policy, &lparam); 5189 put_task_struct(p); 5190 } 5191 5192 return retval; 5193 } 5194 5195 /* 5196 * Mimics kernel/events/core.c perf_copy_attr(). 5197 */ 5198 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) 5199 { 5200 u32 size; 5201 int ret; 5202 5203 /* Zero the full structure, so that a short copy will be nice: */ 5204 memset(attr, 0, sizeof(*attr)); 5205 5206 ret = get_user(size, &uattr->size); 5207 if (ret) 5208 return ret; 5209 5210 /* ABI compatibility quirk: */ 5211 if (!size) 5212 size = SCHED_ATTR_SIZE_VER0; 5213 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) 5214 goto err_size; 5215 5216 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); 5217 if (ret) { 5218 if (ret == -E2BIG) 5219 goto err_size; 5220 return ret; 5221 } 5222 5223 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 5224 size < SCHED_ATTR_SIZE_VER1) 5225 return -EINVAL; 5226 5227 /* 5228 * XXX: Do we want to be lenient like existing syscalls; or do we want 5229 * to be strict and return an error on out-of-bounds values? 5230 */ 5231 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 5232 5233 return 0; 5234 5235 err_size: 5236 put_user(sizeof(*attr), &uattr->size); 5237 return -E2BIG; 5238 } 5239 5240 /** 5241 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 5242 * @pid: the pid in question. 5243 * @policy: new policy. 5244 * @param: structure containing the new RT priority. 5245 * 5246 * Return: 0 on success. An error code otherwise. 5247 */ 5248 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) 5249 { 5250 if (policy < 0) 5251 return -EINVAL; 5252 5253 return do_sched_setscheduler(pid, policy, param); 5254 } 5255 5256 /** 5257 * sys_sched_setparam - set/change the RT priority of a thread 5258 * @pid: the pid in question. 5259 * @param: structure containing the new RT priority. 5260 * 5261 * Return: 0 on success. An error code otherwise. 5262 */ 5263 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 5264 { 5265 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 5266 } 5267 5268 /** 5269 * sys_sched_setattr - same as above, but with extended sched_attr 5270 * @pid: the pid in question. 5271 * @uattr: structure containing the extended parameters. 5272 * @flags: for future extension. 
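 *
 * Purely illustrative sketch of a user-space caller (the values are made up;
 * the time fields are in nanoseconds and must satisfy
 * runtime <= deadline <= period for SCHED_DEADLINE):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,
 *		.sched_deadline	= 30 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	syscall(__NR_sched_setattr, 0, &attr, 0);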
5273 */ 5274 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 5275 unsigned int, flags) 5276 { 5277 struct sched_attr attr; 5278 struct task_struct *p; 5279 int retval; 5280 5281 if (!uattr || pid < 0 || flags) 5282 return -EINVAL; 5283 5284 retval = sched_copy_attr(uattr, &attr); 5285 if (retval) 5286 return retval; 5287 5288 if ((int)attr.sched_policy < 0) 5289 return -EINVAL; 5290 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) 5291 attr.sched_policy = SETPARAM_POLICY; 5292 5293 rcu_read_lock(); 5294 retval = -ESRCH; 5295 p = find_process_by_pid(pid); 5296 if (likely(p)) 5297 get_task_struct(p); 5298 rcu_read_unlock(); 5299 5300 if (likely(p)) { 5301 retval = sched_setattr(p, &attr); 5302 put_task_struct(p); 5303 } 5304 5305 return retval; 5306 } 5307 5308 /** 5309 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 5310 * @pid: the pid in question. 5311 * 5312 * Return: On success, the policy of the thread. Otherwise, a negative error 5313 * code. 5314 */ 5315 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 5316 { 5317 struct task_struct *p; 5318 int retval; 5319 5320 if (pid < 0) 5321 return -EINVAL; 5322 5323 retval = -ESRCH; 5324 rcu_read_lock(); 5325 p = find_process_by_pid(pid); 5326 if (p) { 5327 retval = security_task_getscheduler(p); 5328 if (!retval) 5329 retval = p->policy 5330 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 5331 } 5332 rcu_read_unlock(); 5333 return retval; 5334 } 5335 5336 /** 5337 * sys_sched_getparam - get the RT priority of a thread 5338 * @pid: the pid in question. 5339 * @param: structure containing the RT priority. 5340 * 5341 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 5342 * code. 5343 */ 5344 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 5345 { 5346 struct sched_param lp = { .sched_priority = 0 }; 5347 struct task_struct *p; 5348 int retval; 5349 5350 if (!param || pid < 0) 5351 return -EINVAL; 5352 5353 rcu_read_lock(); 5354 p = find_process_by_pid(pid); 5355 retval = -ESRCH; 5356 if (!p) 5357 goto out_unlock; 5358 5359 retval = security_task_getscheduler(p); 5360 if (retval) 5361 goto out_unlock; 5362 5363 if (task_has_rt_policy(p)) 5364 lp.sched_priority = p->rt_priority; 5365 rcu_read_unlock(); 5366 5367 /* 5368 * This one might sleep, we cannot do it with a spinlock held ... 5369 */ 5370 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 5371 5372 return retval; 5373 5374 out_unlock: 5375 rcu_read_unlock(); 5376 return retval; 5377 } 5378 5379 /* 5380 * Copy the kernel size attribute structure (which might be larger 5381 * than what user-space knows about) to user-space. 5382 * 5383 * Note that all cases are valid: user-space buffer can be larger or 5384 * smaller than the kernel-space buffer. The usual case is that both 5385 * have the same size. 5386 */ 5387 static int 5388 sched_attr_copy_to_user(struct sched_attr __user *uattr, 5389 struct sched_attr *kattr, 5390 unsigned int usize) 5391 { 5392 unsigned int ksize = sizeof(*kattr); 5393 5394 if (!access_ok(uattr, usize)) 5395 return -EFAULT; 5396 5397 /* 5398 * sched_getattr() ABI forwards and backwards compatibility: 5399 * 5400 * If usize == ksize then we just copy everything to user-space and all is good. 5401 * 5402 * If usize < ksize then we only copy as much as user-space has space for, 5403 * this keeps ABI compatibility as well. We skip the rest. 
5404 * 5405 * If usize > ksize then user-space is using a newer version of the ABI, 5406 * which part the kernel doesn't know about. Just ignore it - tooling can 5407 * detect the kernel's knowledge of attributes from the attr->size value 5408 * which is set to ksize in this case. 5409 */ 5410 kattr->size = min(usize, ksize); 5411 5412 if (copy_to_user(uattr, kattr, kattr->size)) 5413 return -EFAULT; 5414 5415 return 0; 5416 } 5417 5418 /** 5419 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 5420 * @pid: the pid in question. 5421 * @uattr: structure containing the extended parameters. 5422 * @usize: sizeof(attr) for fwd/bwd comp. 5423 * @flags: for future extension. 5424 */ 5425 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 5426 unsigned int, usize, unsigned int, flags) 5427 { 5428 struct sched_attr kattr = { }; 5429 struct task_struct *p; 5430 int retval; 5431 5432 if (!uattr || pid < 0 || usize > PAGE_SIZE || 5433 usize < SCHED_ATTR_SIZE_VER0 || flags) 5434 return -EINVAL; 5435 5436 rcu_read_lock(); 5437 p = find_process_by_pid(pid); 5438 retval = -ESRCH; 5439 if (!p) 5440 goto out_unlock; 5441 5442 retval = security_task_getscheduler(p); 5443 if (retval) 5444 goto out_unlock; 5445 5446 kattr.sched_policy = p->policy; 5447 if (p->sched_reset_on_fork) 5448 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 5449 if (task_has_dl_policy(p)) 5450 __getparam_dl(p, &kattr); 5451 else if (task_has_rt_policy(p)) 5452 kattr.sched_priority = p->rt_priority; 5453 else 5454 kattr.sched_nice = task_nice(p); 5455 5456 #ifdef CONFIG_UCLAMP_TASK 5457 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 5458 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 5459 #endif 5460 5461 rcu_read_unlock(); 5462 5463 return sched_attr_copy_to_user(uattr, &kattr, usize); 5464 5465 out_unlock: 5466 rcu_read_unlock(); 5467 return retval; 5468 } 5469 5470 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 5471 { 5472 cpumask_var_t cpus_allowed, new_mask; 5473 struct task_struct *p; 5474 int retval; 5475 5476 rcu_read_lock(); 5477 5478 p = find_process_by_pid(pid); 5479 if (!p) { 5480 rcu_read_unlock(); 5481 return -ESRCH; 5482 } 5483 5484 /* Prevent p going away */ 5485 get_task_struct(p); 5486 rcu_read_unlock(); 5487 5488 if (p->flags & PF_NO_SETAFFINITY) { 5489 retval = -EINVAL; 5490 goto out_put_task; 5491 } 5492 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 5493 retval = -ENOMEM; 5494 goto out_put_task; 5495 } 5496 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 5497 retval = -ENOMEM; 5498 goto out_free_cpus_allowed; 5499 } 5500 retval = -EPERM; 5501 if (!check_same_owner(p)) { 5502 rcu_read_lock(); 5503 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 5504 rcu_read_unlock(); 5505 goto out_free_new_mask; 5506 } 5507 rcu_read_unlock(); 5508 } 5509 5510 retval = security_task_setscheduler(p); 5511 if (retval) 5512 goto out_free_new_mask; 5513 5514 5515 cpuset_cpus_allowed(p, cpus_allowed); 5516 cpumask_and(new_mask, in_mask, cpus_allowed); 5517 5518 /* 5519 * Since bandwidth control happens on root_domain basis, 5520 * if admission test is enabled, we only admit -deadline 5521 * tasks allowed to run on all the CPUs in the task's 5522 * root_domain. 
5523 */ 5524 #ifdef CONFIG_SMP 5525 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 5526 rcu_read_lock(); 5527 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { 5528 retval = -EBUSY; 5529 rcu_read_unlock(); 5530 goto out_free_new_mask; 5531 } 5532 rcu_read_unlock(); 5533 } 5534 #endif 5535 again: 5536 retval = __set_cpus_allowed_ptr(p, new_mask, true); 5537 5538 if (!retval) { 5539 cpuset_cpus_allowed(p, cpus_allowed); 5540 if (!cpumask_subset(new_mask, cpus_allowed)) { 5541 /* 5542 * We must have raced with a concurrent cpuset 5543 * update. Just reset the cpus_allowed to the 5544 * cpuset's cpus_allowed 5545 */ 5546 cpumask_copy(new_mask, cpus_allowed); 5547 goto again; 5548 } 5549 } 5550 out_free_new_mask: 5551 free_cpumask_var(new_mask); 5552 out_free_cpus_allowed: 5553 free_cpumask_var(cpus_allowed); 5554 out_put_task: 5555 put_task_struct(p); 5556 return retval; 5557 } 5558 5559 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 5560 struct cpumask *new_mask) 5561 { 5562 if (len < cpumask_size()) 5563 cpumask_clear(new_mask); 5564 else if (len > cpumask_size()) 5565 len = cpumask_size(); 5566 5567 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 5568 } 5569 5570 /** 5571 * sys_sched_setaffinity - set the CPU affinity of a process 5572 * @pid: pid of the process 5573 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 5574 * @user_mask_ptr: user-space pointer to the new CPU mask 5575 * 5576 * Return: 0 on success. An error code otherwise. 5577 */ 5578 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 5579 unsigned long __user *, user_mask_ptr) 5580 { 5581 cpumask_var_t new_mask; 5582 int retval; 5583 5584 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 5585 return -ENOMEM; 5586 5587 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 5588 if (retval == 0) 5589 retval = sched_setaffinity(pid, new_mask); 5590 free_cpumask_var(new_mask); 5591 return retval; 5592 } 5593 5594 long sched_getaffinity(pid_t pid, struct cpumask *mask) 5595 { 5596 struct task_struct *p; 5597 unsigned long flags; 5598 int retval; 5599 5600 rcu_read_lock(); 5601 5602 retval = -ESRCH; 5603 p = find_process_by_pid(pid); 5604 if (!p) 5605 goto out_unlock; 5606 5607 retval = security_task_getscheduler(p); 5608 if (retval) 5609 goto out_unlock; 5610 5611 raw_spin_lock_irqsave(&p->pi_lock, flags); 5612 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); 5613 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 5614 5615 out_unlock: 5616 rcu_read_unlock(); 5617 5618 return retval; 5619 } 5620 5621 /** 5622 * sys_sched_getaffinity - get the CPU affinity of a process 5623 * @pid: pid of the process 5624 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 5625 * @user_mask_ptr: user-space pointer to hold the current CPU mask 5626 * 5627 * Return: size of CPU mask copied to user_mask_ptr on success. An 5628 * error code otherwise. 
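 *
 * Illustrative sketch of a raw user-space call (glibc's wrapper hides the
 * returned byte count and yields 0 on success instead):
 *
 *	cpu_set_t set;
 *	long copied = syscall(__NR_sched_getaffinity, 0, sizeof(set), &set);
 *
 * Note that @len must be a multiple of sizeof(unsigned long) and large
 * enough to hold nr_cpu_ids bits, otherwise -EINVAL is returned.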
5629 */ 5630 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 5631 unsigned long __user *, user_mask_ptr) 5632 { 5633 int ret; 5634 cpumask_var_t mask; 5635 5636 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 5637 return -EINVAL; 5638 if (len & (sizeof(unsigned long)-1)) 5639 return -EINVAL; 5640 5641 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 5642 return -ENOMEM; 5643 5644 ret = sched_getaffinity(pid, mask); 5645 if (ret == 0) { 5646 unsigned int retlen = min(len, cpumask_size()); 5647 5648 if (copy_to_user(user_mask_ptr, mask, retlen)) 5649 ret = -EFAULT; 5650 else 5651 ret = retlen; 5652 } 5653 free_cpumask_var(mask); 5654 5655 return ret; 5656 } 5657 5658 /** 5659 * sys_sched_yield - yield the current processor to other threads. 5660 * 5661 * This function yields the current CPU to other tasks. If there are no 5662 * other threads running on this CPU then this function will return. 5663 * 5664 * Return: 0. 5665 */ 5666 static void do_sched_yield(void) 5667 { 5668 struct rq_flags rf; 5669 struct rq *rq; 5670 5671 rq = this_rq_lock_irq(&rf); 5672 5673 schedstat_inc(rq->yld_count); 5674 current->sched_class->yield_task(rq); 5675 5676 /* 5677 * Since we are going to call schedule() anyway, there's 5678 * no need to preempt or enable interrupts: 5679 */ 5680 preempt_disable(); 5681 rq_unlock(rq, &rf); 5682 sched_preempt_enable_no_resched(); 5683 5684 schedule(); 5685 } 5686 5687 SYSCALL_DEFINE0(sched_yield) 5688 { 5689 do_sched_yield(); 5690 return 0; 5691 } 5692 5693 #ifndef CONFIG_PREEMPTION 5694 int __sched _cond_resched(void) 5695 { 5696 if (should_resched(0)) { 5697 preempt_schedule_common(); 5698 return 1; 5699 } 5700 rcu_all_qs(); 5701 return 0; 5702 } 5703 EXPORT_SYMBOL(_cond_resched); 5704 #endif 5705 5706 /* 5707 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 5708 * call schedule, and on return reacquire the lock. 5709 * 5710 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level 5711 * operations here to prevent schedule() from being called twice (once via 5712 * spin_unlock(), once by hand). 5713 */ 5714 int __cond_resched_lock(spinlock_t *lock) 5715 { 5716 int resched = should_resched(PREEMPT_LOCK_OFFSET); 5717 int ret = 0; 5718 5719 lockdep_assert_held(lock); 5720 5721 if (spin_needbreak(lock) || resched) { 5722 spin_unlock(lock); 5723 if (resched) 5724 preempt_schedule_common(); 5725 else 5726 cpu_relax(); 5727 ret = 1; 5728 spin_lock(lock); 5729 } 5730 return ret; 5731 } 5732 EXPORT_SYMBOL(__cond_resched_lock); 5733 5734 /** 5735 * yield - yield the current processor to other threads. 5736 * 5737 * Do not ever use this function, there's a 99% chance you're doing it wrong. 5738 * 5739 * The scheduler is at all times free to pick the calling task as the most 5740 * eligible task to run, if removing the yield() call from your code breaks 5741 * it, its already broken. 5742 * 5743 * Typical broken usage is: 5744 * 5745 * while (!event) 5746 * yield(); 5747 * 5748 * where one assumes that yield() will let 'the other' process run that will 5749 * make event true. If the current task is a SCHED_FIFO task that will never 5750 * happen. Never use yield() as a progress guarantee!! 5751 * 5752 * If you want to use yield() to wait for something, use wait_event(). 5753 * If you want to use yield() to be 'nice' for others, use cond_resched(). 5754 * If you still want to use yield(), do not! 
5755 */ 5756 void __sched yield(void) 5757 { 5758 set_current_state(TASK_RUNNING); 5759 do_sched_yield(); 5760 } 5761 EXPORT_SYMBOL(yield); 5762 5763 /** 5764 * yield_to - yield the current processor to another thread in 5765 * your thread group, or accelerate that thread toward the 5766 * processor it's on. 5767 * @p: target task 5768 * @preempt: whether task preemption is allowed or not 5769 * 5770 * It's the caller's job to ensure that the target task struct 5771 * can't go away on us before we can do any checks. 5772 * 5773 * Return: 5774 * true (>0) if we indeed boosted the target task. 5775 * false (0) if we failed to boost the target. 5776 * -ESRCH if there's no task to yield to. 5777 */ 5778 int __sched yield_to(struct task_struct *p, bool preempt) 5779 { 5780 struct task_struct *curr = current; 5781 struct rq *rq, *p_rq; 5782 unsigned long flags; 5783 int yielded = 0; 5784 5785 local_irq_save(flags); 5786 rq = this_rq(); 5787 5788 again: 5789 p_rq = task_rq(p); 5790 /* 5791 * If we're the only runnable task on the rq and target rq also 5792 * has only one task, there's absolutely no point in yielding. 5793 */ 5794 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 5795 yielded = -ESRCH; 5796 goto out_irq; 5797 } 5798 5799 double_rq_lock(rq, p_rq); 5800 if (task_rq(p) != p_rq) { 5801 double_rq_unlock(rq, p_rq); 5802 goto again; 5803 } 5804 5805 if (!curr->sched_class->yield_to_task) 5806 goto out_unlock; 5807 5808 if (curr->sched_class != p->sched_class) 5809 goto out_unlock; 5810 5811 if (task_running(p_rq, p) || p->state) 5812 goto out_unlock; 5813 5814 yielded = curr->sched_class->yield_to_task(rq, p, preempt); 5815 if (yielded) { 5816 schedstat_inc(rq->yld_count); 5817 /* 5818 * Make p's CPU reschedule; pick_next_entity takes care of 5819 * fairness. 5820 */ 5821 if (preempt && rq != p_rq) 5822 resched_curr(p_rq); 5823 } 5824 5825 out_unlock: 5826 double_rq_unlock(rq, p_rq); 5827 out_irq: 5828 local_irq_restore(flags); 5829 5830 if (yielded > 0) 5831 schedule(); 5832 5833 return yielded; 5834 } 5835 EXPORT_SYMBOL_GPL(yield_to); 5836 5837 int io_schedule_prepare(void) 5838 { 5839 int old_iowait = current->in_iowait; 5840 5841 current->in_iowait = 1; 5842 blk_schedule_flush_plug(current); 5843 5844 return old_iowait; 5845 } 5846 5847 void io_schedule_finish(int token) 5848 { 5849 current->in_iowait = token; 5850 } 5851 5852 /* 5853 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 5854 * that process accounting knows that this is a task in IO wait state. 5855 */ 5856 long __sched io_schedule_timeout(long timeout) 5857 { 5858 int token; 5859 long ret; 5860 5861 token = io_schedule_prepare(); 5862 ret = schedule_timeout(timeout); 5863 io_schedule_finish(token); 5864 5865 return ret; 5866 } 5867 EXPORT_SYMBOL(io_schedule_timeout); 5868 5869 void __sched io_schedule(void) 5870 { 5871 int token; 5872 5873 token = io_schedule_prepare(); 5874 schedule(); 5875 io_schedule_finish(token); 5876 } 5877 EXPORT_SYMBOL(io_schedule); 5878 5879 /** 5880 * sys_sched_get_priority_max - return maximum RT priority. 5881 * @policy: scheduling class. 5882 * 5883 * Return: On success, this syscall returns the maximum 5884 * rt_priority that can be used by a given scheduling class. 5885 * On failure, a negative error code is returned. 
5886 */ 5887 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 5888 { 5889 int ret = -EINVAL; 5890 5891 switch (policy) { 5892 case SCHED_FIFO: 5893 case SCHED_RR: 5894 ret = MAX_USER_RT_PRIO-1; 5895 break; 5896 case SCHED_DEADLINE: 5897 case SCHED_NORMAL: 5898 case SCHED_BATCH: 5899 case SCHED_IDLE: 5900 ret = 0; 5901 break; 5902 } 5903 return ret; 5904 } 5905 5906 /** 5907 * sys_sched_get_priority_min - return minimum RT priority. 5908 * @policy: scheduling class. 5909 * 5910 * Return: On success, this syscall returns the minimum 5911 * rt_priority that can be used by a given scheduling class. 5912 * On failure, a negative error code is returned. 5913 */ 5914 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 5915 { 5916 int ret = -EINVAL; 5917 5918 switch (policy) { 5919 case SCHED_FIFO: 5920 case SCHED_RR: 5921 ret = 1; 5922 break; 5923 case SCHED_DEADLINE: 5924 case SCHED_NORMAL: 5925 case SCHED_BATCH: 5926 case SCHED_IDLE: 5927 ret = 0; 5928 } 5929 return ret; 5930 } 5931 5932 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 5933 { 5934 struct task_struct *p; 5935 unsigned int time_slice; 5936 struct rq_flags rf; 5937 struct rq *rq; 5938 int retval; 5939 5940 if (pid < 0) 5941 return -EINVAL; 5942 5943 retval = -ESRCH; 5944 rcu_read_lock(); 5945 p = find_process_by_pid(pid); 5946 if (!p) 5947 goto out_unlock; 5948 5949 retval = security_task_getscheduler(p); 5950 if (retval) 5951 goto out_unlock; 5952 5953 rq = task_rq_lock(p, &rf); 5954 time_slice = 0; 5955 if (p->sched_class->get_rr_interval) 5956 time_slice = p->sched_class->get_rr_interval(rq, p); 5957 task_rq_unlock(rq, p, &rf); 5958 5959 rcu_read_unlock(); 5960 jiffies_to_timespec64(time_slice, t); 5961 return 0; 5962 5963 out_unlock: 5964 rcu_read_unlock(); 5965 return retval; 5966 } 5967 5968 /** 5969 * sys_sched_rr_get_interval - return the default timeslice of a process. 5970 * @pid: pid of the process. 5971 * @interval: userspace pointer to the timeslice value. 5972 * 5973 * this syscall writes the default timeslice value of a given process 5974 * into the user-space timespec buffer. A value of '0' means infinity. 5975 * 5976 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 5977 * an error code. 
5978 */ 5979 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 5980 struct __kernel_timespec __user *, interval) 5981 { 5982 struct timespec64 t; 5983 int retval = sched_rr_get_interval(pid, &t); 5984 5985 if (retval == 0) 5986 retval = put_timespec64(&t, interval); 5987 5988 return retval; 5989 } 5990 5991 #ifdef CONFIG_COMPAT_32BIT_TIME 5992 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, 5993 struct old_timespec32 __user *, interval) 5994 { 5995 struct timespec64 t; 5996 int retval = sched_rr_get_interval(pid, &t); 5997 5998 if (retval == 0) 5999 retval = put_old_timespec32(&t, interval); 6000 return retval; 6001 } 6002 #endif 6003 6004 void sched_show_task(struct task_struct *p) 6005 { 6006 unsigned long free = 0; 6007 int ppid; 6008 6009 if (!try_get_task_stack(p)) 6010 return; 6011 6012 printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p)); 6013 6014 if (p->state == TASK_RUNNING) 6015 printk(KERN_CONT " running task "); 6016 #ifdef CONFIG_DEBUG_STACK_USAGE 6017 free = stack_not_used(p); 6018 #endif 6019 ppid = 0; 6020 rcu_read_lock(); 6021 if (pid_alive(p)) 6022 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 6023 rcu_read_unlock(); 6024 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 6025 task_pid_nr(p), ppid, 6026 (unsigned long)task_thread_info(p)->flags); 6027 6028 print_worker_info(KERN_INFO, p); 6029 show_stack(p, NULL); 6030 put_task_stack(p); 6031 } 6032 EXPORT_SYMBOL_GPL(sched_show_task); 6033 6034 static inline bool 6035 state_filter_match(unsigned long state_filter, struct task_struct *p) 6036 { 6037 /* no filter, everything matches */ 6038 if (!state_filter) 6039 return true; 6040 6041 /* filter, but doesn't match */ 6042 if (!(p->state & state_filter)) 6043 return false; 6044 6045 /* 6046 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows 6047 * TASK_KILLABLE). 6048 */ 6049 if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) 6050 return false; 6051 6052 return true; 6053 } 6054 6055 6056 void show_state_filter(unsigned long state_filter) 6057 { 6058 struct task_struct *g, *p; 6059 6060 #if BITS_PER_LONG == 32 6061 printk(KERN_INFO 6062 " task PC stack pid father\n"); 6063 #else 6064 printk(KERN_INFO 6065 " task PC stack pid father\n"); 6066 #endif 6067 rcu_read_lock(); 6068 for_each_process_thread(g, p) { 6069 /* 6070 * reset the NMI-timeout, listing all files on a slow 6071 * console might take a lot of time: 6072 * Also, reset softlockup watchdogs on all CPUs, because 6073 * another CPU might be blocked waiting for us to process 6074 * an IPI. 6075 */ 6076 touch_nmi_watchdog(); 6077 touch_all_softlockup_watchdogs(); 6078 if (state_filter_match(state_filter, p)) 6079 sched_show_task(p); 6080 } 6081 6082 #ifdef CONFIG_SCHED_DEBUG 6083 if (!state_filter) 6084 sysrq_sched_debug_show(); 6085 #endif 6086 rcu_read_unlock(); 6087 /* 6088 * Only show locks if all tasks are dumped: 6089 */ 6090 if (!state_filter) 6091 debug_show_all_locks(); 6092 } 6093 6094 /** 6095 * init_idle - set up an idle thread for a given CPU 6096 * @idle: task in question 6097 * @cpu: CPU the idle task belongs to 6098 * 6099 * NOTE: this function does not set the idle thread's NEED_RESCHED 6100 * flag, to make booting more robust. 
6101 */ 6102 void init_idle(struct task_struct *idle, int cpu) 6103 { 6104 struct rq *rq = cpu_rq(cpu); 6105 unsigned long flags; 6106 6107 __sched_fork(0, idle); 6108 6109 raw_spin_lock_irqsave(&idle->pi_lock, flags); 6110 raw_spin_lock(&rq->lock); 6111 6112 idle->state = TASK_RUNNING; 6113 idle->se.exec_start = sched_clock(); 6114 idle->flags |= PF_IDLE; 6115 6116 scs_task_reset(idle); 6117 kasan_unpoison_task_stack(idle); 6118 6119 #ifdef CONFIG_SMP 6120 /* 6121 * Its possible that init_idle() gets called multiple times on a task, 6122 * in that case do_set_cpus_allowed() will not do the right thing. 6123 * 6124 * And since this is boot we can forgo the serialization. 6125 */ 6126 set_cpus_allowed_common(idle, cpumask_of(cpu)); 6127 #endif 6128 /* 6129 * We're having a chicken and egg problem, even though we are 6130 * holding rq->lock, the CPU isn't yet set to this CPU so the 6131 * lockdep check in task_group() will fail. 6132 * 6133 * Similar case to sched_fork(). / Alternatively we could 6134 * use task_rq_lock() here and obtain the other rq->lock. 6135 * 6136 * Silence PROVE_RCU 6137 */ 6138 rcu_read_lock(); 6139 __set_task_cpu(idle, cpu); 6140 rcu_read_unlock(); 6141 6142 rq->idle = idle; 6143 rcu_assign_pointer(rq->curr, idle); 6144 idle->on_rq = TASK_ON_RQ_QUEUED; 6145 #ifdef CONFIG_SMP 6146 idle->on_cpu = 1; 6147 #endif 6148 raw_spin_unlock(&rq->lock); 6149 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 6150 6151 /* Set the preempt count _outside_ the spinlocks! */ 6152 init_idle_preempt_count(idle, cpu); 6153 6154 /* 6155 * The idle tasks have their own, simple scheduling class: 6156 */ 6157 idle->sched_class = &idle_sched_class; 6158 ftrace_graph_init_idle_task(idle, cpu); 6159 vtime_init_idle(idle, cpu); 6160 #ifdef CONFIG_SMP 6161 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 6162 #endif 6163 } 6164 6165 #ifdef CONFIG_SMP 6166 6167 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 6168 const struct cpumask *trial) 6169 { 6170 int ret = 1; 6171 6172 if (!cpumask_weight(cur)) 6173 return ret; 6174 6175 ret = dl_cpuset_cpumask_can_shrink(cur, trial); 6176 6177 return ret; 6178 } 6179 6180 int task_can_attach(struct task_struct *p, 6181 const struct cpumask *cs_cpus_allowed) 6182 { 6183 int ret = 0; 6184 6185 /* 6186 * Kthreads which disallow setaffinity shouldn't be moved 6187 * to a new cpuset; we don't want to change their CPU 6188 * affinity and isolating such threads by their set of 6189 * allowed nodes is unnecessary. Thus, cpusets are not 6190 * applicable for such threads. This prevents checking for 6191 * success of set_cpus_allowed_ptr() on all attached tasks 6192 * before cpus_mask may be changed. 
6193 */ 6194 if (p->flags & PF_NO_SETAFFINITY) { 6195 ret = -EINVAL; 6196 goto out; 6197 } 6198 6199 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 6200 cs_cpus_allowed)) 6201 ret = dl_task_can_attach(p, cs_cpus_allowed); 6202 6203 out: 6204 return ret; 6205 } 6206 6207 bool sched_smp_initialized __read_mostly; 6208 6209 #ifdef CONFIG_NUMA_BALANCING 6210 /* Migrate current task p to target_cpu */ 6211 int migrate_task_to(struct task_struct *p, int target_cpu) 6212 { 6213 struct migration_arg arg = { p, target_cpu }; 6214 int curr_cpu = task_cpu(p); 6215 6216 if (curr_cpu == target_cpu) 6217 return 0; 6218 6219 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 6220 return -EINVAL; 6221 6222 /* TODO: This is not properly updating schedstats */ 6223 6224 trace_sched_move_numa(p, curr_cpu, target_cpu); 6225 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 6226 } 6227 6228 /* 6229 * Requeue a task on a given node and accurately track the number of NUMA 6230 * tasks on the runqueues 6231 */ 6232 void sched_setnuma(struct task_struct *p, int nid) 6233 { 6234 bool queued, running; 6235 struct rq_flags rf; 6236 struct rq *rq; 6237 6238 rq = task_rq_lock(p, &rf); 6239 queued = task_on_rq_queued(p); 6240 running = task_current(rq, p); 6241 6242 if (queued) 6243 dequeue_task(rq, p, DEQUEUE_SAVE); 6244 if (running) 6245 put_prev_task(rq, p); 6246 6247 p->numa_preferred_nid = nid; 6248 6249 if (queued) 6250 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 6251 if (running) 6252 set_next_task(rq, p); 6253 task_rq_unlock(rq, p, &rf); 6254 } 6255 #endif /* CONFIG_NUMA_BALANCING */ 6256 6257 #ifdef CONFIG_HOTPLUG_CPU 6258 /* 6259 * Ensure that the idle task is using init_mm right before its CPU goes 6260 * offline. 6261 */ 6262 void idle_task_exit(void) 6263 { 6264 struct mm_struct *mm = current->active_mm; 6265 6266 BUG_ON(cpu_online(smp_processor_id())); 6267 BUG_ON(current != this_rq()->idle); 6268 6269 if (mm != &init_mm) { 6270 switch_mm(mm, &init_mm, current); 6271 finish_arch_post_lock_switch(); 6272 } 6273 6274 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ 6275 } 6276 6277 /* 6278 * Since this CPU is going 'away' for a while, fold any nr_active delta 6279 * we might have. Assumes we're called after migrate_tasks() so that the 6280 * nr_active count is stable. We need to take the teardown thread which 6281 * is calling this into account, so we hand in adjust = 1 to the load 6282 * calculation. 6283 * 6284 * Also see the comment "Global load-average calculations". 6285 */ 6286 static void calc_load_migrate(struct rq *rq) 6287 { 6288 long delta = calc_load_fold_active(rq, 1); 6289 if (delta) 6290 atomic_long_add(delta, &calc_load_tasks); 6291 } 6292 6293 static struct task_struct *__pick_migrate_task(struct rq *rq) 6294 { 6295 const struct sched_class *class; 6296 struct task_struct *next; 6297 6298 for_each_class(class) { 6299 next = class->pick_next_task(rq); 6300 if (next) { 6301 next->sched_class->put_prev_task(rq, next); 6302 return next; 6303 } 6304 } 6305 6306 /* The idle class should always have a runnable task */ 6307 BUG(); 6308 } 6309 6310 /* 6311 * Migrate all tasks from the rq, sleeping tasks will be migrated by 6312 * try_to_wake_up()->select_task_rq(). 6313 * 6314 * Called with rq->lock held even though we'er in stop_machine() and 6315 * there's no concurrency possible, we hold the required locks anyway 6316 * because of lock validation efforts. 
6317 */ 6318 static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) 6319 { 6320 struct rq *rq = dead_rq; 6321 struct task_struct *next, *stop = rq->stop; 6322 struct rq_flags orf = *rf; 6323 int dest_cpu; 6324 6325 /* 6326 * Fudge the rq selection such that the below task selection loop 6327 * doesn't get stuck on the currently eligible stop task. 6328 * 6329 * We're currently inside stop_machine() and the rq is either stuck 6330 * in the stop_machine_cpu_stop() loop, or we're executing this code, 6331 * either way we should never end up calling schedule() until we're 6332 * done here. 6333 */ 6334 rq->stop = NULL; 6335 6336 /* 6337 * put_prev_task() and pick_next_task() sched 6338 * class method both need to have an up-to-date 6339 * value of rq->clock[_task] 6340 */ 6341 update_rq_clock(rq); 6342 6343 for (;;) { 6344 /* 6345 * There's this thread running, bail when that's the only 6346 * remaining thread: 6347 */ 6348 if (rq->nr_running == 1) 6349 break; 6350 6351 next = __pick_migrate_task(rq); 6352 6353 /* 6354 * Rules for changing task_struct::cpus_mask are holding 6355 * both pi_lock and rq->lock, such that holding either 6356 * stabilizes the mask. 6357 * 6358 * Drop rq->lock is not quite as disastrous as it usually is 6359 * because !cpu_active at this point, which means load-balance 6360 * will not interfere. Also, stop-machine. 6361 */ 6362 rq_unlock(rq, rf); 6363 raw_spin_lock(&next->pi_lock); 6364 rq_relock(rq, rf); 6365 6366 /* 6367 * Since we're inside stop-machine, _nothing_ should have 6368 * changed the task, WARN if weird stuff happened, because in 6369 * that case the above rq->lock drop is a fail too. 6370 */ 6371 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { 6372 raw_spin_unlock(&next->pi_lock); 6373 continue; 6374 } 6375 6376 /* Find suitable destination for @next, with force if needed. */ 6377 dest_cpu = select_fallback_rq(dead_rq->cpu, next); 6378 rq = __migrate_task(rq, rf, next, dest_cpu); 6379 if (rq != dead_rq) { 6380 rq_unlock(rq, rf); 6381 rq = dead_rq; 6382 *rf = orf; 6383 rq_relock(rq, rf); 6384 } 6385 raw_spin_unlock(&next->pi_lock); 6386 } 6387 6388 rq->stop = stop; 6389 } 6390 #endif /* CONFIG_HOTPLUG_CPU */ 6391 6392 void set_rq_online(struct rq *rq) 6393 { 6394 if (!rq->online) { 6395 const struct sched_class *class; 6396 6397 cpumask_set_cpu(rq->cpu, rq->rd->online); 6398 rq->online = 1; 6399 6400 for_each_class(class) { 6401 if (class->rq_online) 6402 class->rq_online(rq); 6403 } 6404 } 6405 } 6406 6407 void set_rq_offline(struct rq *rq) 6408 { 6409 if (rq->online) { 6410 const struct sched_class *class; 6411 6412 for_each_class(class) { 6413 if (class->rq_offline) 6414 class->rq_offline(rq); 6415 } 6416 6417 cpumask_clear_cpu(rq->cpu, rq->rd->online); 6418 rq->online = 0; 6419 } 6420 } 6421 6422 /* 6423 * used to mark begin/end of suspend/resume: 6424 */ 6425 static int num_cpus_frozen; 6426 6427 /* 6428 * Update cpusets according to cpu_active mask. If cpusets are 6429 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 6430 * around partition_sched_domains(). 6431 * 6432 * If we come here as part of a suspend/resume, don't touch cpusets because we 6433 * want to restore it back to its original state upon resume anyway. 6434 */ 6435 static void cpuset_cpu_active(void) 6436 { 6437 if (cpuhp_tasks_frozen) { 6438 /* 6439 * num_cpus_frozen tracks how many CPUs are involved in suspend 6440 * resume sequence. 
As long as this is not the last online 6441 * operation in the resume sequence, just build a single sched 6442 * domain, ignoring cpusets. 6443 */ 6444 partition_sched_domains(1, NULL, NULL); 6445 if (--num_cpus_frozen) 6446 return; 6447 /* 6448 * This is the last CPU online operation. So fall through and 6449 * restore the original sched domains by considering the 6450 * cpuset configurations. 6451 */ 6452 cpuset_force_rebuild(); 6453 } 6454 cpuset_update_active_cpus(); 6455 } 6456 6457 static int cpuset_cpu_inactive(unsigned int cpu) 6458 { 6459 if (!cpuhp_tasks_frozen) { 6460 if (dl_cpu_busy(cpu)) 6461 return -EBUSY; 6462 cpuset_update_active_cpus(); 6463 } else { 6464 num_cpus_frozen++; 6465 partition_sched_domains(1, NULL, NULL); 6466 } 6467 return 0; 6468 } 6469 6470 int sched_cpu_activate(unsigned int cpu) 6471 { 6472 struct rq *rq = cpu_rq(cpu); 6473 struct rq_flags rf; 6474 6475 #ifdef CONFIG_SCHED_SMT 6476 /* 6477 * When going up, increment the number of cores with SMT present. 6478 */ 6479 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 6480 static_branch_inc_cpuslocked(&sched_smt_present); 6481 #endif 6482 set_cpu_active(cpu, true); 6483 6484 if (sched_smp_initialized) { 6485 sched_domains_numa_masks_set(cpu); 6486 cpuset_cpu_active(); 6487 } 6488 6489 /* 6490 * Put the rq online, if not already. This happens: 6491 * 6492 * 1) In the early boot process, because we build the real domains 6493 * after all CPUs have been brought up. 6494 * 6495 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the 6496 * domains. 6497 */ 6498 rq_lock_irqsave(rq, &rf); 6499 if (rq->rd) { 6500 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 6501 set_rq_online(rq); 6502 } 6503 rq_unlock_irqrestore(rq, &rf); 6504 6505 return 0; 6506 } 6507 6508 int sched_cpu_deactivate(unsigned int cpu) 6509 { 6510 int ret; 6511 6512 set_cpu_active(cpu, false); 6513 /* 6514 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU 6515 * users of this state to go away such that all new such users will 6516 * observe it. 6517 * 6518 * Do sync before park smpboot threads to take care the rcu boost case. 6519 */ 6520 synchronize_rcu(); 6521 6522 #ifdef CONFIG_SCHED_SMT 6523 /* 6524 * When going down, decrement the number of cores with SMT present. 
6525 */ 6526 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 6527 static_branch_dec_cpuslocked(&sched_smt_present); 6528 #endif 6529 6530 if (!sched_smp_initialized) 6531 return 0; 6532 6533 ret = cpuset_cpu_inactive(cpu); 6534 if (ret) { 6535 set_cpu_active(cpu, true); 6536 return ret; 6537 } 6538 sched_domains_numa_masks_clear(cpu); 6539 return 0; 6540 } 6541 6542 static void sched_rq_cpu_starting(unsigned int cpu) 6543 { 6544 struct rq *rq = cpu_rq(cpu); 6545 6546 rq->calc_load_update = calc_load_update; 6547 update_max_interval(); 6548 } 6549 6550 int sched_cpu_starting(unsigned int cpu) 6551 { 6552 sched_rq_cpu_starting(cpu); 6553 sched_tick_start(cpu); 6554 return 0; 6555 } 6556 6557 #ifdef CONFIG_HOTPLUG_CPU 6558 int sched_cpu_dying(unsigned int cpu) 6559 { 6560 struct rq *rq = cpu_rq(cpu); 6561 struct rq_flags rf; 6562 6563 /* Handle pending wakeups and then migrate everything off */ 6564 sched_tick_stop(cpu); 6565 6566 rq_lock_irqsave(rq, &rf); 6567 if (rq->rd) { 6568 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 6569 set_rq_offline(rq); 6570 } 6571 migrate_tasks(rq, &rf); 6572 BUG_ON(rq->nr_running != 1); 6573 rq_unlock_irqrestore(rq, &rf); 6574 6575 calc_load_migrate(rq); 6576 update_max_interval(); 6577 nohz_balance_exit_idle(rq); 6578 hrtick_clear(rq); 6579 return 0; 6580 } 6581 #endif 6582 6583 void __init sched_init_smp(void) 6584 { 6585 sched_init_numa(); 6586 6587 /* 6588 * There's no userspace yet to cause hotplug operations; hence all the 6589 * CPU masks are stable and all blatant races in the below code cannot 6590 * happen. 6591 */ 6592 mutex_lock(&sched_domains_mutex); 6593 sched_init_domains(cpu_active_mask); 6594 mutex_unlock(&sched_domains_mutex); 6595 6596 /* Move init over to a non-isolated CPU */ 6597 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) 6598 BUG(); 6599 sched_init_granularity(); 6600 6601 init_sched_rt_class(); 6602 init_sched_dl_class(); 6603 6604 sched_smp_initialized = true; 6605 } 6606 6607 static int __init migration_init(void) 6608 { 6609 sched_cpu_starting(smp_processor_id()); 6610 return 0; 6611 } 6612 early_initcall(migration_init); 6613 6614 #else 6615 void __init sched_init_smp(void) 6616 { 6617 sched_init_granularity(); 6618 } 6619 #endif /* CONFIG_SMP */ 6620 6621 int in_sched_functions(unsigned long addr) 6622 { 6623 return in_lock_functions(addr) || 6624 (addr >= (unsigned long)__sched_text_start 6625 && addr < (unsigned long)__sched_text_end); 6626 } 6627 6628 #ifdef CONFIG_CGROUP_SCHED 6629 /* 6630 * Default task group. 6631 * Every task in system belongs to this group at bootup. 
6632 */ 6633 struct task_group root_task_group; 6634 LIST_HEAD(task_groups); 6635 6636 /* Cacheline aligned slab cache for task_group */ 6637 static struct kmem_cache *task_group_cache __read_mostly; 6638 #endif 6639 6640 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 6641 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask); 6642 6643 void __init sched_init(void) 6644 { 6645 unsigned long ptr = 0; 6646 int i; 6647 6648 wait_bit_init(); 6649 6650 #ifdef CONFIG_FAIR_GROUP_SCHED 6651 ptr += 2 * nr_cpu_ids * sizeof(void **); 6652 #endif 6653 #ifdef CONFIG_RT_GROUP_SCHED 6654 ptr += 2 * nr_cpu_ids * sizeof(void **); 6655 #endif 6656 if (ptr) { 6657 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); 6658 6659 #ifdef CONFIG_FAIR_GROUP_SCHED 6660 root_task_group.se = (struct sched_entity **)ptr; 6661 ptr += nr_cpu_ids * sizeof(void **); 6662 6663 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 6664 ptr += nr_cpu_ids * sizeof(void **); 6665 6666 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 6667 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 6668 #endif /* CONFIG_FAIR_GROUP_SCHED */ 6669 #ifdef CONFIG_RT_GROUP_SCHED 6670 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 6671 ptr += nr_cpu_ids * sizeof(void **); 6672 6673 root_task_group.rt_rq = (struct rt_rq **)ptr; 6674 ptr += nr_cpu_ids * sizeof(void **); 6675 6676 #endif /* CONFIG_RT_GROUP_SCHED */ 6677 } 6678 #ifdef CONFIG_CPUMASK_OFFSTACK 6679 for_each_possible_cpu(i) { 6680 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 6681 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 6682 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node( 6683 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 6684 } 6685 #endif /* CONFIG_CPUMASK_OFFSTACK */ 6686 6687 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); 6688 init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime()); 6689 6690 #ifdef CONFIG_SMP 6691 init_defrootdomain(); 6692 #endif 6693 6694 #ifdef CONFIG_RT_GROUP_SCHED 6695 init_rt_bandwidth(&root_task_group.rt_bandwidth, 6696 global_rt_period(), global_rt_runtime()); 6697 #endif /* CONFIG_RT_GROUP_SCHED */ 6698 6699 #ifdef CONFIG_CGROUP_SCHED 6700 task_group_cache = KMEM_CACHE(task_group, 0); 6701 6702 list_add(&root_task_group.list, &task_groups); 6703 INIT_LIST_HEAD(&root_task_group.children); 6704 INIT_LIST_HEAD(&root_task_group.siblings); 6705 autogroup_init(&init_task); 6706 #endif /* CONFIG_CGROUP_SCHED */ 6707 6708 for_each_possible_cpu(i) { 6709 struct rq *rq; 6710 6711 rq = cpu_rq(i); 6712 raw_spin_lock_init(&rq->lock); 6713 rq->nr_running = 0; 6714 rq->calc_load_active = 0; 6715 rq->calc_load_update = jiffies + LOAD_FREQ; 6716 init_cfs_rq(&rq->cfs); 6717 init_rt_rq(&rq->rt); 6718 init_dl_rq(&rq->dl); 6719 #ifdef CONFIG_FAIR_GROUP_SCHED 6720 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 6721 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 6722 /* 6723 * How much CPU bandwidth does root_task_group get? 6724 * 6725 * In case of task-groups formed thr' the cgroup filesystem, it 6726 * gets 100% of the CPU resources in the system. This overall 6727 * system CPU resource is divided among the tasks of 6728 * root_task_group and its child task-groups in a fair manner, 6729 * based on each entity's (task or task-group's) weight 6730 * (se->load.weight). 
6731 * 6732 * In other words, if root_task_group has 10 tasks of weight 6733 * 1024) and two child groups A0 and A1 (of weight 1024 each), 6734 * then A0's share of the CPU resource is: 6735 * 6736 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 6737 * 6738 * We achieve this by letting root_task_group's tasks sit 6739 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 6740 */ 6741 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 6742 #endif /* CONFIG_FAIR_GROUP_SCHED */ 6743 6744 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 6745 #ifdef CONFIG_RT_GROUP_SCHED 6746 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 6747 #endif 6748 #ifdef CONFIG_SMP 6749 rq->sd = NULL; 6750 rq->rd = NULL; 6751 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 6752 rq->balance_callback = NULL; 6753 rq->active_balance = 0; 6754 rq->next_balance = jiffies; 6755 rq->push_cpu = 0; 6756 rq->cpu = i; 6757 rq->online = 0; 6758 rq->idle_stamp = 0; 6759 rq->avg_idle = 2*sysctl_sched_migration_cost; 6760 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 6761 6762 INIT_LIST_HEAD(&rq->cfs_tasks); 6763 6764 rq_attach_root(rq, &def_root_domain); 6765 #ifdef CONFIG_NO_HZ_COMMON 6766 rq->last_blocked_load_update_tick = jiffies; 6767 atomic_set(&rq->nohz_flags, 0); 6768 6769 rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func); 6770 #endif 6771 #endif /* CONFIG_SMP */ 6772 hrtick_rq_init(rq); 6773 atomic_set(&rq->nr_iowait, 0); 6774 } 6775 6776 set_load_weight(&init_task, false); 6777 6778 /* 6779 * The boot idle thread does lazy MMU switching as well: 6780 */ 6781 mmgrab(&init_mm); 6782 enter_lazy_tlb(&init_mm, current); 6783 6784 /* 6785 * Make us the idle thread. Technically, schedule() should not be 6786 * called from this thread, however somewhere below it might be, 6787 * but because we are the idle thread, we just pick up running again 6788 * when this runqueue becomes "idle". 6789 */ 6790 init_idle(current, smp_processor_id()); 6791 6792 calc_load_update = jiffies + LOAD_FREQ; 6793 6794 #ifdef CONFIG_SMP 6795 idle_thread_set_boot_cpu(); 6796 #endif 6797 init_sched_fair_class(); 6798 6799 init_schedstats(); 6800 6801 psi_init(); 6802 6803 init_uclamp(); 6804 6805 scheduler_running = 1; 6806 } 6807 6808 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 6809 static inline int preempt_count_equals(int preempt_offset) 6810 { 6811 int nested = preempt_count() + rcu_preempt_depth(); 6812 6813 return (nested == preempt_offset); 6814 } 6815 6816 void __might_sleep(const char *file, int line, int preempt_offset) 6817 { 6818 /* 6819 * Blocking primitives will set (and therefore destroy) current->state, 6820 * since we will exit with TASK_RUNNING make sure we enter with it, 6821 * otherwise we will destroy state. 
6822 */ 6823 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 6824 "do not call blocking ops when !TASK_RUNNING; " 6825 "state=%lx set at [<%p>] %pS\n", 6826 current->state, 6827 (void *)current->task_state_change, 6828 (void *)current->task_state_change); 6829 6830 ___might_sleep(file, line, preempt_offset); 6831 } 6832 EXPORT_SYMBOL(__might_sleep); 6833 6834 void ___might_sleep(const char *file, int line, int preempt_offset) 6835 { 6836 /* Ratelimiting timestamp: */ 6837 static unsigned long prev_jiffy; 6838 6839 unsigned long preempt_disable_ip; 6840 6841 /* WARN_ON_ONCE() by default, no rate limit required: */ 6842 rcu_sleep_check(); 6843 6844 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 6845 !is_idle_task(current) && !current->non_block_count) || 6846 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 6847 oops_in_progress) 6848 return; 6849 6850 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 6851 return; 6852 prev_jiffy = jiffies; 6853 6854 /* Save this before calling printk(), since that will clobber it: */ 6855 preempt_disable_ip = get_preempt_disable_ip(current); 6856 6857 printk(KERN_ERR 6858 "BUG: sleeping function called from invalid context at %s:%d\n", 6859 file, line); 6860 printk(KERN_ERR 6861 "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", 6862 in_atomic(), irqs_disabled(), current->non_block_count, 6863 current->pid, current->comm); 6864 6865 if (task_stack_end_corrupted(current)) 6866 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 6867 6868 debug_show_held_locks(current); 6869 if (irqs_disabled()) 6870 print_irqtrace_events(current); 6871 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 6872 && !preempt_count_equals(preempt_offset)) { 6873 pr_err("Preemption disabled at:"); 6874 print_ip_sym(preempt_disable_ip); 6875 pr_cont("\n"); 6876 } 6877 dump_stack(); 6878 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 6879 } 6880 EXPORT_SYMBOL(___might_sleep); 6881 6882 void __cant_sleep(const char *file, int line, int preempt_offset) 6883 { 6884 static unsigned long prev_jiffy; 6885 6886 if (irqs_disabled()) 6887 return; 6888 6889 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 6890 return; 6891 6892 if (preempt_count() > preempt_offset) 6893 return; 6894 6895 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 6896 return; 6897 prev_jiffy = jiffies; 6898 6899 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 6900 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 6901 in_atomic(), irqs_disabled(), 6902 current->pid, current->comm); 6903 6904 debug_show_held_locks(current); 6905 dump_stack(); 6906 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 6907 } 6908 EXPORT_SYMBOL_GPL(__cant_sleep); 6909 #endif 6910 6911 #ifdef CONFIG_MAGIC_SYSRQ 6912 void normalize_rt_tasks(void) 6913 { 6914 struct task_struct *g, *p; 6915 struct sched_attr attr = { 6916 .sched_policy = SCHED_NORMAL, 6917 }; 6918 6919 read_lock(&tasklist_lock); 6920 for_each_process_thread(g, p) { 6921 /* 6922 * Only normalize user tasks: 6923 */ 6924 if (p->flags & PF_KTHREAD) 6925 continue; 6926 6927 p->se.exec_start = 0; 6928 schedstat_set(p->se.statistics.wait_start, 0); 6929 schedstat_set(p->se.statistics.sleep_start, 0); 6930 schedstat_set(p->se.statistics.block_start, 0); 6931 6932 if (!dl_task(p) && !rt_task(p)) { 6933 /* 6934 * Renice negative nice level userspace 6935 * tasks back to 0: 6936 */ 6937 if (task_nice(p) < 0) 6938 set_user_nice(p, 0); 6939 continue; 6940 } 6941 6942 
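		/*
		 * Editor's note (illustrative, not in the original source):
		 * at this point @p is known to be an RT or deadline task, so
		 * it is demoted back to SCHED_NORMAL via the attr set up
		 * above.  The two 'false' arguments appear to skip the
		 * capability check and the rt_mutex priority-inheritance
		 * update, which fits this sysrq path where we only hold
		 * tasklist_lock for reading.
		 */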
__sched_setscheduler(p, &attr, false, false); 6943 } 6944 read_unlock(&tasklist_lock); 6945 } 6946 6947 #endif /* CONFIG_MAGIC_SYSRQ */ 6948 6949 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 6950 /* 6951 * These functions are only useful for the IA64 MCA handling, or kdb. 6952 * 6953 * They can only be called when the whole system has been 6954 * stopped - every CPU needs to be quiescent, and no scheduling 6955 * activity can take place. Using them for anything else would 6956 * be a serious bug, and as a result, they aren't even visible 6957 * under any other configuration. 6958 */ 6959 6960 /** 6961 * curr_task - return the current task for a given CPU. 6962 * @cpu: the processor in question. 6963 * 6964 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6965 * 6966 * Return: The current task for @cpu. 6967 */ 6968 struct task_struct *curr_task(int cpu) 6969 { 6970 return cpu_curr(cpu); 6971 } 6972 6973 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 6974 6975 #ifdef CONFIG_IA64 6976 /** 6977 * ia64_set_curr_task - set the current task for a given CPU. 6978 * @cpu: the processor in question. 6979 * @p: the task pointer to set. 6980 * 6981 * Description: This function must only be used when non-maskable interrupts 6982 * are serviced on a separate stack. It allows the architecture to switch the 6983 * notion of the current task on a CPU in a non-blocking manner. This function 6984 * must be called with all CPU's synchronized, and interrupts disabled, the 6985 * and caller must save the original value of the current task (see 6986 * curr_task() above) and restore that value before reenabling interrupts and 6987 * re-starting the system. 6988 * 6989 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6990 */ 6991 void ia64_set_curr_task(int cpu, struct task_struct *p) 6992 { 6993 cpu_curr(cpu) = p; 6994 } 6995 6996 #endif 6997 6998 #ifdef CONFIG_CGROUP_SCHED 6999 /* task_group_lock serializes the addition/removal of task groups */ 7000 static DEFINE_SPINLOCK(task_group_lock); 7001 7002 static inline void alloc_uclamp_sched_group(struct task_group *tg, 7003 struct task_group *parent) 7004 { 7005 #ifdef CONFIG_UCLAMP_TASK_GROUP 7006 enum uclamp_id clamp_id; 7007 7008 for_each_clamp_id(clamp_id) { 7009 uclamp_se_set(&tg->uclamp_req[clamp_id], 7010 uclamp_none(clamp_id), false); 7011 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; 7012 } 7013 #endif 7014 } 7015 7016 static void sched_free_group(struct task_group *tg) 7017 { 7018 free_fair_sched_group(tg); 7019 free_rt_sched_group(tg); 7020 autogroup_free(tg); 7021 kmem_cache_free(task_group_cache, tg); 7022 } 7023 7024 /* allocate runqueue etc for a new task group */ 7025 struct task_group *sched_create_group(struct task_group *parent) 7026 { 7027 struct task_group *tg; 7028 7029 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 7030 if (!tg) 7031 return ERR_PTR(-ENOMEM); 7032 7033 if (!alloc_fair_sched_group(tg, parent)) 7034 goto err; 7035 7036 if (!alloc_rt_sched_group(tg, parent)) 7037 goto err; 7038 7039 alloc_uclamp_sched_group(tg, parent); 7040 7041 return tg; 7042 7043 err: 7044 sched_free_group(tg); 7045 return ERR_PTR(-ENOMEM); 7046 } 7047 7048 void sched_online_group(struct task_group *tg, struct task_group *parent) 7049 { 7050 unsigned long flags; 7051 7052 spin_lock_irqsave(&task_group_lock, flags); 7053 list_add_rcu(&tg->list, &task_groups); 7054 7055 /* Root should already exist: */ 7056 WARN_ON(!parent); 7057 7058 tg->parent = parent; 7059 INIT_LIST_HEAD(&tg->children); 7060 
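	/*
	 * Editor's note (illustrative, not in the original source): the new
	 * group is linked into its parent's ->children list while holding
	 * task_group_lock; the RCU list primitive is used so hierarchy
	 * walkers (e.g. walk_tg_tree()) can traverse the tree without
	 * taking the spinlock.
	 */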
list_add_rcu(&tg->siblings, &parent->children); 7061 spin_unlock_irqrestore(&task_group_lock, flags); 7062 7063 online_fair_sched_group(tg); 7064 } 7065 7066 /* rcu callback to free various structures associated with a task group */ 7067 static void sched_free_group_rcu(struct rcu_head *rhp) 7068 { 7069 /* Now it should be safe to free those cfs_rqs: */ 7070 sched_free_group(container_of(rhp, struct task_group, rcu)); 7071 } 7072 7073 void sched_destroy_group(struct task_group *tg) 7074 { 7075 /* Wait for possible concurrent references to cfs_rqs complete: */ 7076 call_rcu(&tg->rcu, sched_free_group_rcu); 7077 } 7078 7079 void sched_offline_group(struct task_group *tg) 7080 { 7081 unsigned long flags; 7082 7083 /* End participation in shares distribution: */ 7084 unregister_fair_sched_group(tg); 7085 7086 spin_lock_irqsave(&task_group_lock, flags); 7087 list_del_rcu(&tg->list); 7088 list_del_rcu(&tg->siblings); 7089 spin_unlock_irqrestore(&task_group_lock, flags); 7090 } 7091 7092 static void sched_change_group(struct task_struct *tsk, int type) 7093 { 7094 struct task_group *tg; 7095 7096 /* 7097 * All callers are synchronized by task_rq_lock(); we do not use RCU 7098 * which is pointless here. Thus, we pass "true" to task_css_check() 7099 * to prevent lockdep warnings. 7100 */ 7101 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 7102 struct task_group, css); 7103 tg = autogroup_task_group(tsk, tg); 7104 tsk->sched_task_group = tg; 7105 7106 #ifdef CONFIG_FAIR_GROUP_SCHED 7107 if (tsk->sched_class->task_change_group) 7108 tsk->sched_class->task_change_group(tsk, type); 7109 else 7110 #endif 7111 set_task_rq(tsk, task_cpu(tsk)); 7112 } 7113 7114 /* 7115 * Change task's runqueue when it moves between groups. 7116 * 7117 * The caller of this function should have put the task in its new group by 7118 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect 7119 * its new group. 7120 */ 7121 void sched_move_task(struct task_struct *tsk) 7122 { 7123 int queued, running, queue_flags = 7124 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 7125 struct rq_flags rf; 7126 struct rq *rq; 7127 7128 rq = task_rq_lock(tsk, &rf); 7129 update_rq_clock(rq); 7130 7131 running = task_current(rq, tsk); 7132 queued = task_on_rq_queued(tsk); 7133 7134 if (queued) 7135 dequeue_task(rq, tsk, queue_flags); 7136 if (running) 7137 put_prev_task(rq, tsk); 7138 7139 sched_change_group(tsk, TASK_MOVE_GROUP); 7140 7141 if (queued) 7142 enqueue_task(rq, tsk, queue_flags); 7143 if (running) { 7144 set_next_task(rq, tsk); 7145 /* 7146 * After changing group, the running task may have joined a 7147 * throttled one but it's still the running task. Trigger a 7148 * resched to make sure that task can still run. 7149 */ 7150 resched_curr(rq); 7151 } 7152 7153 task_rq_unlock(rq, tsk, &rf); 7154 } 7155 7156 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 7157 { 7158 return css ? 
container_of(css, struct task_group, css) : NULL; 7159 } 7160 7161 static struct cgroup_subsys_state * 7162 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 7163 { 7164 struct task_group *parent = css_tg(parent_css); 7165 struct task_group *tg; 7166 7167 if (!parent) { 7168 /* This is early initialization for the top cgroup */ 7169 return &root_task_group.css; 7170 } 7171 7172 tg = sched_create_group(parent); 7173 if (IS_ERR(tg)) 7174 return ERR_PTR(-ENOMEM); 7175 7176 return &tg->css; 7177 } 7178 7179 /* Expose task group only after completing cgroup initialization */ 7180 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 7181 { 7182 struct task_group *tg = css_tg(css); 7183 struct task_group *parent = css_tg(css->parent); 7184 7185 if (parent) 7186 sched_online_group(tg, parent); 7187 7188 #ifdef CONFIG_UCLAMP_TASK_GROUP 7189 /* Propagate the effective uclamp value for the new group */ 7190 cpu_util_update_eff(css); 7191 #endif 7192 7193 return 0; 7194 } 7195 7196 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 7197 { 7198 struct task_group *tg = css_tg(css); 7199 7200 sched_offline_group(tg); 7201 } 7202 7203 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 7204 { 7205 struct task_group *tg = css_tg(css); 7206 7207 /* 7208 * Relies on the RCU grace period between css_released() and this. 7209 */ 7210 sched_free_group(tg); 7211 } 7212 7213 /* 7214 * This is called before wake_up_new_task(), therefore we really only 7215 * have to set its group bits, all the other stuff does not apply. 7216 */ 7217 static void cpu_cgroup_fork(struct task_struct *task) 7218 { 7219 struct rq_flags rf; 7220 struct rq *rq; 7221 7222 rq = task_rq_lock(task, &rf); 7223 7224 update_rq_clock(rq); 7225 sched_change_group(task, TASK_SET_GROUP); 7226 7227 task_rq_unlock(rq, task, &rf); 7228 } 7229 7230 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 7231 { 7232 struct task_struct *task; 7233 struct cgroup_subsys_state *css; 7234 int ret = 0; 7235 7236 cgroup_taskset_for_each(task, css, tset) { 7237 #ifdef CONFIG_RT_GROUP_SCHED 7238 if (!sched_rt_can_attach(css_tg(css), task)) 7239 return -EINVAL; 7240 #endif 7241 /* 7242 * Serialize against wake_up_new_task() such that if its 7243 * running, we're sure to observe its full state. 7244 */ 7245 raw_spin_lock_irq(&task->pi_lock); 7246 /* 7247 * Avoid calling sched_move_task() before wake_up_new_task() 7248 * has happened. This would lead to problems with PELT, due to 7249 * move wanting to detach+attach while we're not attached yet. 7250 */ 7251 if (task->state == TASK_NEW) 7252 ret = -EINVAL; 7253 raw_spin_unlock_irq(&task->pi_lock); 7254 7255 if (ret) 7256 break; 7257 } 7258 return ret; 7259 } 7260 7261 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 7262 { 7263 struct task_struct *task; 7264 struct cgroup_subsys_state *css; 7265 7266 cgroup_taskset_for_each(task, css, tset) 7267 sched_move_task(task); 7268 } 7269 7270 #ifdef CONFIG_UCLAMP_TASK_GROUP 7271 static void cpu_util_update_eff(struct cgroup_subsys_state *css) 7272 { 7273 struct cgroup_subsys_state *top_css = css; 7274 struct uclamp_se *uc_parent = NULL; 7275 struct uclamp_se *uc_se = NULL; 7276 unsigned int eff[UCLAMP_CNT]; 7277 enum uclamp_id clamp_id; 7278 unsigned int clamps; 7279 7280 css_for_each_descendant_pre(css, top_css) { 7281 uc_parent = css_tg(css)->parent 7282 ? 
css_tg(css)->parent->uclamp : NULL; 7283 7284 for_each_clamp_id(clamp_id) { 7285 /* Assume effective clamps matches requested clamps */ 7286 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; 7287 /* Cap effective clamps with parent's effective clamps */ 7288 if (uc_parent && 7289 eff[clamp_id] > uc_parent[clamp_id].value) { 7290 eff[clamp_id] = uc_parent[clamp_id].value; 7291 } 7292 } 7293 /* Ensure protection is always capped by limit */ 7294 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); 7295 7296 /* Propagate most restrictive effective clamps */ 7297 clamps = 0x0; 7298 uc_se = css_tg(css)->uclamp; 7299 for_each_clamp_id(clamp_id) { 7300 if (eff[clamp_id] == uc_se[clamp_id].value) 7301 continue; 7302 uc_se[clamp_id].value = eff[clamp_id]; 7303 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); 7304 clamps |= (0x1 << clamp_id); 7305 } 7306 if (!clamps) { 7307 css = css_rightmost_descendant(css); 7308 continue; 7309 } 7310 7311 /* Immediately update descendants RUNNABLE tasks */ 7312 uclamp_update_active_tasks(css, clamps); 7313 } 7314 } 7315 7316 /* 7317 * Integer 10^N with a given N exponent by casting to integer the literal "1eN" 7318 * C expression. Since there is no way to convert a macro argument (N) into a 7319 * character constant, use two levels of macros. 7320 */ 7321 #define _POW10(exp) ((unsigned int)1e##exp) 7322 #define POW10(exp) _POW10(exp) 7323 7324 struct uclamp_request { 7325 #define UCLAMP_PERCENT_SHIFT 2 7326 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) 7327 s64 percent; 7328 u64 util; 7329 int ret; 7330 }; 7331 7332 static inline struct uclamp_request 7333 capacity_from_percent(char *buf) 7334 { 7335 struct uclamp_request req = { 7336 .percent = UCLAMP_PERCENT_SCALE, 7337 .util = SCHED_CAPACITY_SCALE, 7338 .ret = 0, 7339 }; 7340 7341 buf = strim(buf); 7342 if (strcmp(buf, "max")) { 7343 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, 7344 &req.percent); 7345 if (req.ret) 7346 return req; 7347 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { 7348 req.ret = -ERANGE; 7349 return req; 7350 } 7351 7352 req.util = req.percent << SCHED_CAPACITY_SHIFT; 7353 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); 7354 } 7355 7356 return req; 7357 } 7358 7359 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, 7360 size_t nbytes, loff_t off, 7361 enum uclamp_id clamp_id) 7362 { 7363 struct uclamp_request req; 7364 struct task_group *tg; 7365 7366 req = capacity_from_percent(buf); 7367 if (req.ret) 7368 return req.ret; 7369 7370 mutex_lock(&uclamp_mutex); 7371 rcu_read_lock(); 7372 7373 tg = css_tg(of_css(of)); 7374 if (tg->uclamp_req[clamp_id].value != req.util) 7375 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); 7376 7377 /* 7378 * Because of not recoverable conversion rounding we keep track of the 7379 * exact requested value 7380 */ 7381 tg->uclamp_pct[clamp_id] = req.percent; 7382 7383 /* Update effective clamps to track the most restrictive value */ 7384 cpu_util_update_eff(of_css(of)); 7385 7386 rcu_read_unlock(); 7387 mutex_unlock(&uclamp_mutex); 7388 7389 return nbytes; 7390 } 7391 7392 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, 7393 char *buf, size_t nbytes, 7394 loff_t off) 7395 { 7396 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); 7397 } 7398 7399 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of, 7400 char *buf, size_t nbytes, 7401 loff_t off) 7402 { 7403 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); 7404 } 7405 
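/*
 * Worked example (editor's illustration, not part of the original source):
 * with UCLAMP_PERCENT_SHIFT == 2, writing "12.34" to cpu.uclamp.min is
 * parsed by cgroup_parse_float() into percent = 1234 (12.34 scaled by
 * 10^2), and capacity_from_percent() then computes
 *
 *	util = DIV_ROUND_CLOSEST_ULL(1234 << SCHED_CAPACITY_SHIFT, 10000)
 *	     = DIV_ROUND_CLOSEST_ULL(1234 * 1024, 10000)
 *	     = 126 (rounded to closest)
 *
 * while the literal string "max" maps straight to SCHED_CAPACITY_SCALE.
 * The exact percentage is kept in tg->uclamp_pct[] so that
 * cpu_uclamp_print() can echo back "12.34" instead of a value distorted
 * by the rounding above.
 */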
7406 static inline void cpu_uclamp_print(struct seq_file *sf, 7407 enum uclamp_id clamp_id) 7408 { 7409 struct task_group *tg; 7410 u64 util_clamp; 7411 u64 percent; 7412 u32 rem; 7413 7414 rcu_read_lock(); 7415 tg = css_tg(seq_css(sf)); 7416 util_clamp = tg->uclamp_req[clamp_id].value; 7417 rcu_read_unlock(); 7418 7419 if (util_clamp == SCHED_CAPACITY_SCALE) { 7420 seq_puts(sf, "max\n"); 7421 return; 7422 } 7423 7424 percent = tg->uclamp_pct[clamp_id]; 7425 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); 7426 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); 7427 } 7428 7429 static int cpu_uclamp_min_show(struct seq_file *sf, void *v) 7430 { 7431 cpu_uclamp_print(sf, UCLAMP_MIN); 7432 return 0; 7433 } 7434 7435 static int cpu_uclamp_max_show(struct seq_file *sf, void *v) 7436 { 7437 cpu_uclamp_print(sf, UCLAMP_MAX); 7438 return 0; 7439 } 7440 #endif /* CONFIG_UCLAMP_TASK_GROUP */ 7441 7442 #ifdef CONFIG_FAIR_GROUP_SCHED 7443 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 7444 struct cftype *cftype, u64 shareval) 7445 { 7446 if (shareval > scale_load_down(ULONG_MAX)) 7447 shareval = MAX_SHARES; 7448 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 7449 } 7450 7451 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 7452 struct cftype *cft) 7453 { 7454 struct task_group *tg = css_tg(css); 7455 7456 return (u64) scale_load_down(tg->shares); 7457 } 7458 7459 #ifdef CONFIG_CFS_BANDWIDTH 7460 static DEFINE_MUTEX(cfs_constraints_mutex); 7461 7462 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 7463 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 7464 /* More than 203 days if BW_SHIFT equals 20. */ 7465 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; 7466 7467 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 7468 7469 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 7470 { 7471 int i, ret = 0, runtime_enabled, runtime_was_enabled; 7472 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7473 7474 if (tg == &root_task_group) 7475 return -EINVAL; 7476 7477 /* 7478 * Ensure we have at some amount of bandwidth every period. This is 7479 * to prevent reaching a state of large arrears when throttled via 7480 * entity_tick() resulting in prolonged exit starvation. 7481 */ 7482 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 7483 return -EINVAL; 7484 7485 /* 7486 * Likewise, bound things on the otherside by preventing insane quota 7487 * periods. This also allows us to normalize in computing quota 7488 * feasibility. 7489 */ 7490 if (period > max_cfs_quota_period) 7491 return -EINVAL; 7492 7493 /* 7494 * Bound quota to defend quota against overflow during bandwidth shift. 7495 */ 7496 if (quota != RUNTIME_INF && quota > max_cfs_runtime) 7497 return -EINVAL; 7498 7499 /* 7500 * Prevent race between setting of cfs_rq->runtime_enabled and 7501 * unthrottle_offline_cfs_rqs(). 
7502 */ 7503 get_online_cpus(); 7504 mutex_lock(&cfs_constraints_mutex); 7505 ret = __cfs_schedulable(tg, period, quota); 7506 if (ret) 7507 goto out_unlock; 7508 7509 runtime_enabled = quota != RUNTIME_INF; 7510 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 7511 /* 7512 * If we need to toggle cfs_bandwidth_used, off->on must occur 7513 * before making related changes, and on->off must occur afterwards 7514 */ 7515 if (runtime_enabled && !runtime_was_enabled) 7516 cfs_bandwidth_usage_inc(); 7517 raw_spin_lock_irq(&cfs_b->lock); 7518 cfs_b->period = ns_to_ktime(period); 7519 cfs_b->quota = quota; 7520 7521 __refill_cfs_bandwidth_runtime(cfs_b); 7522 7523 /* Restart the period timer (if active) to handle new period expiry: */ 7524 if (runtime_enabled) 7525 start_cfs_bandwidth(cfs_b); 7526 7527 raw_spin_unlock_irq(&cfs_b->lock); 7528 7529 for_each_online_cpu(i) { 7530 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 7531 struct rq *rq = cfs_rq->rq; 7532 struct rq_flags rf; 7533 7534 rq_lock_irq(rq, &rf); 7535 cfs_rq->runtime_enabled = runtime_enabled; 7536 cfs_rq->runtime_remaining = 0; 7537 7538 if (cfs_rq->throttled) 7539 unthrottle_cfs_rq(cfs_rq); 7540 rq_unlock_irq(rq, &rf); 7541 } 7542 if (runtime_was_enabled && !runtime_enabled) 7543 cfs_bandwidth_usage_dec(); 7544 out_unlock: 7545 mutex_unlock(&cfs_constraints_mutex); 7546 put_online_cpus(); 7547 7548 return ret; 7549 } 7550 7551 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 7552 { 7553 u64 quota, period; 7554 7555 period = ktime_to_ns(tg->cfs_bandwidth.period); 7556 if (cfs_quota_us < 0) 7557 quota = RUNTIME_INF; 7558 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) 7559 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 7560 else 7561 return -EINVAL; 7562 7563 return tg_set_cfs_bandwidth(tg, period, quota); 7564 } 7565 7566 static long tg_get_cfs_quota(struct task_group *tg) 7567 { 7568 u64 quota_us; 7569 7570 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 7571 return -1; 7572 7573 quota_us = tg->cfs_bandwidth.quota; 7574 do_div(quota_us, NSEC_PER_USEC); 7575 7576 return quota_us; 7577 } 7578 7579 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 7580 { 7581 u64 quota, period; 7582 7583 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 7584 return -EINVAL; 7585 7586 period = (u64)cfs_period_us * NSEC_PER_USEC; 7587 quota = tg->cfs_bandwidth.quota; 7588 7589 return tg_set_cfs_bandwidth(tg, period, quota); 7590 } 7591 7592 static long tg_get_cfs_period(struct task_group *tg) 7593 { 7594 u64 cfs_period_us; 7595 7596 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 7597 do_div(cfs_period_us, NSEC_PER_USEC); 7598 7599 return cfs_period_us; 7600 } 7601 7602 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 7603 struct cftype *cft) 7604 { 7605 return tg_get_cfs_quota(css_tg(css)); 7606 } 7607 7608 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 7609 struct cftype *cftype, s64 cfs_quota_us) 7610 { 7611 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 7612 } 7613 7614 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 7615 struct cftype *cft) 7616 { 7617 return tg_get_cfs_period(css_tg(css)); 7618 } 7619 7620 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 7621 struct cftype *cftype, u64 cfs_period_us) 7622 { 7623 return tg_set_cfs_period(css_tg(css), cfs_period_us); 7624 } 7625 7626 struct cfs_schedulable_data { 7627 struct task_group *tg; 7628 u64 period, quota; 7629 }; 7630 7631 /* 7632 * normalize group quota/period to 
be quota/max_period 7633 * note: units are usecs 7634 */ 7635 static u64 normalize_cfs_quota(struct task_group *tg, 7636 struct cfs_schedulable_data *d) 7637 { 7638 u64 quota, period; 7639 7640 if (tg == d->tg) { 7641 period = d->period; 7642 quota = d->quota; 7643 } else { 7644 period = tg_get_cfs_period(tg); 7645 quota = tg_get_cfs_quota(tg); 7646 } 7647 7648 /* note: these should typically be equivalent */ 7649 if (quota == RUNTIME_INF || quota == -1) 7650 return RUNTIME_INF; 7651 7652 return to_ratio(period, quota); 7653 } 7654 7655 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 7656 { 7657 struct cfs_schedulable_data *d = data; 7658 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7659 s64 quota = 0, parent_quota = -1; 7660 7661 if (!tg->parent) { 7662 quota = RUNTIME_INF; 7663 } else { 7664 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 7665 7666 quota = normalize_cfs_quota(tg, d); 7667 parent_quota = parent_b->hierarchical_quota; 7668 7669 /* 7670 * Ensure max(child_quota) <= parent_quota. On cgroup2, 7671 * always take the min. On cgroup1, only inherit when no 7672 * limit is set: 7673 */ 7674 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 7675 quota = min(quota, parent_quota); 7676 } else { 7677 if (quota == RUNTIME_INF) 7678 quota = parent_quota; 7679 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 7680 return -EINVAL; 7681 } 7682 } 7683 cfs_b->hierarchical_quota = quota; 7684 7685 return 0; 7686 } 7687 7688 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 7689 { 7690 int ret; 7691 struct cfs_schedulable_data data = { 7692 .tg = tg, 7693 .period = period, 7694 .quota = quota, 7695 }; 7696 7697 if (quota != RUNTIME_INF) { 7698 do_div(data.period, NSEC_PER_USEC); 7699 do_div(data.quota, NSEC_PER_USEC); 7700 } 7701 7702 rcu_read_lock(); 7703 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 7704 rcu_read_unlock(); 7705 7706 return ret; 7707 } 7708 7709 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 7710 { 7711 struct task_group *tg = css_tg(seq_css(sf)); 7712 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7713 7714 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 7715 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 7716 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 7717 7718 if (schedstat_enabled() && tg != &root_task_group) { 7719 u64 ws = 0; 7720 int i; 7721 7722 for_each_possible_cpu(i) 7723 ws += schedstat_val(tg->se[i]->statistics.wait_sum); 7724 7725 seq_printf(sf, "wait_sum %llu\n", ws); 7726 } 7727 7728 return 0; 7729 } 7730 #endif /* CONFIG_CFS_BANDWIDTH */ 7731 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7732 7733 #ifdef CONFIG_RT_GROUP_SCHED 7734 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 7735 struct cftype *cft, s64 val) 7736 { 7737 return sched_group_set_rt_runtime(css_tg(css), val); 7738 } 7739 7740 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 7741 struct cftype *cft) 7742 { 7743 return sched_group_rt_runtime(css_tg(css)); 7744 } 7745 7746 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 7747 struct cftype *cftype, u64 rt_period_us) 7748 { 7749 return sched_group_set_rt_period(css_tg(css), rt_period_us); 7750 } 7751 7752 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 7753 struct cftype *cft) 7754 { 7755 return sched_group_rt_period(css_tg(css)); 7756 } 7757 #endif /* CONFIG_RT_GROUP_SCHED */ 7758 7759 static struct cftype cpu_legacy_files[] = { 7760 #ifdef 
CONFIG_FAIR_GROUP_SCHED 7761 { 7762 .name = "shares", 7763 .read_u64 = cpu_shares_read_u64, 7764 .write_u64 = cpu_shares_write_u64, 7765 }, 7766 #endif 7767 #ifdef CONFIG_CFS_BANDWIDTH 7768 { 7769 .name = "cfs_quota_us", 7770 .read_s64 = cpu_cfs_quota_read_s64, 7771 .write_s64 = cpu_cfs_quota_write_s64, 7772 }, 7773 { 7774 .name = "cfs_period_us", 7775 .read_u64 = cpu_cfs_period_read_u64, 7776 .write_u64 = cpu_cfs_period_write_u64, 7777 }, 7778 { 7779 .name = "stat", 7780 .seq_show = cpu_cfs_stat_show, 7781 }, 7782 #endif 7783 #ifdef CONFIG_RT_GROUP_SCHED 7784 { 7785 .name = "rt_runtime_us", 7786 .read_s64 = cpu_rt_runtime_read, 7787 .write_s64 = cpu_rt_runtime_write, 7788 }, 7789 { 7790 .name = "rt_period_us", 7791 .read_u64 = cpu_rt_period_read_uint, 7792 .write_u64 = cpu_rt_period_write_uint, 7793 }, 7794 #endif 7795 #ifdef CONFIG_UCLAMP_TASK_GROUP 7796 { 7797 .name = "uclamp.min", 7798 .flags = CFTYPE_NOT_ON_ROOT, 7799 .seq_show = cpu_uclamp_min_show, 7800 .write = cpu_uclamp_min_write, 7801 }, 7802 { 7803 .name = "uclamp.max", 7804 .flags = CFTYPE_NOT_ON_ROOT, 7805 .seq_show = cpu_uclamp_max_show, 7806 .write = cpu_uclamp_max_write, 7807 }, 7808 #endif 7809 { } /* Terminate */ 7810 }; 7811 7812 static int cpu_extra_stat_show(struct seq_file *sf, 7813 struct cgroup_subsys_state *css) 7814 { 7815 #ifdef CONFIG_CFS_BANDWIDTH 7816 { 7817 struct task_group *tg = css_tg(css); 7818 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7819 u64 throttled_usec; 7820 7821 throttled_usec = cfs_b->throttled_time; 7822 do_div(throttled_usec, NSEC_PER_USEC); 7823 7824 seq_printf(sf, "nr_periods %d\n" 7825 "nr_throttled %d\n" 7826 "throttled_usec %llu\n", 7827 cfs_b->nr_periods, cfs_b->nr_throttled, 7828 throttled_usec); 7829 } 7830 #endif 7831 return 0; 7832 } 7833 7834 #ifdef CONFIG_FAIR_GROUP_SCHED 7835 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 7836 struct cftype *cft) 7837 { 7838 struct task_group *tg = css_tg(css); 7839 u64 weight = scale_load_down(tg->shares); 7840 7841 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); 7842 } 7843 7844 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 7845 struct cftype *cft, u64 weight) 7846 { 7847 /* 7848 * cgroup weight knobs should use the common MIN, DFL and MAX 7849 * values which are 1, 100 and 10000 respectively. While it loses 7850 * a bit of range on both ends, it maps pretty well onto the shares 7851 * value used by scheduler and the round-trip conversions preserve 7852 * the original value over the entire range. 
7853 */ 7854 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) 7855 return -ERANGE; 7856 7857 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); 7858 7859 return sched_group_set_shares(css_tg(css), scale_load(weight)); 7860 } 7861 7862 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 7863 struct cftype *cft) 7864 { 7865 unsigned long weight = scale_load_down(css_tg(css)->shares); 7866 int last_delta = INT_MAX; 7867 int prio, delta; 7868 7869 /* find the closest nice value to the current weight */ 7870 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 7871 delta = abs(sched_prio_to_weight[prio] - weight); 7872 if (delta >= last_delta) 7873 break; 7874 last_delta = delta; 7875 } 7876 7877 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 7878 } 7879 7880 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 7881 struct cftype *cft, s64 nice) 7882 { 7883 unsigned long weight; 7884 int idx; 7885 7886 if (nice < MIN_NICE || nice > MAX_NICE) 7887 return -ERANGE; 7888 7889 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 7890 idx = array_index_nospec(idx, 40); 7891 weight = sched_prio_to_weight[idx]; 7892 7893 return sched_group_set_shares(css_tg(css), scale_load(weight)); 7894 } 7895 #endif 7896 7897 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 7898 long period, long quota) 7899 { 7900 if (quota < 0) 7901 seq_puts(sf, "max"); 7902 else 7903 seq_printf(sf, "%ld", quota); 7904 7905 seq_printf(sf, " %ld\n", period); 7906 } 7907 7908 /* caller should put the current value in *@periodp before calling */ 7909 static int __maybe_unused cpu_period_quota_parse(char *buf, 7910 u64 *periodp, u64 *quotap) 7911 { 7912 char tok[21]; /* U64_MAX */ 7913 7914 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 7915 return -EINVAL; 7916 7917 *periodp *= NSEC_PER_USEC; 7918 7919 if (sscanf(tok, "%llu", quotap)) 7920 *quotap *= NSEC_PER_USEC; 7921 else if (!strcmp(tok, "max")) 7922 *quotap = RUNTIME_INF; 7923 else 7924 return -EINVAL; 7925 7926 return 0; 7927 } 7928 7929 #ifdef CONFIG_CFS_BANDWIDTH 7930 static int cpu_max_show(struct seq_file *sf, void *v) 7931 { 7932 struct task_group *tg = css_tg(seq_css(sf)); 7933 7934 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); 7935 return 0; 7936 } 7937 7938 static ssize_t cpu_max_write(struct kernfs_open_file *of, 7939 char *buf, size_t nbytes, loff_t off) 7940 { 7941 struct task_group *tg = css_tg(of_css(of)); 7942 u64 period = tg_get_cfs_period(tg); 7943 u64 quota; 7944 int ret; 7945 7946 ret = cpu_period_quota_parse(buf, &period, "a); 7947 if (!ret) 7948 ret = tg_set_cfs_bandwidth(tg, period, quota); 7949 return ret ?: nbytes; 7950 } 7951 #endif 7952 7953 static struct cftype cpu_files[] = { 7954 #ifdef CONFIG_FAIR_GROUP_SCHED 7955 { 7956 .name = "weight", 7957 .flags = CFTYPE_NOT_ON_ROOT, 7958 .read_u64 = cpu_weight_read_u64, 7959 .write_u64 = cpu_weight_write_u64, 7960 }, 7961 { 7962 .name = "weight.nice", 7963 .flags = CFTYPE_NOT_ON_ROOT, 7964 .read_s64 = cpu_weight_nice_read_s64, 7965 .write_s64 = cpu_weight_nice_write_s64, 7966 }, 7967 #endif 7968 #ifdef CONFIG_CFS_BANDWIDTH 7969 { 7970 .name = "max", 7971 .flags = CFTYPE_NOT_ON_ROOT, 7972 .seq_show = cpu_max_show, 7973 .write = cpu_max_write, 7974 }, 7975 #endif 7976 #ifdef CONFIG_UCLAMP_TASK_GROUP 7977 { 7978 .name = "uclamp.min", 7979 .flags = CFTYPE_NOT_ON_ROOT, 7980 .seq_show = cpu_uclamp_min_show, 7981 .write = cpu_uclamp_min_write, 7982 }, 7983 { 7984 .name = "uclamp.max", 7985 .flags = 
CFTYPE_NOT_ON_ROOT, 7986 .seq_show = cpu_uclamp_max_show, 7987 .write = cpu_uclamp_max_write, 7988 }, 7989 #endif 7990 { } /* terminate */ 7991 }; 7992 7993 struct cgroup_subsys cpu_cgrp_subsys = { 7994 .css_alloc = cpu_cgroup_css_alloc, 7995 .css_online = cpu_cgroup_css_online, 7996 .css_released = cpu_cgroup_css_released, 7997 .css_free = cpu_cgroup_css_free, 7998 .css_extra_stat_show = cpu_extra_stat_show, 7999 .fork = cpu_cgroup_fork, 8000 .can_attach = cpu_cgroup_can_attach, 8001 .attach = cpu_cgroup_attach, 8002 .legacy_cftypes = cpu_legacy_files, 8003 .dfl_cftypes = cpu_files, 8004 .early_init = true, 8005 .threaded = true, 8006 }; 8007 8008 #endif /* CONFIG_CGROUP_SCHED */ 8009 8010 void dump_cpu_task(int cpu) 8011 { 8012 pr_info("Task dump for CPU %d:\n", cpu); 8013 sched_show_task(cpu_curr(cpu)); 8014 } 8015 8016 /* 8017 * Nice levels are multiplicative, with a gentle 10% change for every 8018 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 8019 * nice 1, it will get ~10% less CPU time than another CPU-bound task 8020 * that remained on nice 0. 8021 * 8022 * The "10% effect" is relative and cumulative: from _any_ nice level, 8023 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 8024 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 8025 * If a task goes up by ~10% and another task goes down by ~10% then 8026 * the relative distance between them is ~25%.) 8027 */ 8028 const int sched_prio_to_weight[40] = { 8029 /* -20 */ 88761, 71755, 56483, 46273, 36291, 8030 /* -15 */ 29154, 23254, 18705, 14949, 11916, 8031 /* -10 */ 9548, 7620, 6100, 4904, 3906, 8032 /* -5 */ 3121, 2501, 1991, 1586, 1277, 8033 /* 0 */ 1024, 820, 655, 526, 423, 8034 /* 5 */ 335, 272, 215, 172, 137, 8035 /* 10 */ 110, 87, 70, 56, 45, 8036 /* 15 */ 36, 29, 23, 18, 15, 8037 }; 8038 8039 /* 8040 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. 8041 * 8042 * In cases where the weight does not change often, we can use the 8043 * precalculated inverse to speed up arithmetics by turning divisions 8044 * into multiplications: 8045 */ 8046 const u32 sched_prio_to_wmult[40] = { 8047 /* -20 */ 48388, 59856, 76040, 92818, 118348, 8048 /* -15 */ 147320, 184698, 229616, 287308, 360437, 8049 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 8050 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 8051 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 8052 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 8053 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 8054 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 8055 }; 8056 8057 #undef CREATE_TRACE_POINTS 8058
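/*
 * Worked example (editor's illustration, not part of the original file):
 * the ~10%-per-nice-level rule falls out of the 1.25 step between entries
 * of sched_prio_to_weight[].  Two CPU-bound tasks at nice 0 each carry
 * weight 1024 and therefore split the CPU 50/50.  Renice one of them to
 * +1 (weight 820) and the split becomes
 *
 *	1024 / (1024 + 820) ~= 55.5%   vs.   820 / (1024 + 820) ~= 44.5%
 *
 * i.e. roughly a 10% swing in either direction.  sched_prio_to_wmult[]
 * simply caches 2^32 / weight (e.g. 2^32 / 1024 = 4194304) so the hot
 * path can replace a division by a multiply-and-shift.
 */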