// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel scheduler code and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#include "sched.h"

#include <linux/nospec.h>

#include <linux/kcov.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>

#include "../workqueue_internal.h"
#include "../../fs/io-wq.h"
#include "../smpboot.h"

#include "pelt.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT
#endif

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
	/*
	 * In theory, the compiler should just see 0 here, and optimize out the
	 * call to sched_rt_avg_update. But I don't trust it...
	 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight misattribution of {soft,}irq
	 * time; a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}


#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;

	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq->hrtick_csd_pending = 0;
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		__hrtick_restart(rq);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;
	}
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}
#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _old, _val = *_ptr;			\
									\
		for (;;) {						\
			_old = cmpxchg(_ptr, _val, _val | _mask);	\
			if (_old == _val)				\
				break;					\
			_val = _old;					\
		}							\
	_old;								\
})

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold a reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending on whether or not the @task is already
 * queued for wakeup.
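 *
 * Illustrative usage sketch (not code from this file; @some_lock and @task
 * are hypothetical):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);
 *	raw_spin_unlock(&some_lock);
 *	wake_up_q(&wake_q);
 *
 * Queueing happens under the lock, while the actual wakeups (and the
 * reference drops) happen in wake_up_q() after the lock is released.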
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU would add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;

	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd),
			housekeeping_cpumask(HK_FLAG_TIMER)) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
	cpu = default_cpu;
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;	/* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!(atomic_read(nohz_flags(cpu)) & NOHZ_KICK_MASK))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run the Idle Load Balance on this CPU at this time, so
	 * cancel it and clear NOHZ_BALANCE_KICK.
	 */
	atomic_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
	return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to effect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks
	 * left; if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
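 *
 * For example (illustrative): walk_tg_tree_from(&root_task_group, tg_nop, up, data)
 * visits every task_group in the system and invokes @up bottom-up on each node,
 * since tg_nop() is a no-op @down visitor.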
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		p->se.runnable_weight = load->weight;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
		p->se.runnable_weight = load->weight;
	}
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
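 *
 * (Illustrative note: in this file, sysctl_sched_uclamp_handler() takes this
 * mutex on the slow path, while the per-task/per-rq clamp state is updated
 * under task_rq_lock(); see uclamp_update_active() below.)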
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less than or equal to these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return clamp_value / UCLAMP_BUCKET_DELTA;
}

static inline unsigned int uclamp_bucket_base_value(unsigned int clamp_value)
{
	return UCLAMP_BUCKET_DELTA * uclamp_bucket_id(clamp_value);
}

static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;
	return SCHED_CAPACITY_SCALE;
}

static inline void uclamp_se_set(struct uclamp_se *uc_se,
				 unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->bucket_id = uclamp_bucket_id(value);
	uc_se->user_defined = user_defined;
}

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * topmost bucket with tasks in.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	struct uclamp_se uc_max;

	/*
	 * Tasks in autogroups or the root task group will be
	 * restricted by system defaults.
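	 *
	 * For other groups, an illustrative example: a task requesting
	 * UCLAMP_MIN = 800 while its task group allows at most 512 ends up
	 * with an effective request of 512, per the check below.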
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	uc_max = task_group(p)->uclamp[clamp_id];
	if (uc_req.value > uc_max.value || !uc_req.user_defined)
		return uc_max;
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space; track
 * within each bucket the maximum value for the tasks refcounted in it.
 * This "local max aggregation" allows tracking the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_held(&rq->lock);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > READ_ONCE(uc_rq->value))
		WRITE_ONCE(uc_rq->value, uc_se->value);
}

/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
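 *
 * Illustrative example: if two RUNNABLE tasks whose requests land in the same
 * bucket ask for 650 and 700, the bucket tracks 700. When the 700 task is
 * dequeued, the remaining task may transiently keep the 700 boost; only once
 * the bucket has no RUNNABLE tasks left is the rq clamp value recomputed from
 * the remaining buckets (see uclamp_rq_max_value()).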
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_held(&rq->lock);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;
	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept (possibly)
	 * overboosting some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = READ_ONCE(uc_rq->value);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fix up the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		WRITE_ONCE(uc_rq->value, bkt_clamp);
	}
}

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void
uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	if (p->uclamp[clamp_id].active) {
		uclamp_rq_dec_id(rq, p, clamp_id);
		uclamp_rq_inc_id(rq, p, clamp_id);
	}

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css,
			   unsigned int clamps)
{
	enum uclamp_id clamp_id;
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it))) {
		for_each_clamp_id(clamp_id) {
			if ((0x1 << clamp_id) & clamps)
				uclamp_update_active(p, clamp_id);
		}
	}
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	rcu_read_lock();
	cpu_util_update_eff(&root_task_group.css);
	rcu_read_unlock();
}
#else
static void uclamp_update_root_tg(void) { }
#endif

int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max;
	int result;

	mutex_lock(&uclamp_mutex);
	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		goto done;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE) {
		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg)
		uclamp_update_root_tg();

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */

	goto done;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
done:
	mutex_unlock(&uclamp_mutex);

	return result;
}

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	unsigned int lower_bound = p->uclamp_req[UCLAMP_MIN].value;
	unsigned int upper_bound = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN)
		lower_bound = attr->sched_util_min;
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX)
		upper_bound = attr->sched_util_max;

	if (lower_bound > upper_bound)
		return -EINVAL;
	if (upper_bound > SCHED_CAPACITY_SCALE)
		return -EINVAL;

	return 0;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	/*
	 * On scheduling class change, reset to default clamps for tasks
	 * without a task-specific value.
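	 *
	 * (For context, an illustrative user-space request sets
	 * attr.sched_flags |= SCHED_FLAG_UTIL_CLAMP_MIN and attr.sched_util_min
	 * before calling sched_setattr(); the request is checked by
	 * uclamp_validate() above and applied at the end of this function.)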
	 */
	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int clamp_value = uclamp_none(clamp_id);

		/* Keep using defined clamps across class changes */
		if (uc_se->user_defined)
			continue;

		/* By default, RT tasks always get 100% boost */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			clamp_value = uclamp_none(UCLAMP_MAX);

		uclamp_se_set(uc_se, clamp_value, false);
	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

static void uclamp_fork(struct task_struct *p)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id)
		p->uclamp[clamp_id].active = false;

	if (likely(!p->sched_reset_on_fork))
		return;

	for_each_clamp_id(clamp_id) {
		unsigned int clamp_value = uclamp_none(clamp_id);

		/* By default, RT tasks always get 100% boost */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			clamp_value = uclamp_none(UCLAMP_MAX);

		uclamp_se_set(&p->uclamp_req[clamp_id], clamp_value, false);
	}
}

static void __init init_uclamp(void)
{
	struct uclamp_se uc_max = {};
	enum uclamp_id clamp_id;
	int cpu;

	mutex_init(&uclamp_mutex);

	for_each_possible_cpu(cpu) {
		memset(&cpu_rq(cpu)->uclamp, 0,
				sizeof(struct uclamp_rq)*UCLAMP_CNT);
		cpu_rq(cpu)->uclamp_flags = 0;
	}

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&init_task.uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}

	/* System defaults allow max clamp values for both indexes */
	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
	for_each_clamp_id(clamp_id) {
		uclamp_default[clamp_id] = uc_max;
#ifdef CONFIG_UCLAMP_TASK_GROUP
		root_task_group.uclamp_req[clamp_id] = uc_max;
		root_task_group.uclamp[clamp_id] = uc_max;
#endif
	}
}

#else /* CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & ENQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & ENQUEUE_RESTORE)) {
		sched_info_queued(rq, p);
		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
	}

	uclamp_rq_inc(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & DEQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & DEQUEUE_SAVE)) {
		sched_info_dequeued(rq, p);
		psi_dequeue(p, flags & DEQUEUE_SLEEP);
	}

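	/* Mirror enqueue_task(): release the clamp bucket refcount before the class dequeue hook. */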
	uclamp_rq_dec(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);

	p->on_rq = TASK_ON_RQ_QUEUED;
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;

	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_curr(rq);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule.
	 * In this case, we can save a useless back-to-back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq);
}

#ifdef CONFIG_SMP

/*
 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
 * __set_cpus_allowed_ptr() and select_fallback_rq().
 */
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
		return false;

	if (is_per_cpu_kthread(p))
		return cpu_online(cpu);

	return cpu_active(cpu);
}

/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
				   struct task_struct *p, int new_cpu)
{
	lockdep_assert_held(&rq->lock);

	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
	dequeue_task(rq, p, DEQUEUE_NOCLOCK);
	set_task_cpu(p, new_cpu);
	rq_unlock(rq, rf);

	rq = cpu_rq(new_cpu);

	rq_lock(rq, rf);
	BUG_ON(task_cpu(p) != new_cpu);
	enqueue_task(rq, p, 0);
	p->on_rq = TASK_ON_RQ_QUEUED;
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

/*
 * Move (not current) task off this CPU, onto the destination CPU. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
				 struct task_struct *p, int dest_cpu)
{
	/* Affinity changed (again). */
	if (!is_cpu_allowed(p, dest_cpu))
		return rq;

	update_rq_clock(rq);
	rq = move_queued_task(rq, rf, p, dest_cpu);

	return rq;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();
	struct rq_flags rf;

	/*
	 * The original target CPU might have gone down and we might
	 * be on another CPU but it doesn't matter.
	 */
	local_irq_disable();
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_ptr
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	sched_ttwu_pending();

	raw_spin_lock(&p->pi_lock);
	rq_lock(rq, &rf);
	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq) {
		if (task_on_rq_queued(p))
			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
		else
			p->wake_cpu = arg->dest_cpu;
	}
	rq_unlock(rq, &rf);
	raw_spin_unlock(&p->pi_lock);

	local_irq_enable();
	return 0;
}

/*
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 */
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	cpumask_copy(&p->cpus_mask, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	struct rq *rq = task_rq(p);
	bool queued, running;

	lockdep_assert_held(&p->pi_lock);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued) {
		/*
		 * Because __kthread_bind() calls this on blocked tasks without
		 * holding rq->lock.
		 */
		lockdep_assert_held(&rq->lock);
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	}
	if (running)
		put_prev_task(rq, p);

	p->sched_class->set_cpus_allowed(p, new_mask);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, bool check)
{
	const struct cpumask *cpu_valid_mask = cpu_active_mask;
	unsigned int dest_cpu;
	struct rq_flags rf;
	struct rq *rq;
	int ret = 0;

	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	if (p->flags & PF_KTHREAD) {
		/*
		 * Kernel threads are allowed on online && !active CPUs
		 */
		cpu_valid_mask = cpu_online_mask;
	}

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if (check && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	if (cpumask_equal(p->cpus_ptr, new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
	if (dest_cpu >= nr_cpu_ids) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	if (p->flags & PF_KTHREAD) {
		/*
		 * For kernel threads that do indeed end up on online &&
		 * !active we want to ensure they are strict per-CPU threads.
		 */
		WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
			!cpumask_intersects(new_mask, cpu_active_mask) &&
			p->nr_cpus_allowed != 1);
	}

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &rf);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		return 0;
	} else if (task_on_rq_queued(p)) {
		/*
		 * OK, since we're going to drop the lock immediately
		 * afterwards anyway.
		 */
		rq = move_queued_task(rq, &rf, p, dest_cpu);
	}
out:
	task_rq_unlock(rq, p, &rf);

	return ret;
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!p->on_rq);

	/*
	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
	 * time relying on p->on_rq.
	 */
	WARN_ON_ONCE(p->state == TASK_RUNNING &&
		     p->sched_class == &fair_sched_class &&
		     (p->on_rq && !task_on_rq_migrating(p)));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
	/*
	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
	 */
	WARN_ON_ONCE(!cpu_online(new_cpu));
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		rseq_migrate(p);
		perf_event_task_migrate(p);
	}

	__set_task_cpu(p, new_cpu);
}

#ifdef CONFIG_NUMA_BALANCING
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (task_on_rq_queued(p)) {
		struct rq *src_rq, *dst_rq;
		struct rq_flags srf, drf;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		rq_pin_lock(src_rq, &srf);
		rq_pin_lock(dst_rq, &drf);

		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		check_preempt_curr(dst_rq, p, 0);

		rq_unpin_lock(dst_rq, &drf);
		rq_unpin_lock(src_rq, &srf);

	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous CPU our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
		return -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);

	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);

	return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p,
		 int target_cpu, int curr_cpu)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = curr_cpu,
		.dst_task = p,
		.dst_cpu = target_cpu,
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	int running, queued;
	struct rq_flags rf;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &rf);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &rf);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = NSEC_PER_SEC / HZ;

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);

/*
 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
 *
 * A few notes on cpu_active vs cpu_online:
 *
 *  - cpu_active must be a subset of cpu_online
 *
 *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
 *    see __set_cpus_allowed_ptr(). At this point the newly online
 *    CPU isn't yet part of the sched domains, and balancing will not
 *    see it.
2015 * 2016 * - on CPU-down we clear cpu_active() to mask the sched domains and 2017 * avoid the load balancer to place new tasks on the to be removed 2018 * CPU. Existing tasks will remain running there and will be taken 2019 * off. 2020 * 2021 * This means that fallback selection must not select !active CPUs. 2022 * And can assume that any active CPU must be online. Conversely 2023 * select_task_rq() below may allow selection of !active CPUs in order 2024 * to satisfy the above rules. 2025 */ 2026 static int select_fallback_rq(int cpu, struct task_struct *p) 2027 { 2028 int nid = cpu_to_node(cpu); 2029 const struct cpumask *nodemask = NULL; 2030 enum { cpuset, possible, fail } state = cpuset; 2031 int dest_cpu; 2032 2033 /* 2034 * If the node that the CPU is on has been offlined, cpu_to_node() 2035 * will return -1. There is no CPU on the node, and we should 2036 * select the CPU on the other node. 2037 */ 2038 if (nid != -1) { 2039 nodemask = cpumask_of_node(nid); 2040 2041 /* Look for allowed, online CPU in same node. */ 2042 for_each_cpu(dest_cpu, nodemask) { 2043 if (!cpu_active(dest_cpu)) 2044 continue; 2045 if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) 2046 return dest_cpu; 2047 } 2048 } 2049 2050 for (;;) { 2051 /* Any allowed, online CPU? */ 2052 for_each_cpu(dest_cpu, p->cpus_ptr) { 2053 if (!is_cpu_allowed(p, dest_cpu)) 2054 continue; 2055 2056 goto out; 2057 } 2058 2059 /* No more Mr. Nice Guy. */ 2060 switch (state) { 2061 case cpuset: 2062 if (IS_ENABLED(CONFIG_CPUSETS)) { 2063 cpuset_cpus_allowed_fallback(p); 2064 state = possible; 2065 break; 2066 } 2067 /* Fall-through */ 2068 case possible: 2069 do_set_cpus_allowed(p, cpu_possible_mask); 2070 state = fail; 2071 break; 2072 2073 case fail: 2074 BUG(); 2075 break; 2076 } 2077 } 2078 2079 out: 2080 if (state != cpuset) { 2081 /* 2082 * Don't tell them about moving exiting tasks or 2083 * kernel threads (both mm NULL), since they never 2084 * leave kernel. 2085 */ 2086 if (p->mm && printk_ratelimit()) { 2087 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 2088 task_pid_nr(p), p->comm, cpu); 2089 } 2090 } 2091 2092 return dest_cpu; 2093 } 2094 2095 /* 2096 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 2097 */ 2098 static inline 2099 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) 2100 { 2101 lockdep_assert_held(&p->pi_lock); 2102 2103 if (p->nr_cpus_allowed > 1) 2104 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); 2105 else 2106 cpu = cpumask_any(p->cpus_ptr); 2107 2108 /* 2109 * In order not to call set_task_cpu() on a blocking task we need 2110 * to rely on ttwu() to place the task on a valid ->cpus_ptr 2111 * CPU. 2112 * 2113 * Since this is common to all placement strategies, this lives here. 2114 * 2115 * [ this allows ->select_task() to simply return task_cpu(p) and 2116 * not worry about this generic constraint ] 2117 */ 2118 if (unlikely(!is_cpu_allowed(p, cpu))) 2119 cpu = select_fallback_rq(task_cpu(p), p); 2120 2121 return cpu; 2122 } 2123 2124 static void update_avg(u64 *avg, u64 sample) 2125 { 2126 s64 diff = sample - *avg; 2127 *avg += diff >> 3; 2128 } 2129 2130 void sched_set_stop_task(int cpu, struct task_struct *stop) 2131 { 2132 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 2133 struct task_struct *old_stop = cpu_rq(cpu)->stop; 2134 2135 if (stop) { 2136 /* 2137 * Make it appear like a SCHED_FIFO task, its something 2138 * userspace knows about and won't get confused about. 
2139 * 2140 * Also, it will make PI more or less work without too 2141 * much confusion -- but then, stop work should not 2142 * rely on PI working anyway. 2143 */ 2144 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 2145 2146 stop->sched_class = &stop_sched_class; 2147 } 2148 2149 cpu_rq(cpu)->stop = stop; 2150 2151 if (old_stop) { 2152 /* 2153 * Reset it back to a normal scheduling class so that 2154 * it can die in pieces. 2155 */ 2156 old_stop->sched_class = &rt_sched_class; 2157 } 2158 } 2159 2160 #else 2161 2162 static inline int __set_cpus_allowed_ptr(struct task_struct *p, 2163 const struct cpumask *new_mask, bool check) 2164 { 2165 return set_cpus_allowed_ptr(p, new_mask); 2166 } 2167 2168 #endif /* CONFIG_SMP */ 2169 2170 static void 2171 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 2172 { 2173 struct rq *rq; 2174 2175 if (!schedstat_enabled()) 2176 return; 2177 2178 rq = this_rq(); 2179 2180 #ifdef CONFIG_SMP 2181 if (cpu == rq->cpu) { 2182 __schedstat_inc(rq->ttwu_local); 2183 __schedstat_inc(p->se.statistics.nr_wakeups_local); 2184 } else { 2185 struct sched_domain *sd; 2186 2187 __schedstat_inc(p->se.statistics.nr_wakeups_remote); 2188 rcu_read_lock(); 2189 for_each_domain(rq->cpu, sd) { 2190 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 2191 __schedstat_inc(sd->ttwu_wake_remote); 2192 break; 2193 } 2194 } 2195 rcu_read_unlock(); 2196 } 2197 2198 if (wake_flags & WF_MIGRATED) 2199 __schedstat_inc(p->se.statistics.nr_wakeups_migrate); 2200 #endif /* CONFIG_SMP */ 2201 2202 __schedstat_inc(rq->ttwu_count); 2203 __schedstat_inc(p->se.statistics.nr_wakeups); 2204 2205 if (wake_flags & WF_SYNC) 2206 __schedstat_inc(p->se.statistics.nr_wakeups_sync); 2207 } 2208 2209 /* 2210 * Mark the task runnable and perform wakeup-preemption. 2211 */ 2212 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, 2213 struct rq_flags *rf) 2214 { 2215 check_preempt_curr(rq, p, wake_flags); 2216 p->state = TASK_RUNNING; 2217 trace_sched_wakeup(p); 2218 2219 #ifdef CONFIG_SMP 2220 if (p->sched_class->task_woken) { 2221 /* 2222 * Our task @p is fully woken up and running; so its safe to 2223 * drop the rq->lock, hereafter rq is only used for statistics. 2224 */ 2225 rq_unpin_lock(rq, rf); 2226 p->sched_class->task_woken(rq, p); 2227 rq_repin_lock(rq, rf); 2228 } 2229 2230 if (rq->idle_stamp) { 2231 u64 delta = rq_clock(rq) - rq->idle_stamp; 2232 u64 max = 2*rq->max_idle_balance_cost; 2233 2234 update_avg(&rq->avg_idle, delta); 2235 2236 if (rq->avg_idle > max) 2237 rq->avg_idle = max; 2238 2239 rq->idle_stamp = 0; 2240 } 2241 #endif 2242 } 2243 2244 static void 2245 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 2246 struct rq_flags *rf) 2247 { 2248 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 2249 2250 lockdep_assert_held(&rq->lock); 2251 2252 #ifdef CONFIG_SMP 2253 if (p->sched_contributes_to_load) 2254 rq->nr_uninterruptible--; 2255 2256 if (wake_flags & WF_MIGRATED) 2257 en_flags |= ENQUEUE_MIGRATED; 2258 #endif 2259 2260 activate_task(rq, p, en_flags); 2261 ttwu_do_wakeup(rq, p, wake_flags, rf); 2262 } 2263 2264 /* 2265 * Called in case the task @p isn't fully descheduled from its runqueue, 2266 * in this case we must do a remote wakeup. Its a 'light' wakeup though, 2267 * since all we need to do is flip p->state to TASK_RUNNING, since 2268 * the task is still ->on_rq. 
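 *
 * A rough sketch of the window this covers (assuming the common
 * "write ->state, then dequeue in __schedule()" sleep path): the
 * sleeper has already stored p->state = TASK_INTERRUPTIBLE but has not
 * yet dequeued itself, so the waker still observes p->on_rq == 1.
 * Under the rq lock we merely re-check task_on_rq_queued() and let
 * ttwu_do_wakeup() flip the state back to TASK_RUNNING; no (re-)enqueue
 * is needed, and check_preempt_curr() still runs so the wakeup can
 * cause preemption as usual.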
2269 */ 2270 static int ttwu_remote(struct task_struct *p, int wake_flags) 2271 { 2272 struct rq_flags rf; 2273 struct rq *rq; 2274 int ret = 0; 2275 2276 rq = __task_rq_lock(p, &rf); 2277 if (task_on_rq_queued(p)) { 2278 /* check_preempt_curr() may use rq clock */ 2279 update_rq_clock(rq); 2280 ttwu_do_wakeup(rq, p, wake_flags, &rf); 2281 ret = 1; 2282 } 2283 __task_rq_unlock(rq, &rf); 2284 2285 return ret; 2286 } 2287 2288 #ifdef CONFIG_SMP 2289 void sched_ttwu_pending(void) 2290 { 2291 struct rq *rq = this_rq(); 2292 struct llist_node *llist = llist_del_all(&rq->wake_list); 2293 struct task_struct *p, *t; 2294 struct rq_flags rf; 2295 2296 if (!llist) 2297 return; 2298 2299 rq_lock_irqsave(rq, &rf); 2300 update_rq_clock(rq); 2301 2302 llist_for_each_entry_safe(p, t, llist, wake_entry) 2303 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); 2304 2305 rq_unlock_irqrestore(rq, &rf); 2306 } 2307 2308 void scheduler_ipi(void) 2309 { 2310 /* 2311 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting 2312 * TIF_NEED_RESCHED remotely (for the first time) will also send 2313 * this IPI. 2314 */ 2315 preempt_fold_need_resched(); 2316 2317 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) 2318 return; 2319 2320 /* 2321 * Not all reschedule IPI handlers call irq_enter/irq_exit, since 2322 * traditionally all their work was done from the interrupt return 2323 * path. Now that we actually do some work, we need to make sure 2324 * we do call them. 2325 * 2326 * Some archs already do call them, luckily irq_enter/exit nest 2327 * properly. 2328 * 2329 * Arguably we should visit all archs and update all handlers, 2330 * however a fair share of IPIs are still resched only so this would 2331 * somewhat pessimize the simple resched case. 2332 */ 2333 irq_enter(); 2334 sched_ttwu_pending(); 2335 2336 /* 2337 * Check if someone kicked us for doing the nohz idle load balance. 
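	 *
	 * Note the balancing itself is not done from the IPI; raising
	 * SCHED_SOFTIRQ below defers it to the softirq handler
	 * (run_rebalance_domains() in fair.c), which performs the nohz
	 * idle balance on this CPU's behalf.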
2338 */ 2339 if (unlikely(got_nohz_idle_kick())) { 2340 this_rq()->idle_balance = 1; 2341 raise_softirq_irqoff(SCHED_SOFTIRQ); 2342 } 2343 irq_exit(); 2344 } 2345 2346 static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags) 2347 { 2348 struct rq *rq = cpu_rq(cpu); 2349 2350 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 2351 2352 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) { 2353 if (!set_nr_if_polling(rq->idle)) 2354 smp_send_reschedule(cpu); 2355 else 2356 trace_sched_wake_idle_without_ipi(cpu); 2357 } 2358 } 2359 2360 void wake_up_if_idle(int cpu) 2361 { 2362 struct rq *rq = cpu_rq(cpu); 2363 struct rq_flags rf; 2364 2365 rcu_read_lock(); 2366 2367 if (!is_idle_task(rcu_dereference(rq->curr))) 2368 goto out; 2369 2370 if (set_nr_if_polling(rq->idle)) { 2371 trace_sched_wake_idle_without_ipi(cpu); 2372 } else { 2373 rq_lock_irqsave(rq, &rf); 2374 if (is_idle_task(rq->curr)) 2375 smp_send_reschedule(cpu); 2376 /* Else CPU is not idle, do nothing here: */ 2377 rq_unlock_irqrestore(rq, &rf); 2378 } 2379 2380 out: 2381 rcu_read_unlock(); 2382 } 2383 2384 bool cpus_share_cache(int this_cpu, int that_cpu) 2385 { 2386 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 2387 } 2388 #endif /* CONFIG_SMP */ 2389 2390 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 2391 { 2392 struct rq *rq = cpu_rq(cpu); 2393 struct rq_flags rf; 2394 2395 #if defined(CONFIG_SMP) 2396 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { 2397 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ 2398 ttwu_queue_remote(p, cpu, wake_flags); 2399 return; 2400 } 2401 #endif 2402 2403 rq_lock(rq, &rf); 2404 update_rq_clock(rq); 2405 ttwu_do_activate(rq, p, wake_flags, &rf); 2406 rq_unlock(rq, &rf); 2407 } 2408 2409 /* 2410 * Notes on Program-Order guarantees on SMP systems. 2411 * 2412 * MIGRATION 2413 * 2414 * The basic program-order guarantee on SMP systems is that when a task [t] 2415 * migrates, all its activity on its old CPU [c0] happens-before any subsequent 2416 * execution on its new CPU [c1]. 2417 * 2418 * For migration (of runnable tasks) this is provided by the following means: 2419 * 2420 * A) UNLOCK of the rq(c0)->lock scheduling out task t 2421 * B) migration for t is required to synchronize *both* rq(c0)->lock and 2422 * rq(c1)->lock (if not at the same time, then in that order). 2423 * C) LOCK of the rq(c1)->lock scheduling in task 2424 * 2425 * Release/acquire chaining guarantees that B happens after A and C after B. 2426 * Note: the CPU doing B need not be c0 or c1 2427 * 2428 * Example: 2429 * 2430 * CPU0 CPU1 CPU2 2431 * 2432 * LOCK rq(0)->lock 2433 * sched-out X 2434 * sched-in Y 2435 * UNLOCK rq(0)->lock 2436 * 2437 * LOCK rq(0)->lock // orders against CPU0 2438 * dequeue X 2439 * UNLOCK rq(0)->lock 2440 * 2441 * LOCK rq(1)->lock 2442 * enqueue X 2443 * UNLOCK rq(1)->lock 2444 * 2445 * LOCK rq(1)->lock // orders against CPU2 2446 * sched-out Z 2447 * sched-in X 2448 * UNLOCK rq(1)->lock 2449 * 2450 * 2451 * BLOCKING -- aka. SLEEP + WAKEUP 2452 * 2453 * For blocking we (obviously) need to provide the same guarantee as for 2454 * migration. However the means are completely different as there is no lock 2455 * chain to provide order. 
Instead we do: 2456 * 2457 * 1) smp_store_release(X->on_cpu, 0) 2458 * 2) smp_cond_load_acquire(!X->on_cpu) 2459 * 2460 * Example: 2461 * 2462 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 2463 * 2464 * LOCK rq(0)->lock LOCK X->pi_lock 2465 * dequeue X 2466 * sched-out X 2467 * smp_store_release(X->on_cpu, 0); 2468 * 2469 * smp_cond_load_acquire(&X->on_cpu, !VAL); 2470 * X->state = WAKING 2471 * set_task_cpu(X,2) 2472 * 2473 * LOCK rq(2)->lock 2474 * enqueue X 2475 * X->state = RUNNING 2476 * UNLOCK rq(2)->lock 2477 * 2478 * LOCK rq(2)->lock // orders against CPU1 2479 * sched-out Z 2480 * sched-in X 2481 * UNLOCK rq(2)->lock 2482 * 2483 * UNLOCK X->pi_lock 2484 * UNLOCK rq(0)->lock 2485 * 2486 * 2487 * However, for wakeups there is a second guarantee we must provide, namely we 2488 * must ensure that CONDITION=1 done by the caller can not be reordered with 2489 * accesses to the task state; see try_to_wake_up() and set_current_state(). 2490 */ 2491 2492 /** 2493 * try_to_wake_up - wake up a thread 2494 * @p: the thread to be awakened 2495 * @state: the mask of task states that can be woken 2496 * @wake_flags: wake modifier flags (WF_*) 2497 * 2498 * If (@state & @p->state) @p->state = TASK_RUNNING. 2499 * 2500 * If the task was not queued/runnable, also place it back on a runqueue. 2501 * 2502 * Atomic against schedule() which would dequeue a task, also see 2503 * set_current_state(). 2504 * 2505 * This function executes a full memory barrier before accessing the task 2506 * state; see set_current_state(). 2507 * 2508 * Return: %true if @p->state changes (an actual wakeup was done), 2509 * %false otherwise. 2510 */ 2511 static int 2512 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 2513 { 2514 unsigned long flags; 2515 int cpu, success = 0; 2516 2517 preempt_disable(); 2518 if (p == current) { 2519 /* 2520 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) 2521 * == smp_processor_id()'. Together this means we can special 2522 * case the whole 'p->on_rq && ttwu_remote()' case below 2523 * without taking any locks. 2524 * 2525 * In particular: 2526 * - we rely on Program-Order guarantees for all the ordering, 2527 * - we're serialized against set_special_state() by virtue of 2528 * it disabling IRQs (this allows not taking ->pi_lock). 2529 */ 2530 if (!(p->state & state)) 2531 goto out; 2532 2533 success = 1; 2534 cpu = task_cpu(p); 2535 trace_sched_waking(p); 2536 p->state = TASK_RUNNING; 2537 trace_sched_wakeup(p); 2538 goto out; 2539 } 2540 2541 /* 2542 * If we are going to wake up a thread waiting for CONDITION we 2543 * need to ensure that CONDITION=1 done by the caller can not be 2544 * reordered with p->state check below. This pairs with mb() in 2545 * set_current_state() the waiting thread does. 2546 */ 2547 raw_spin_lock_irqsave(&p->pi_lock, flags); 2548 smp_mb__after_spinlock(); 2549 if (!(p->state & state)) 2550 goto unlock; 2551 2552 trace_sched_waking(p); 2553 2554 /* We're going to change ->state: */ 2555 success = 1; 2556 cpu = task_cpu(p); 2557 2558 /* 2559 * Ensure we load p->on_rq _after_ p->state, otherwise it would 2560 * be possible to, falsely, observe p->on_rq == 0 and get stuck 2561 * in smp_cond_load_acquire() below. 
2562 * 2563 * sched_ttwu_pending() try_to_wake_up() 2564 * STORE p->on_rq = 1 LOAD p->state 2565 * UNLOCK rq->lock 2566 * 2567 * __schedule() (switch to task 'p') 2568 * LOCK rq->lock smp_rmb(); 2569 * smp_mb__after_spinlock(); 2570 * UNLOCK rq->lock 2571 * 2572 * [task p] 2573 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq 2574 * 2575 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 2576 * __schedule(). See the comment for smp_mb__after_spinlock(). 2577 */ 2578 smp_rmb(); 2579 if (p->on_rq && ttwu_remote(p, wake_flags)) 2580 goto unlock; 2581 2582 #ifdef CONFIG_SMP 2583 /* 2584 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be 2585 * possible to, falsely, observe p->on_cpu == 0. 2586 * 2587 * One must be running (->on_cpu == 1) in order to remove oneself 2588 * from the runqueue. 2589 * 2590 * __schedule() (switch to task 'p') try_to_wake_up() 2591 * STORE p->on_cpu = 1 LOAD p->on_rq 2592 * UNLOCK rq->lock 2593 * 2594 * __schedule() (put 'p' to sleep) 2595 * LOCK rq->lock smp_rmb(); 2596 * smp_mb__after_spinlock(); 2597 * STORE p->on_rq = 0 LOAD p->on_cpu 2598 * 2599 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 2600 * __schedule(). See the comment for smp_mb__after_spinlock(). 2601 */ 2602 smp_rmb(); 2603 2604 /* 2605 * If the owning (remote) CPU is still in the middle of schedule() with 2606 * this task as prev, wait until its done referencing the task. 2607 * 2608 * Pairs with the smp_store_release() in finish_task(). 2609 * 2610 * This ensures that tasks getting woken will be fully ordered against 2611 * their previous state and preserve Program Order. 2612 */ 2613 smp_cond_load_acquire(&p->on_cpu, !VAL); 2614 2615 p->sched_contributes_to_load = !!task_contributes_to_load(p); 2616 p->state = TASK_WAKING; 2617 2618 if (p->in_iowait) { 2619 delayacct_blkio_end(p); 2620 atomic_dec(&task_rq(p)->nr_iowait); 2621 } 2622 2623 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); 2624 if (task_cpu(p) != cpu) { 2625 wake_flags |= WF_MIGRATED; 2626 psi_ttwu_dequeue(p); 2627 set_task_cpu(p, cpu); 2628 } 2629 2630 #else /* CONFIG_SMP */ 2631 2632 if (p->in_iowait) { 2633 delayacct_blkio_end(p); 2634 atomic_dec(&task_rq(p)->nr_iowait); 2635 } 2636 2637 #endif /* CONFIG_SMP */ 2638 2639 ttwu_queue(p, cpu, wake_flags); 2640 unlock: 2641 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2642 out: 2643 if (success) 2644 ttwu_stat(p, cpu, wake_flags); 2645 preempt_enable(); 2646 2647 return success; 2648 } 2649 2650 /** 2651 * wake_up_process - Wake up a specific process 2652 * @p: The process to be woken up. 2653 * 2654 * Attempt to wake up the nominated process and move it to the set of runnable 2655 * processes. 2656 * 2657 * Return: 1 if the process was woken up, 0 if it was already running. 2658 * 2659 * This function executes a full memory barrier before accessing the task state. 2660 */ 2661 int wake_up_process(struct task_struct *p) 2662 { 2663 return try_to_wake_up(p, TASK_NORMAL, 0); 2664 } 2665 EXPORT_SYMBOL(wake_up_process); 2666 2667 int wake_up_state(struct task_struct *p, unsigned int state) 2668 { 2669 return try_to_wake_up(p, state, 0); 2670 } 2671 2672 /* 2673 * Perform scheduler related setup for a newly forked process p. 2674 * p is forked by current. 
2675 * 2676 * __sched_fork() is basic setup used by init_idle() too: 2677 */ 2678 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 2679 { 2680 p->on_rq = 0; 2681 2682 p->se.on_rq = 0; 2683 p->se.exec_start = 0; 2684 p->se.sum_exec_runtime = 0; 2685 p->se.prev_sum_exec_runtime = 0; 2686 p->se.nr_migrations = 0; 2687 p->se.vruntime = 0; 2688 INIT_LIST_HEAD(&p->se.group_node); 2689 2690 #ifdef CONFIG_FAIR_GROUP_SCHED 2691 p->se.cfs_rq = NULL; 2692 #endif 2693 2694 #ifdef CONFIG_SCHEDSTATS 2695 /* Even if schedstat is disabled, there should not be garbage */ 2696 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 2697 #endif 2698 2699 RB_CLEAR_NODE(&p->dl.rb_node); 2700 init_dl_task_timer(&p->dl); 2701 init_dl_inactive_task_timer(&p->dl); 2702 __dl_clear_params(p); 2703 2704 INIT_LIST_HEAD(&p->rt.run_list); 2705 p->rt.timeout = 0; 2706 p->rt.time_slice = sched_rr_timeslice; 2707 p->rt.on_rq = 0; 2708 p->rt.on_list = 0; 2709 2710 #ifdef CONFIG_PREEMPT_NOTIFIERS 2711 INIT_HLIST_HEAD(&p->preempt_notifiers); 2712 #endif 2713 2714 #ifdef CONFIG_COMPACTION 2715 p->capture_control = NULL; 2716 #endif 2717 init_numa_balancing(clone_flags, p); 2718 } 2719 2720 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 2721 2722 #ifdef CONFIG_NUMA_BALANCING 2723 2724 void set_numabalancing_state(bool enabled) 2725 { 2726 if (enabled) 2727 static_branch_enable(&sched_numa_balancing); 2728 else 2729 static_branch_disable(&sched_numa_balancing); 2730 } 2731 2732 #ifdef CONFIG_PROC_SYSCTL 2733 int sysctl_numa_balancing(struct ctl_table *table, int write, 2734 void __user *buffer, size_t *lenp, loff_t *ppos) 2735 { 2736 struct ctl_table t; 2737 int err; 2738 int state = static_branch_likely(&sched_numa_balancing); 2739 2740 if (write && !capable(CAP_SYS_ADMIN)) 2741 return -EPERM; 2742 2743 t = *table; 2744 t.data = &state; 2745 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2746 if (err < 0) 2747 return err; 2748 if (write) 2749 set_numabalancing_state(state); 2750 return err; 2751 } 2752 #endif 2753 #endif 2754 2755 #ifdef CONFIG_SCHEDSTATS 2756 2757 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 2758 static bool __initdata __sched_schedstats = false; 2759 2760 static void set_schedstats(bool enabled) 2761 { 2762 if (enabled) 2763 static_branch_enable(&sched_schedstats); 2764 else 2765 static_branch_disable(&sched_schedstats); 2766 } 2767 2768 void force_schedstat_enabled(void) 2769 { 2770 if (!schedstat_enabled()) { 2771 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 2772 static_branch_enable(&sched_schedstats); 2773 } 2774 } 2775 2776 static int __init setup_schedstats(char *str) 2777 { 2778 int ret = 0; 2779 if (!str) 2780 goto out; 2781 2782 /* 2783 * This code is called before jump labels have been set up, so we can't 2784 * change the static branch directly just yet. Instead set a temporary 2785 * variable so init_schedstats() can do it later. 
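 *
 * For example, booting with:
 *
 *	schedstats=enable
 *
 * on the kernel command line turns the stats on from early boot; once
 * the system is up, the same switch is reachable through the
 * kernel.sched_schedstats sysctl (see sysctl_schedstats() below).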
2786 */ 2787 if (!strcmp(str, "enable")) { 2788 __sched_schedstats = true; 2789 ret = 1; 2790 } else if (!strcmp(str, "disable")) { 2791 __sched_schedstats = false; 2792 ret = 1; 2793 } 2794 out: 2795 if (!ret) 2796 pr_warn("Unable to parse schedstats=\n"); 2797 2798 return ret; 2799 } 2800 __setup("schedstats=", setup_schedstats); 2801 2802 static void __init init_schedstats(void) 2803 { 2804 set_schedstats(__sched_schedstats); 2805 } 2806 2807 #ifdef CONFIG_PROC_SYSCTL 2808 int sysctl_schedstats(struct ctl_table *table, int write, 2809 void __user *buffer, size_t *lenp, loff_t *ppos) 2810 { 2811 struct ctl_table t; 2812 int err; 2813 int state = static_branch_likely(&sched_schedstats); 2814 2815 if (write && !capable(CAP_SYS_ADMIN)) 2816 return -EPERM; 2817 2818 t = *table; 2819 t.data = &state; 2820 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2821 if (err < 0) 2822 return err; 2823 if (write) 2824 set_schedstats(state); 2825 return err; 2826 } 2827 #endif /* CONFIG_PROC_SYSCTL */ 2828 #else /* !CONFIG_SCHEDSTATS */ 2829 static inline void init_schedstats(void) {} 2830 #endif /* CONFIG_SCHEDSTATS */ 2831 2832 /* 2833 * fork()/clone()-time setup: 2834 */ 2835 int sched_fork(unsigned long clone_flags, struct task_struct *p) 2836 { 2837 unsigned long flags; 2838 2839 __sched_fork(clone_flags, p); 2840 /* 2841 * We mark the process as NEW here. This guarantees that 2842 * nobody will actually run it, and a signal or other external 2843 * event cannot wake it up and insert it on the runqueue either. 2844 */ 2845 p->state = TASK_NEW; 2846 2847 /* 2848 * Make sure we do not leak PI boosting priority to the child. 2849 */ 2850 p->prio = current->normal_prio; 2851 2852 uclamp_fork(p); 2853 2854 /* 2855 * Revert to default priority/policy on fork if requested. 2856 */ 2857 if (unlikely(p->sched_reset_on_fork)) { 2858 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 2859 p->policy = SCHED_NORMAL; 2860 p->static_prio = NICE_TO_PRIO(0); 2861 p->rt_priority = 0; 2862 } else if (PRIO_TO_NICE(p->static_prio) < 0) 2863 p->static_prio = NICE_TO_PRIO(0); 2864 2865 p->prio = p->normal_prio = __normal_prio(p); 2866 set_load_weight(p, false); 2867 2868 /* 2869 * We don't need the reset flag anymore after the fork. It has 2870 * fulfilled its duty: 2871 */ 2872 p->sched_reset_on_fork = 0; 2873 } 2874 2875 if (dl_prio(p->prio)) 2876 return -EAGAIN; 2877 else if (rt_prio(p->prio)) 2878 p->sched_class = &rt_sched_class; 2879 else 2880 p->sched_class = &fair_sched_class; 2881 2882 init_entity_runnable_average(&p->se); 2883 2884 /* 2885 * The child is not yet in the pid-hash so no cgroup attach races, 2886 * and the cgroup is pinned to this child due to cgroup_fork() 2887 * is ran before sched_fork(). 2888 * 2889 * Silence PROVE_RCU. 2890 */ 2891 raw_spin_lock_irqsave(&p->pi_lock, flags); 2892 /* 2893 * We're setting the CPU for the first time, we don't migrate, 2894 * so use __set_task_cpu(). 
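	 *
	 * (__set_task_cpu() only records the CPU; the full set_task_cpu()
	 * would also invoke the class's migrate_task_rq() callback and the
	 * migration statistics, none of which make sense for a task that
	 * has never run -- see the similar note in wake_up_new_task().)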
2895 */ 2896 __set_task_cpu(p, smp_processor_id()); 2897 if (p->sched_class->task_fork) 2898 p->sched_class->task_fork(p); 2899 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2900 2901 #ifdef CONFIG_SCHED_INFO 2902 if (likely(sched_info_on())) 2903 memset(&p->sched_info, 0, sizeof(p->sched_info)); 2904 #endif 2905 #if defined(CONFIG_SMP) 2906 p->on_cpu = 0; 2907 #endif 2908 init_task_preempt_count(p); 2909 #ifdef CONFIG_SMP 2910 plist_node_init(&p->pushable_tasks, MAX_PRIO); 2911 RB_CLEAR_NODE(&p->pushable_dl_tasks); 2912 #endif 2913 return 0; 2914 } 2915 2916 unsigned long to_ratio(u64 period, u64 runtime) 2917 { 2918 if (runtime == RUNTIME_INF) 2919 return BW_UNIT; 2920 2921 /* 2922 * Doing this here saves a lot of checks in all 2923 * the calling paths, and returning zero seems 2924 * safe for them anyway. 2925 */ 2926 if (period == 0) 2927 return 0; 2928 2929 return div64_u64(runtime << BW_SHIFT, period); 2930 } 2931 2932 /* 2933 * wake_up_new_task - wake up a newly created task for the first time. 2934 * 2935 * This function will do some initial scheduler statistics housekeeping 2936 * that must be done for every newly created context, then puts the task 2937 * on the runqueue and wakes it. 2938 */ 2939 void wake_up_new_task(struct task_struct *p) 2940 { 2941 struct rq_flags rf; 2942 struct rq *rq; 2943 2944 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 2945 p->state = TASK_RUNNING; 2946 #ifdef CONFIG_SMP 2947 /* 2948 * Fork balancing, do it here and not earlier because: 2949 * - cpus_ptr can change in the fork path 2950 * - any previously selected CPU might disappear through hotplug 2951 * 2952 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 2953 * as we're not fully set-up yet. 2954 */ 2955 p->recent_used_cpu = task_cpu(p); 2956 __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 2957 #endif 2958 rq = __task_rq_lock(p, &rf); 2959 update_rq_clock(rq); 2960 post_init_entity_util_avg(p); 2961 2962 activate_task(rq, p, ENQUEUE_NOCLOCK); 2963 trace_sched_wakeup_new(p); 2964 check_preempt_curr(rq, p, WF_FORK); 2965 #ifdef CONFIG_SMP 2966 if (p->sched_class->task_woken) { 2967 /* 2968 * Nothing relies on rq->lock after this, so its fine to 2969 * drop it. 2970 */ 2971 rq_unpin_lock(rq, &rf); 2972 p->sched_class->task_woken(rq, p); 2973 rq_repin_lock(rq, &rf); 2974 } 2975 #endif 2976 task_rq_unlock(rq, p, &rf); 2977 } 2978 2979 #ifdef CONFIG_PREEMPT_NOTIFIERS 2980 2981 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); 2982 2983 void preempt_notifier_inc(void) 2984 { 2985 static_branch_inc(&preempt_notifier_key); 2986 } 2987 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 2988 2989 void preempt_notifier_dec(void) 2990 { 2991 static_branch_dec(&preempt_notifier_key); 2992 } 2993 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 2994 2995 /** 2996 * preempt_notifier_register - tell me when current is being preempted & rescheduled 2997 * @notifier: notifier struct to register 2998 */ 2999 void preempt_notifier_register(struct preempt_notifier *notifier) 3000 { 3001 if (!static_branch_unlikely(&preempt_notifier_key)) 3002 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 3003 3004 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 3005 } 3006 EXPORT_SYMBOL_GPL(preempt_notifier_register); 3007 3008 /** 3009 * preempt_notifier_unregister - no longer interested in preemption notifications 3010 * @notifier: notifier struct to unregister 3011 * 3012 * This is *not* safe to call from within a preemption notifier. 
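 *
 * Illustrative usage only -- the 'my_*' and 'ctx' names below are
 * hypothetical, but the ops signatures match the callbacks invoked
 * further down in this file:
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		... reload per-CPU state for the incoming task ...
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		... stash per-CPU state before being switched out ...
 *	}
 *
 *	static struct preempt_notifier_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&ctx->notifier, &my_ops);
 *	preempt_notifier_register(&ctx->notifier);	(from the task itself)
 *	...
 *	preempt_notifier_unregister(&ctx->notifier);
 *	preempt_notifier_dec();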
3013 */ 3014 void preempt_notifier_unregister(struct preempt_notifier *notifier) 3015 { 3016 hlist_del(¬ifier->link); 3017 } 3018 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 3019 3020 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 3021 { 3022 struct preempt_notifier *notifier; 3023 3024 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 3025 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 3026 } 3027 3028 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 3029 { 3030 if (static_branch_unlikely(&preempt_notifier_key)) 3031 __fire_sched_in_preempt_notifiers(curr); 3032 } 3033 3034 static void 3035 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 3036 struct task_struct *next) 3037 { 3038 struct preempt_notifier *notifier; 3039 3040 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 3041 notifier->ops->sched_out(notifier, next); 3042 } 3043 3044 static __always_inline void 3045 fire_sched_out_preempt_notifiers(struct task_struct *curr, 3046 struct task_struct *next) 3047 { 3048 if (static_branch_unlikely(&preempt_notifier_key)) 3049 __fire_sched_out_preempt_notifiers(curr, next); 3050 } 3051 3052 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 3053 3054 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 3055 { 3056 } 3057 3058 static inline void 3059 fire_sched_out_preempt_notifiers(struct task_struct *curr, 3060 struct task_struct *next) 3061 { 3062 } 3063 3064 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 3065 3066 static inline void prepare_task(struct task_struct *next) 3067 { 3068 #ifdef CONFIG_SMP 3069 /* 3070 * Claim the task as running, we do this before switching to it 3071 * such that any running task will have this set. 3072 */ 3073 next->on_cpu = 1; 3074 #endif 3075 } 3076 3077 static inline void finish_task(struct task_struct *prev) 3078 { 3079 #ifdef CONFIG_SMP 3080 /* 3081 * After ->on_cpu is cleared, the task can be moved to a different CPU. 3082 * We must ensure this doesn't happen until the switch is completely 3083 * finished. 3084 * 3085 * In particular, the load of prev->state in finish_task_switch() must 3086 * happen before this. 3087 * 3088 * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 
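	 *
	 * In the style of the wakeup diagrams earlier in this file, the
	 * pairing is simply:
	 *
	 *	finish_task() (prev's old CPU)	try_to_wake_up() (waker)
	 *
	 *	[loads of prev->state, etc.]
	 *	smp_store_release(&prev->on_cpu, 0)
	 *					smp_cond_load_acquire(&p->on_cpu, !VAL)
	 *					[everything before the release
	 *					 is now visible to the waker]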
3089 */ 3090 smp_store_release(&prev->on_cpu, 0); 3091 #endif 3092 } 3093 3094 static inline void 3095 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) 3096 { 3097 /* 3098 * Since the runqueue lock will be released by the next 3099 * task (which is an invalid locking op but in the case 3100 * of the scheduler it's an obvious special-case), so we 3101 * do an early lockdep release here: 3102 */ 3103 rq_unpin_lock(rq, rf); 3104 spin_release(&rq->lock.dep_map, _THIS_IP_); 3105 #ifdef CONFIG_DEBUG_SPINLOCK 3106 /* this is a valid case when another task releases the spinlock */ 3107 rq->lock.owner = next; 3108 #endif 3109 } 3110 3111 static inline void finish_lock_switch(struct rq *rq) 3112 { 3113 /* 3114 * If we are tracking spinlock dependencies then we have to 3115 * fix up the runqueue lock - which gets 'carried over' from 3116 * prev into current: 3117 */ 3118 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 3119 raw_spin_unlock_irq(&rq->lock); 3120 } 3121 3122 /* 3123 * NOP if the arch has not defined these: 3124 */ 3125 3126 #ifndef prepare_arch_switch 3127 # define prepare_arch_switch(next) do { } while (0) 3128 #endif 3129 3130 #ifndef finish_arch_post_lock_switch 3131 # define finish_arch_post_lock_switch() do { } while (0) 3132 #endif 3133 3134 /** 3135 * prepare_task_switch - prepare to switch tasks 3136 * @rq: the runqueue preparing to switch 3137 * @prev: the current task that is being switched out 3138 * @next: the task we are going to switch to. 3139 * 3140 * This is called with the rq lock held and interrupts off. It must 3141 * be paired with a subsequent finish_task_switch after the context 3142 * switch. 3143 * 3144 * prepare_task_switch sets up locking and calls architecture specific 3145 * hooks. 3146 */ 3147 static inline void 3148 prepare_task_switch(struct rq *rq, struct task_struct *prev, 3149 struct task_struct *next) 3150 { 3151 kcov_prepare_switch(prev); 3152 sched_info_switch(rq, prev, next); 3153 perf_event_task_sched_out(prev, next); 3154 rseq_preempt(prev); 3155 fire_sched_out_preempt_notifiers(prev, next); 3156 prepare_task(next); 3157 prepare_arch_switch(next); 3158 } 3159 3160 /** 3161 * finish_task_switch - clean up after a task-switch 3162 * @prev: the thread we just switched away from. 3163 * 3164 * finish_task_switch must be called after the context switch, paired 3165 * with a prepare_task_switch call before the context switch. 3166 * finish_task_switch will reconcile locking set up by prepare_task_switch, 3167 * and do any other architecture-specific cleanup actions. 3168 * 3169 * Note that we may have delayed dropping an mm in context_switch(). If 3170 * so, we finish that here outside of the runqueue lock. (Doing it 3171 * with the lock held can cause deadlocks; see schedule() for 3172 * details.) 3173 * 3174 * The context switch have flipped the stack from under us and restored the 3175 * local variables which were saved when this task called schedule() in the 3176 * past. prev == current is still correct but we need to recalculate this_rq 3177 * because prev may have moved to another CPU. 
3178 */ 3179 static struct rq *finish_task_switch(struct task_struct *prev) 3180 __releases(rq->lock) 3181 { 3182 struct rq *rq = this_rq(); 3183 struct mm_struct *mm = rq->prev_mm; 3184 long prev_state; 3185 3186 /* 3187 * The previous task will have left us with a preempt_count of 2 3188 * because it left us after: 3189 * 3190 * schedule() 3191 * preempt_disable(); // 1 3192 * __schedule() 3193 * raw_spin_lock_irq(&rq->lock) // 2 3194 * 3195 * Also, see FORK_PREEMPT_COUNT. 3196 */ 3197 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, 3198 "corrupted preempt_count: %s/%d/0x%x\n", 3199 current->comm, current->pid, preempt_count())) 3200 preempt_count_set(FORK_PREEMPT_COUNT); 3201 3202 rq->prev_mm = NULL; 3203 3204 /* 3205 * A task struct has one reference for the use as "current". 3206 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 3207 * schedule one last time. The schedule call will never return, and 3208 * the scheduled task must drop that reference. 3209 * 3210 * We must observe prev->state before clearing prev->on_cpu (in 3211 * finish_task), otherwise a concurrent wakeup can get prev 3212 * running on another CPU and we could rave with its RUNNING -> DEAD 3213 * transition, resulting in a double drop. 3214 */ 3215 prev_state = prev->state; 3216 vtime_task_switch(prev); 3217 perf_event_task_sched_in(prev, current); 3218 finish_task(prev); 3219 finish_lock_switch(rq); 3220 finish_arch_post_lock_switch(); 3221 kcov_finish_switch(current); 3222 3223 fire_sched_in_preempt_notifiers(current); 3224 /* 3225 * When switching through a kernel thread, the loop in 3226 * membarrier_{private,global}_expedited() may have observed that 3227 * kernel thread and not issued an IPI. It is therefore possible to 3228 * schedule between user->kernel->user threads without passing though 3229 * switch_mm(). Membarrier requires a barrier after storing to 3230 * rq->curr, before returning to userspace, so provide them here: 3231 * 3232 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly 3233 * provided by mmdrop(), 3234 * - a sync_core for SYNC_CORE. 3235 */ 3236 if (mm) { 3237 membarrier_mm_sync_core_before_usermode(mm); 3238 mmdrop(mm); 3239 } 3240 if (unlikely(prev_state == TASK_DEAD)) { 3241 if (prev->sched_class->task_dead) 3242 prev->sched_class->task_dead(prev); 3243 3244 /* 3245 * Remove function-return probe instances associated with this 3246 * task and put them back on the free list. 3247 */ 3248 kprobe_flush_task(prev); 3249 3250 /* Task is done with its stack. 
*/ 3251 put_task_stack(prev); 3252 3253 put_task_struct_rcu_user(prev); 3254 } 3255 3256 tick_nohz_task_switch(); 3257 return rq; 3258 } 3259 3260 #ifdef CONFIG_SMP 3261 3262 /* rq->lock is NOT held, but preemption is disabled */ 3263 static void __balance_callback(struct rq *rq) 3264 { 3265 struct callback_head *head, *next; 3266 void (*func)(struct rq *rq); 3267 unsigned long flags; 3268 3269 raw_spin_lock_irqsave(&rq->lock, flags); 3270 head = rq->balance_callback; 3271 rq->balance_callback = NULL; 3272 while (head) { 3273 func = (void (*)(struct rq *))head->func; 3274 next = head->next; 3275 head->next = NULL; 3276 head = next; 3277 3278 func(rq); 3279 } 3280 raw_spin_unlock_irqrestore(&rq->lock, flags); 3281 } 3282 3283 static inline void balance_callback(struct rq *rq) 3284 { 3285 if (unlikely(rq->balance_callback)) 3286 __balance_callback(rq); 3287 } 3288 3289 #else 3290 3291 static inline void balance_callback(struct rq *rq) 3292 { 3293 } 3294 3295 #endif 3296 3297 /** 3298 * schedule_tail - first thing a freshly forked thread must call. 3299 * @prev: the thread we just switched away from. 3300 */ 3301 asmlinkage __visible void schedule_tail(struct task_struct *prev) 3302 __releases(rq->lock) 3303 { 3304 struct rq *rq; 3305 3306 /* 3307 * New tasks start with FORK_PREEMPT_COUNT, see there and 3308 * finish_task_switch() for details. 3309 * 3310 * finish_task_switch() will drop rq->lock() and lower preempt_count 3311 * and the preempt_enable() will end up enabling preemption (on 3312 * PREEMPT_COUNT kernels). 3313 */ 3314 3315 rq = finish_task_switch(prev); 3316 balance_callback(rq); 3317 preempt_enable(); 3318 3319 if (current->set_child_tid) 3320 put_user(task_pid_vnr(current), current->set_child_tid); 3321 3322 calculate_sigpending(); 3323 } 3324 3325 /* 3326 * context_switch - switch to the new MM and the new thread's register state. 3327 */ 3328 static __always_inline struct rq * 3329 context_switch(struct rq *rq, struct task_struct *prev, 3330 struct task_struct *next, struct rq_flags *rf) 3331 { 3332 prepare_task_switch(rq, prev, next); 3333 3334 /* 3335 * For paravirt, this is coupled with an exit in switch_to to 3336 * combine the page table reload and the switch backend into 3337 * one hypercall. 3338 */ 3339 arch_start_context_switch(prev); 3340 3341 /* 3342 * kernel -> kernel lazy + transfer active 3343 * user -> kernel lazy + mmgrab() active 3344 * 3345 * kernel -> user switch + mmdrop() active 3346 * user -> user switch 3347 */ 3348 if (!next->mm) { // to kernel 3349 enter_lazy_tlb(prev->active_mm, next); 3350 3351 next->active_mm = prev->active_mm; 3352 if (prev->mm) // from user 3353 mmgrab(prev->active_mm); 3354 else 3355 prev->active_mm = NULL; 3356 } else { // to user 3357 membarrier_switch_mm(rq, prev->active_mm, next->mm); 3358 /* 3359 * sys_membarrier() requires an smp_mb() between setting 3360 * rq->curr / membarrier_switch_mm() and returning to userspace. 3361 * 3362 * The below provides this either through switch_mm(), or in 3363 * case 'prev->active_mm == next->mm' through 3364 * finish_task_switch()'s mmdrop(). 3365 */ 3366 switch_mm_irqs_off(prev->active_mm, next->mm, next); 3367 3368 if (!prev->mm) { // from kernel 3369 /* will mmdrop() in finish_task_switch(). */ 3370 rq->prev_mm = prev->active_mm; 3371 prev->active_mm = NULL; 3372 } 3373 } 3374 3375 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 3376 3377 prepare_lock_switch(rq, next, rf); 3378 3379 /* Here we just switch the register state and the stack. 
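	 *
	 * Note that switch_to() does not return in the ordinary sense: when
	 * this task is eventually picked again, execution resumes right
	 * after it, on this task's own stack, with 'prev' now naming
	 * whichever task ran last on this CPU. The barrier() is there so
	 * the compiler does not carry stale assumptions (such as the value
	 * of this_rq()) across that stack switch.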
*/ 3380 switch_to(prev, next, prev); 3381 barrier(); 3382 3383 return finish_task_switch(prev); 3384 } 3385 3386 /* 3387 * nr_running and nr_context_switches: 3388 * 3389 * externally visible scheduler statistics: current number of runnable 3390 * threads, total number of context switches performed since bootup. 3391 */ 3392 unsigned long nr_running(void) 3393 { 3394 unsigned long i, sum = 0; 3395 3396 for_each_online_cpu(i) 3397 sum += cpu_rq(i)->nr_running; 3398 3399 return sum; 3400 } 3401 3402 /* 3403 * Check if only the current task is running on the CPU. 3404 * 3405 * Caution: this function does not check that the caller has disabled 3406 * preemption, thus the result might have a time-of-check-to-time-of-use 3407 * race. The caller is responsible to use it correctly, for example: 3408 * 3409 * - from a non-preemptible section (of course) 3410 * 3411 * - from a thread that is bound to a single CPU 3412 * 3413 * - in a loop with very short iterations (e.g. a polling loop) 3414 */ 3415 bool single_task_running(void) 3416 { 3417 return raw_rq()->nr_running == 1; 3418 } 3419 EXPORT_SYMBOL(single_task_running); 3420 3421 unsigned long long nr_context_switches(void) 3422 { 3423 int i; 3424 unsigned long long sum = 0; 3425 3426 for_each_possible_cpu(i) 3427 sum += cpu_rq(i)->nr_switches; 3428 3429 return sum; 3430 } 3431 3432 /* 3433 * Consumers of these two interfaces, like for example the cpuidle menu 3434 * governor, are using nonsensical data. Preferring shallow idle state selection 3435 * for a CPU that has IO-wait which might not even end up running the task when 3436 * it does become runnable. 3437 */ 3438 3439 unsigned long nr_iowait_cpu(int cpu) 3440 { 3441 return atomic_read(&cpu_rq(cpu)->nr_iowait); 3442 } 3443 3444 /* 3445 * IO-wait accounting, and how its mostly bollocks (on SMP). 3446 * 3447 * The idea behind IO-wait account is to account the idle time that we could 3448 * have spend running if it were not for IO. That is, if we were to improve the 3449 * storage performance, we'd have a proportional reduction in IO-wait time. 3450 * 3451 * This all works nicely on UP, where, when a task blocks on IO, we account 3452 * idle time as IO-wait, because if the storage were faster, it could've been 3453 * running and we'd not be idle. 3454 * 3455 * This has been extended to SMP, by doing the same for each CPU. This however 3456 * is broken. 3457 * 3458 * Imagine for instance the case where two tasks block on one CPU, only the one 3459 * CPU will have IO-wait accounted, while the other has regular idle. Even 3460 * though, if the storage were faster, both could've ran at the same time, 3461 * utilising both CPUs. 3462 * 3463 * This means, that when looking globally, the current IO-wait accounting on 3464 * SMP is a lower bound, by reason of under accounting. 3465 * 3466 * Worse, since the numbers are provided per CPU, they are sometimes 3467 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly 3468 * associated with any one particular CPU, it can wake to another CPU than it 3469 * blocked on. This means the per CPU IO-wait number is meaningless. 3470 * 3471 * Task CPU affinities can make all that even more 'interesting'. 
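 *
 * (Mechanically the counter is simple: __schedule() does
 * atomic_inc(&rq->nr_iowait) when a task with ->in_iowait blocks, and
 * try_to_wake_up() does the matching atomic_dec(); nr_iowait() below
 * merely sums those per-rq counters, with all the caveats above.)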
3472 */ 3473 3474 unsigned long nr_iowait(void) 3475 { 3476 unsigned long i, sum = 0; 3477 3478 for_each_possible_cpu(i) 3479 sum += nr_iowait_cpu(i); 3480 3481 return sum; 3482 } 3483 3484 #ifdef CONFIG_SMP 3485 3486 /* 3487 * sched_exec - execve() is a valuable balancing opportunity, because at 3488 * this point the task has the smallest effective memory and cache footprint. 3489 */ 3490 void sched_exec(void) 3491 { 3492 struct task_struct *p = current; 3493 unsigned long flags; 3494 int dest_cpu; 3495 3496 raw_spin_lock_irqsave(&p->pi_lock, flags); 3497 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); 3498 if (dest_cpu == smp_processor_id()) 3499 goto unlock; 3500 3501 if (likely(cpu_active(dest_cpu))) { 3502 struct migration_arg arg = { p, dest_cpu }; 3503 3504 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3505 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 3506 return; 3507 } 3508 unlock: 3509 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3510 } 3511 3512 #endif 3513 3514 DEFINE_PER_CPU(struct kernel_stat, kstat); 3515 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 3516 3517 EXPORT_PER_CPU_SYMBOL(kstat); 3518 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 3519 3520 /* 3521 * The function fair_sched_class.update_curr accesses the struct curr 3522 * and its field curr->exec_start; when called from task_sched_runtime(), 3523 * we observe a high rate of cache misses in practice. 3524 * Prefetching this data results in improved performance. 3525 */ 3526 static inline void prefetch_curr_exec_start(struct task_struct *p) 3527 { 3528 #ifdef CONFIG_FAIR_GROUP_SCHED 3529 struct sched_entity *curr = (&p->se)->cfs_rq->curr; 3530 #else 3531 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; 3532 #endif 3533 prefetch(curr); 3534 prefetch(&curr->exec_start); 3535 } 3536 3537 /* 3538 * Return accounted runtime for the task. 3539 * In case the task is currently running, return the runtime plus current's 3540 * pending runtime that have not been accounted yet. 3541 */ 3542 unsigned long long task_sched_runtime(struct task_struct *p) 3543 { 3544 struct rq_flags rf; 3545 struct rq *rq; 3546 u64 ns; 3547 3548 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 3549 /* 3550 * 64-bit doesn't need locks to atomically read a 64-bit value. 3551 * So we have a optimization chance when the task's delta_exec is 0. 3552 * Reading ->on_cpu is racy, but this is ok. 3553 * 3554 * If we race with it leaving CPU, we'll take a lock. So we're correct. 3555 * If we race with it entering CPU, unaccounted time is 0. This is 3556 * indistinguishable from the read occurring a few cycles earlier. 3557 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 3558 * been accounted, so we're correct here as well. 3559 */ 3560 if (!p->on_cpu || !task_on_rq_queued(p)) 3561 return p->se.sum_exec_runtime; 3562 #endif 3563 3564 rq = task_rq_lock(p, &rf); 3565 /* 3566 * Must be ->curr _and_ ->on_rq. If dequeued, we would 3567 * project cycles that may never be accounted to this 3568 * thread, breaking clock_gettime(). 3569 */ 3570 if (task_current(rq, p) && task_on_rq_queued(p)) { 3571 prefetch_curr_exec_start(p); 3572 update_rq_clock(rq); 3573 p->sched_class->update_curr(rq); 3574 } 3575 ns = p->se.sum_exec_runtime; 3576 task_rq_unlock(rq, p, &rf); 3577 3578 return ns; 3579 } 3580 3581 /* 3582 * This function gets called by the timer code, with HZ frequency. 3583 * We call it with interrupts disabled. 
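 *
 * (For example, with HZ=250 this runs every 4ms on each CPU; with
 * HZ=1000, every 1ms. The remote variant below runs at a fixed 1Hz
 * for CPUs whose tick has been offloaded.)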
3584 */ 3585 void scheduler_tick(void) 3586 { 3587 int cpu = smp_processor_id(); 3588 struct rq *rq = cpu_rq(cpu); 3589 struct task_struct *curr = rq->curr; 3590 struct rq_flags rf; 3591 3592 sched_clock_tick(); 3593 3594 rq_lock(rq, &rf); 3595 3596 update_rq_clock(rq); 3597 curr->sched_class->task_tick(rq, curr, 0); 3598 calc_global_load_tick(rq); 3599 psi_task_tick(rq); 3600 3601 rq_unlock(rq, &rf); 3602 3603 perf_event_task_tick(); 3604 3605 #ifdef CONFIG_SMP 3606 rq->idle_balance = idle_cpu(cpu); 3607 trigger_load_balance(rq); 3608 #endif 3609 } 3610 3611 #ifdef CONFIG_NO_HZ_FULL 3612 3613 struct tick_work { 3614 int cpu; 3615 atomic_t state; 3616 struct delayed_work work; 3617 }; 3618 /* Values for ->state, see diagram below. */ 3619 #define TICK_SCHED_REMOTE_OFFLINE 0 3620 #define TICK_SCHED_REMOTE_OFFLINING 1 3621 #define TICK_SCHED_REMOTE_RUNNING 2 3622 3623 /* 3624 * State diagram for ->state: 3625 * 3626 * 3627 * TICK_SCHED_REMOTE_OFFLINE 3628 * | ^ 3629 * | | 3630 * | | sched_tick_remote() 3631 * | | 3632 * | | 3633 * +--TICK_SCHED_REMOTE_OFFLINING 3634 * | ^ 3635 * | | 3636 * sched_tick_start() | | sched_tick_stop() 3637 * | | 3638 * V | 3639 * TICK_SCHED_REMOTE_RUNNING 3640 * 3641 * 3642 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() 3643 * and sched_tick_start() are happy to leave the state in RUNNING. 3644 */ 3645 3646 static struct tick_work __percpu *tick_work_cpu; 3647 3648 static void sched_tick_remote(struct work_struct *work) 3649 { 3650 struct delayed_work *dwork = to_delayed_work(work); 3651 struct tick_work *twork = container_of(dwork, struct tick_work, work); 3652 int cpu = twork->cpu; 3653 struct rq *rq = cpu_rq(cpu); 3654 struct task_struct *curr; 3655 struct rq_flags rf; 3656 u64 delta; 3657 int os; 3658 3659 /* 3660 * Handle the tick only if it appears the remote CPU is running in full 3661 * dynticks mode. The check is racy by nature, but missing a tick or 3662 * having one too much is no big deal because the scheduler tick updates 3663 * statistics and checks timeslices in a time-independent way, regardless 3664 * of when exactly it is running. 3665 */ 3666 if (!tick_nohz_tick_stopped_cpu(cpu)) 3667 goto out_requeue; 3668 3669 rq_lock_irq(rq, &rf); 3670 curr = rq->curr; 3671 if (cpu_is_offline(cpu)) 3672 goto out_unlock; 3673 3674 curr = rq->curr; 3675 update_rq_clock(rq); 3676 3677 if (!is_idle_task(curr)) { 3678 /* 3679 * Make sure the next tick runs within a reasonable 3680 * amount of time. 3681 */ 3682 delta = rq_clock_task(rq) - curr->se.exec_start; 3683 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); 3684 } 3685 curr->sched_class->task_tick(rq, curr, 0); 3686 3687 calc_load_nohz_remote(rq); 3688 out_unlock: 3689 rq_unlock_irq(rq, &rf); 3690 out_requeue: 3691 3692 /* 3693 * Run the remote tick once per second (1Hz). This arbitrary 3694 * frequency is large enough to avoid overload but short enough 3695 * to keep scheduler internal stats reasonably up to date. But 3696 * first update state to reflect hotplug activity if required. 
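	 *
	 * The atomic_fetch_add_unless(-1, RUNNING) below is that state
	 * update in a single step: if the state is still RUNNING (2) it is
	 * left untouched and we requeue ourselves; if sched_tick_stop() has
	 * moved it to OFFLINING (1), the -1 completes the OFFLINING ->
	 * OFFLINE transition and we simply don't requeue.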
3697 */ 3698 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); 3699 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); 3700 if (os == TICK_SCHED_REMOTE_RUNNING) 3701 queue_delayed_work(system_unbound_wq, dwork, HZ); 3702 } 3703 3704 static void sched_tick_start(int cpu) 3705 { 3706 int os; 3707 struct tick_work *twork; 3708 3709 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 3710 return; 3711 3712 WARN_ON_ONCE(!tick_work_cpu); 3713 3714 twork = per_cpu_ptr(tick_work_cpu, cpu); 3715 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); 3716 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); 3717 if (os == TICK_SCHED_REMOTE_OFFLINE) { 3718 twork->cpu = cpu; 3719 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); 3720 queue_delayed_work(system_unbound_wq, &twork->work, HZ); 3721 } 3722 } 3723 3724 #ifdef CONFIG_HOTPLUG_CPU 3725 static void sched_tick_stop(int cpu) 3726 { 3727 struct tick_work *twork; 3728 int os; 3729 3730 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 3731 return; 3732 3733 WARN_ON_ONCE(!tick_work_cpu); 3734 3735 twork = per_cpu_ptr(tick_work_cpu, cpu); 3736 /* There cannot be competing actions, but don't rely on stop-machine. */ 3737 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); 3738 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); 3739 /* Don't cancel, as this would mess up the state machine. */ 3740 } 3741 #endif /* CONFIG_HOTPLUG_CPU */ 3742 3743 int __init sched_tick_offload_init(void) 3744 { 3745 tick_work_cpu = alloc_percpu(struct tick_work); 3746 BUG_ON(!tick_work_cpu); 3747 return 0; 3748 } 3749 3750 #else /* !CONFIG_NO_HZ_FULL */ 3751 static inline void sched_tick_start(int cpu) { } 3752 static inline void sched_tick_stop(int cpu) { } 3753 #endif 3754 3755 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ 3756 defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 3757 /* 3758 * If the value passed in is equal to the current preempt count 3759 * then we just disabled preemption. Start timing the latency. 3760 */ 3761 static inline void preempt_latency_start(int val) 3762 { 3763 if (preempt_count() == val) { 3764 unsigned long ip = get_lock_parent_ip(); 3765 #ifdef CONFIG_DEBUG_PREEMPT 3766 current->preempt_disable_ip = ip; 3767 #endif 3768 trace_preempt_off(CALLER_ADDR0, ip); 3769 } 3770 } 3771 3772 void preempt_count_add(int val) 3773 { 3774 #ifdef CONFIG_DEBUG_PREEMPT 3775 /* 3776 * Underflow? 3777 */ 3778 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 3779 return; 3780 #endif 3781 __preempt_count_add(val); 3782 #ifdef CONFIG_DEBUG_PREEMPT 3783 /* 3784 * Spinlock count overflowing soon? 3785 */ 3786 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 3787 PREEMPT_MASK - 10); 3788 #endif 3789 preempt_latency_start(val); 3790 } 3791 EXPORT_SYMBOL(preempt_count_add); 3792 NOKPROBE_SYMBOL(preempt_count_add); 3793 3794 /* 3795 * If the value passed in equals to the current preempt count 3796 * then we just enabled preemption. Stop timing the latency. 3797 */ 3798 static inline void preempt_latency_stop(int val) 3799 { 3800 if (preempt_count() == val) 3801 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 3802 } 3803 3804 void preempt_count_sub(int val) 3805 { 3806 #ifdef CONFIG_DEBUG_PREEMPT 3807 /* 3808 * Underflow? 3809 */ 3810 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 3811 return; 3812 /* 3813 * Is the spinlock portion underflowing? 
3814 */ 3815 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 3816 !(preempt_count() & PREEMPT_MASK))) 3817 return; 3818 #endif 3819 3820 preempt_latency_stop(val); 3821 __preempt_count_sub(val); 3822 } 3823 EXPORT_SYMBOL(preempt_count_sub); 3824 NOKPROBE_SYMBOL(preempt_count_sub); 3825 3826 #else 3827 static inline void preempt_latency_start(int val) { } 3828 static inline void preempt_latency_stop(int val) { } 3829 #endif 3830 3831 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 3832 { 3833 #ifdef CONFIG_DEBUG_PREEMPT 3834 return p->preempt_disable_ip; 3835 #else 3836 return 0; 3837 #endif 3838 } 3839 3840 /* 3841 * Print scheduling while atomic bug: 3842 */ 3843 static noinline void __schedule_bug(struct task_struct *prev) 3844 { 3845 /* Save this before calling printk(), since that will clobber it */ 3846 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 3847 3848 if (oops_in_progress) 3849 return; 3850 3851 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 3852 prev->comm, prev->pid, preempt_count()); 3853 3854 debug_show_held_locks(prev); 3855 print_modules(); 3856 if (irqs_disabled()) 3857 print_irqtrace_events(prev); 3858 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 3859 && in_atomic_preempt_off()) { 3860 pr_err("Preemption disabled at:"); 3861 print_ip_sym(preempt_disable_ip); 3862 pr_cont("\n"); 3863 } 3864 if (panic_on_warn) 3865 panic("scheduling while atomic\n"); 3866 3867 dump_stack(); 3868 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 3869 } 3870 3871 /* 3872 * Various schedule()-time debugging checks and statistics: 3873 */ 3874 static inline void schedule_debug(struct task_struct *prev, bool preempt) 3875 { 3876 #ifdef CONFIG_SCHED_STACK_END_CHECK 3877 if (task_stack_end_corrupted(prev)) 3878 panic("corrupted stack end detected inside scheduler\n"); 3879 #endif 3880 3881 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 3882 if (!preempt && prev->state && prev->non_block_count) { 3883 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", 3884 prev->comm, prev->pid, prev->non_block_count); 3885 dump_stack(); 3886 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 3887 } 3888 #endif 3889 3890 if (unlikely(in_atomic_preempt_off())) { 3891 __schedule_bug(prev); 3892 preempt_count_set(PREEMPT_DISABLED); 3893 } 3894 rcu_sleep_check(); 3895 3896 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 3897 3898 schedstat_inc(this_rq()->sched_count); 3899 } 3900 3901 /* 3902 * Pick up the highest-prio task: 3903 */ 3904 static inline struct task_struct * 3905 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 3906 { 3907 const struct sched_class *class; 3908 struct task_struct *p; 3909 3910 /* 3911 * Optimization: we know that if all tasks are in the fair class we can 3912 * call that function directly, but only if the @prev task wasn't of a 3913 * higher scheduling class, because otherwise those loose the 3914 * opportunity to pull in more work from other CPUs. 
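	 *
	 * (The rq->nr_running == rq->cfs.h_nr_running test below is what
	 * establishes "all tasks are in the fair class": any runnable DL,
	 * RT or stop task would make the total count exceed the CFS-only
	 * one.)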
3915 */ 3916 if (likely((prev->sched_class == &idle_sched_class || 3917 prev->sched_class == &fair_sched_class) && 3918 rq->nr_running == rq->cfs.h_nr_running)) { 3919 3920 p = pick_next_task_fair(rq, prev, rf); 3921 if (unlikely(p == RETRY_TASK)) 3922 goto restart; 3923 3924 /* Assumes fair_sched_class->next == idle_sched_class */ 3925 if (!p) { 3926 put_prev_task(rq, prev); 3927 p = pick_next_task_idle(rq); 3928 } 3929 3930 return p; 3931 } 3932 3933 restart: 3934 #ifdef CONFIG_SMP 3935 /* 3936 * We must do the balancing pass before put_next_task(), such 3937 * that when we release the rq->lock the task is in the same 3938 * state as before we took rq->lock. 3939 * 3940 * We can terminate the balance pass as soon as we know there is 3941 * a runnable task of @class priority or higher. 3942 */ 3943 for_class_range(class, prev->sched_class, &idle_sched_class) { 3944 if (class->balance(rq, prev, rf)) 3945 break; 3946 } 3947 #endif 3948 3949 put_prev_task(rq, prev); 3950 3951 for_each_class(class) { 3952 p = class->pick_next_task(rq); 3953 if (p) 3954 return p; 3955 } 3956 3957 /* The idle class should always have a runnable task: */ 3958 BUG(); 3959 } 3960 3961 /* 3962 * __schedule() is the main scheduler function. 3963 * 3964 * The main means of driving the scheduler and thus entering this function are: 3965 * 3966 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 3967 * 3968 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 3969 * paths. For example, see arch/x86/entry_64.S. 3970 * 3971 * To drive preemption between tasks, the scheduler sets the flag in timer 3972 * interrupt handler scheduler_tick(). 3973 * 3974 * 3. Wakeups don't really cause entry into schedule(). They add a 3975 * task to the run-queue and that's it. 3976 * 3977 * Now, if the new task added to the run-queue preempts the current 3978 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 3979 * called on the nearest possible occasion: 3980 * 3981 * - If the kernel is preemptible (CONFIG_PREEMPTION=y): 3982 * 3983 * - in syscall or exception context, at the next outmost 3984 * preempt_enable(). (this might be as soon as the wake_up()'s 3985 * spin_unlock()!) 3986 * 3987 * - in IRQ context, return from interrupt-handler to 3988 * preemptible context 3989 * 3990 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) 3991 * then at the next: 3992 * 3993 * - cond_resched() call 3994 * - explicit schedule() call 3995 * - return from syscall or exception to user-space 3996 * - return from interrupt-handler to user-space 3997 * 3998 * WARNING: must be called with preemption disabled! 3999 */ 4000 static void __sched notrace __schedule(bool preempt) 4001 { 4002 struct task_struct *prev, *next; 4003 unsigned long *switch_count; 4004 struct rq_flags rf; 4005 struct rq *rq; 4006 int cpu; 4007 4008 cpu = smp_processor_id(); 4009 rq = cpu_rq(cpu); 4010 prev = rq->curr; 4011 4012 schedule_debug(prev, preempt); 4013 4014 if (sched_feat(HRTICK)) 4015 hrtick_clear(rq); 4016 4017 local_irq_disable(); 4018 rcu_note_context_switch(preempt); 4019 4020 /* 4021 * Make sure that signal_pending_state()->signal_pending() below 4022 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 4023 * done by the caller to avoid the race with signal_wake_up(). 4024 * 4025 * The membarrier system call requires a full memory barrier 4026 * after coming from user-space, before storing to rq->curr. 
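	 *
	 * For reference, a minimal sketch of the caller-side pattern this
	 * pairs with (CONDITION standing in for whatever the caller is
	 * waiting on):
	 *
	 *	for (;;) {
	 *		set_current_state(TASK_INTERRUPTIBLE);
	 *		if (CONDITION)
	 *			break;
	 *		schedule();
	 *	}
	 *	__set_current_state(TASK_RUNNING);
	 *
	 * The smp_mb__after_spinlock() below keeps the
	 * signal_pending_state() check ordered after that ->state store, so
	 * a signal_wake_up() racing with the sleep cannot be missed.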
4027 */ 4028 rq_lock(rq, &rf); 4029 smp_mb__after_spinlock(); 4030 4031 /* Promote REQ to ACT */ 4032 rq->clock_update_flags <<= 1; 4033 update_rq_clock(rq); 4034 4035 switch_count = &prev->nivcsw; 4036 if (!preempt && prev->state) { 4037 if (signal_pending_state(prev->state, prev)) { 4038 prev->state = TASK_RUNNING; 4039 } else { 4040 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 4041 4042 if (prev->in_iowait) { 4043 atomic_inc(&rq->nr_iowait); 4044 delayacct_blkio_start(); 4045 } 4046 } 4047 switch_count = &prev->nvcsw; 4048 } 4049 4050 next = pick_next_task(rq, prev, &rf); 4051 clear_tsk_need_resched(prev); 4052 clear_preempt_need_resched(); 4053 4054 if (likely(prev != next)) { 4055 rq->nr_switches++; 4056 /* 4057 * RCU users of rcu_dereference(rq->curr) may not see 4058 * changes to task_struct made by pick_next_task(). 4059 */ 4060 RCU_INIT_POINTER(rq->curr, next); 4061 /* 4062 * The membarrier system call requires each architecture 4063 * to have a full memory barrier after updating 4064 * rq->curr, before returning to user-space. 4065 * 4066 * Here are the schemes providing that barrier on the 4067 * various architectures: 4068 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. 4069 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 4070 * - finish_lock_switch() for weakly-ordered 4071 * architectures where spin_unlock is a full barrier, 4072 * - switch_to() for arm64 (weakly-ordered, spin_unlock 4073 * is a RELEASE barrier), 4074 */ 4075 ++*switch_count; 4076 4077 trace_sched_switch(preempt, prev, next); 4078 4079 /* Also unlocks the rq: */ 4080 rq = context_switch(rq, prev, next, &rf); 4081 } else { 4082 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 4083 rq_unlock_irq(rq, &rf); 4084 } 4085 4086 balance_callback(rq); 4087 } 4088 4089 void __noreturn do_task_dead(void) 4090 { 4091 /* Causes final put_task_struct in finish_task_switch(): */ 4092 set_special_state(TASK_DEAD); 4093 4094 /* Tell freezer to ignore us: */ 4095 current->flags |= PF_NOFREEZE; 4096 4097 __schedule(false); 4098 BUG(); 4099 4100 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 4101 for (;;) 4102 cpu_relax(); 4103 } 4104 4105 static inline void sched_submit_work(struct task_struct *tsk) 4106 { 4107 if (!tsk->state) 4108 return; 4109 4110 /* 4111 * If a worker went to sleep, notify and ask workqueue whether 4112 * it wants to wake up a task to maintain concurrency. 4113 * As this function is called inside the schedule() context, 4114 * we disable preemption to avoid it calling schedule() again 4115 * in the possible wakeup of a kworker. 4116 */ 4117 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 4118 preempt_disable(); 4119 if (tsk->flags & PF_WQ_WORKER) 4120 wq_worker_sleeping(tsk); 4121 else 4122 io_wq_worker_sleeping(tsk); 4123 preempt_enable_no_resched(); 4124 } 4125 4126 if (tsk_is_pi_blocked(tsk)) 4127 return; 4128 4129 /* 4130 * If we are going to sleep and we have plugged IO queued, 4131 * make sure to submit it to avoid deadlocks. 
4132 */ 4133 if (blk_needs_flush_plug(tsk)) 4134 blk_schedule_flush_plug(tsk); 4135 } 4136 4137 static void sched_update_worker(struct task_struct *tsk) 4138 { 4139 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 4140 if (tsk->flags & PF_WQ_WORKER) 4141 wq_worker_running(tsk); 4142 else 4143 io_wq_worker_running(tsk); 4144 } 4145 } 4146 4147 asmlinkage __visible void __sched schedule(void) 4148 { 4149 struct task_struct *tsk = current; 4150 4151 sched_submit_work(tsk); 4152 do { 4153 preempt_disable(); 4154 __schedule(false); 4155 sched_preempt_enable_no_resched(); 4156 } while (need_resched()); 4157 sched_update_worker(tsk); 4158 } 4159 EXPORT_SYMBOL(schedule); 4160 4161 /* 4162 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted 4163 * state (have scheduled out non-voluntarily) by making sure that all 4164 * tasks have either left the run queue or have gone into user space. 4165 * As idle tasks do not do either, they must not ever be preempted 4166 * (schedule out non-voluntarily). 4167 * 4168 * schedule_idle() is similar to schedule_preempt_disable() except that it 4169 * never enables preemption because it does not call sched_submit_work(). 4170 */ 4171 void __sched schedule_idle(void) 4172 { 4173 /* 4174 * As this skips calling sched_submit_work(), which the idle task does 4175 * regardless because that function is a nop when the task is in a 4176 * TASK_RUNNING state, make sure this isn't used someplace that the 4177 * current task can be in any other state. Note, idle is always in the 4178 * TASK_RUNNING state. 4179 */ 4180 WARN_ON_ONCE(current->state); 4181 do { 4182 __schedule(false); 4183 } while (need_resched()); 4184 } 4185 4186 #ifdef CONFIG_CONTEXT_TRACKING 4187 asmlinkage __visible void __sched schedule_user(void) 4188 { 4189 /* 4190 * If we come here after a random call to set_need_resched(), 4191 * or we have been woken up remotely but the IPI has not yet arrived, 4192 * we haven't yet exited the RCU idle mode. Do it here manually until 4193 * we find a better solution. 4194 * 4195 * NB: There are buggy callers of this function. Ideally we 4196 * should warn if prev_state != CONTEXT_USER, but that will trigger 4197 * too frequently to make sense yet. 4198 */ 4199 enum ctx_state prev_state = exception_enter(); 4200 schedule(); 4201 exception_exit(prev_state); 4202 } 4203 #endif 4204 4205 /** 4206 * schedule_preempt_disabled - called with preemption disabled 4207 * 4208 * Returns with preemption disabled. Note: preempt_count must be 1 4209 */ 4210 void __sched schedule_preempt_disabled(void) 4211 { 4212 sched_preempt_enable_no_resched(); 4213 schedule(); 4214 preempt_disable(); 4215 } 4216 4217 static void __sched notrace preempt_schedule_common(void) 4218 { 4219 do { 4220 /* 4221 * Because the function tracer can trace preempt_count_sub() 4222 * and it also uses preempt_enable/disable_notrace(), if 4223 * NEED_RESCHED is set, the preempt_enable_notrace() called 4224 * by the function tracer will call this function again and 4225 * cause infinite recursion. 4226 * 4227 * Preemption must be disabled here before the function 4228 * tracer can trace. Break up preempt_disable() into two 4229 * calls. One to disable preemption without fear of being 4230 * traced. The other to still record the preemption latency, 4231 * which can also be traced by the function tracer. 
4232 */ 4233 preempt_disable_notrace(); 4234 preempt_latency_start(1); 4235 __schedule(true); 4236 preempt_latency_stop(1); 4237 preempt_enable_no_resched_notrace(); 4238 4239 /* 4240 * Check again in case we missed a preemption opportunity 4241 * between schedule and now. 4242 */ 4243 } while (need_resched()); 4244 } 4245 4246 #ifdef CONFIG_PREEMPTION 4247 /* 4248 * This is the entry point to schedule() from in-kernel preemption 4249 * off of preempt_enable. 4250 */ 4251 asmlinkage __visible void __sched notrace preempt_schedule(void) 4252 { 4253 /* 4254 * If there is a non-zero preempt_count or interrupts are disabled, 4255 * we do not want to preempt the current task. Just return.. 4256 */ 4257 if (likely(!preemptible())) 4258 return; 4259 4260 preempt_schedule_common(); 4261 } 4262 NOKPROBE_SYMBOL(preempt_schedule); 4263 EXPORT_SYMBOL(preempt_schedule); 4264 4265 /** 4266 * preempt_schedule_notrace - preempt_schedule called by tracing 4267 * 4268 * The tracing infrastructure uses preempt_enable_notrace to prevent 4269 * recursion and tracing preempt enabling caused by the tracing 4270 * infrastructure itself. But as tracing can happen in areas coming 4271 * from userspace or just about to enter userspace, a preempt enable 4272 * can occur before user_exit() is called. This will cause the scheduler 4273 * to be called when the system is still in usermode. 4274 * 4275 * To prevent this, the preempt_enable_notrace will use this function 4276 * instead of preempt_schedule() to exit user context if needed before 4277 * calling the scheduler. 4278 */ 4279 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 4280 { 4281 enum ctx_state prev_ctx; 4282 4283 if (likely(!preemptible())) 4284 return; 4285 4286 do { 4287 /* 4288 * Because the function tracer can trace preempt_count_sub() 4289 * and it also uses preempt_enable/disable_notrace(), if 4290 * NEED_RESCHED is set, the preempt_enable_notrace() called 4291 * by the function tracer will call this function again and 4292 * cause infinite recursion. 4293 * 4294 * Preemption must be disabled here before the function 4295 * tracer can trace. Break up preempt_disable() into two 4296 * calls. One to disable preemption without fear of being 4297 * traced. The other to still record the preemption latency, 4298 * which can also be traced by the function tracer. 4299 */ 4300 preempt_disable_notrace(); 4301 preempt_latency_start(1); 4302 /* 4303 * Needs preempt disabled in case user_exit() is traced 4304 * and the tracer calls preempt_enable_notrace() causing 4305 * an infinite recursion. 4306 */ 4307 prev_ctx = exception_enter(); 4308 __schedule(true); 4309 exception_exit(prev_ctx); 4310 4311 preempt_latency_stop(1); 4312 preempt_enable_no_resched_notrace(); 4313 } while (need_resched()); 4314 } 4315 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 4316 4317 #endif /* CONFIG_PREEMPTION */ 4318 4319 /* 4320 * This is the entry point to schedule() from kernel preemption 4321 * off of irq context. 4322 * Note, that this is called and return with irqs disabled. This will 4323 * protect us against recursive calling from irq. 
4324 */ 4325 asmlinkage __visible void __sched preempt_schedule_irq(void) 4326 { 4327 enum ctx_state prev_state; 4328 4329 /* Catch callers which need to be fixed */ 4330 BUG_ON(preempt_count() || !irqs_disabled()); 4331 4332 prev_state = exception_enter(); 4333 4334 do { 4335 preempt_disable(); 4336 local_irq_enable(); 4337 __schedule(true); 4338 local_irq_disable(); 4339 sched_preempt_enable_no_resched(); 4340 } while (need_resched()); 4341 4342 exception_exit(prev_state); 4343 } 4344 4345 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 4346 void *key) 4347 { 4348 return try_to_wake_up(curr->private, mode, wake_flags); 4349 } 4350 EXPORT_SYMBOL(default_wake_function); 4351 4352 #ifdef CONFIG_RT_MUTEXES 4353 4354 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) 4355 { 4356 if (pi_task) 4357 prio = min(prio, pi_task->prio); 4358 4359 return prio; 4360 } 4361 4362 static inline int rt_effective_prio(struct task_struct *p, int prio) 4363 { 4364 struct task_struct *pi_task = rt_mutex_get_top_task(p); 4365 4366 return __rt_effective_prio(pi_task, prio); 4367 } 4368 4369 /* 4370 * rt_mutex_setprio - set the current priority of a task 4371 * @p: task to boost 4372 * @pi_task: donor task 4373 * 4374 * This function changes the 'effective' priority of a task. It does 4375 * not touch ->normal_prio like __setscheduler(). 4376 * 4377 * Used by the rt_mutex code to implement priority inheritance 4378 * logic. Call site only calls if the priority of the task changed. 4379 */ 4380 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) 4381 { 4382 int prio, oldprio, queued, running, queue_flag = 4383 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 4384 const struct sched_class *prev_class; 4385 struct rq_flags rf; 4386 struct rq *rq; 4387 4388 /* XXX used to be waiter->prio, not waiter->task->prio */ 4389 prio = __rt_effective_prio(pi_task, p->normal_prio); 4390 4391 /* 4392 * If nothing changed, bail early. 4393 */ 4394 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) 4395 return; 4396 4397 rq = __task_rq_lock(p, &rf); 4398 update_rq_clock(rq); 4399 /* 4400 * Set under pi_lock && rq->lock, such that the value can be used under 4401 * either lock. 4402 * 4403 * Note that there is loads of trickiness to make this pointer cache work 4404 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to 4405 * ensure a task is de-boosted (pi_task is set to NULL) before the 4406 * task is allowed to run again (and can exit). This ensures the pointer 4407 * points to a blocked task -- which guarantees the task is present. 4408 */ 4409 p->pi_top_task = pi_task; 4410 4411 /* 4412 * For FIFO/RR we only need to set prio, if that matches we're done. 4413 */ 4414 if (prio == p->prio && !dl_prio(prio)) 4415 goto out_unlock; 4416 4417 /* 4418 * Idle task boosting is a no-no in general. There is one 4419 * exception, when PREEMPT_RT and NOHZ are active: 4420 * 4421 * The idle task calls get_next_timer_interrupt() and holds 4422 * the timer wheel base->lock on the CPU and another CPU wants 4423 * to access the timer (probably to cancel it). We can safely 4424 * ignore the boosting request, as the idle CPU runs this code 4425 * with interrupts disabled and will complete the lock 4426 * protected section without being interrupted. So there is no 4427 * real need to boost.
4428 */ 4429 if (unlikely(p == rq->idle)) { 4430 WARN_ON(p != rq->curr); 4431 WARN_ON(p->pi_blocked_on); 4432 goto out_unlock; 4433 } 4434 4435 trace_sched_pi_setprio(p, pi_task); 4436 oldprio = p->prio; 4437 4438 if (oldprio == prio) 4439 queue_flag &= ~DEQUEUE_MOVE; 4440 4441 prev_class = p->sched_class; 4442 queued = task_on_rq_queued(p); 4443 running = task_current(rq, p); 4444 if (queued) 4445 dequeue_task(rq, p, queue_flag); 4446 if (running) 4447 put_prev_task(rq, p); 4448 4449 /* 4450 * Boosting condition are: 4451 * 1. -rt task is running and holds mutex A 4452 * --> -dl task blocks on mutex A 4453 * 4454 * 2. -dl task is running and holds mutex A 4455 * --> -dl task blocks on mutex A and could preempt the 4456 * running task 4457 */ 4458 if (dl_prio(prio)) { 4459 if (!dl_prio(p->normal_prio) || 4460 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { 4461 p->dl.dl_boosted = 1; 4462 queue_flag |= ENQUEUE_REPLENISH; 4463 } else 4464 p->dl.dl_boosted = 0; 4465 p->sched_class = &dl_sched_class; 4466 } else if (rt_prio(prio)) { 4467 if (dl_prio(oldprio)) 4468 p->dl.dl_boosted = 0; 4469 if (oldprio < prio) 4470 queue_flag |= ENQUEUE_HEAD; 4471 p->sched_class = &rt_sched_class; 4472 } else { 4473 if (dl_prio(oldprio)) 4474 p->dl.dl_boosted = 0; 4475 if (rt_prio(oldprio)) 4476 p->rt.timeout = 0; 4477 p->sched_class = &fair_sched_class; 4478 } 4479 4480 p->prio = prio; 4481 4482 if (queued) 4483 enqueue_task(rq, p, queue_flag); 4484 if (running) 4485 set_next_task(rq, p); 4486 4487 check_class_changed(rq, p, prev_class, oldprio); 4488 out_unlock: 4489 /* Avoid rq from going away on us: */ 4490 preempt_disable(); 4491 __task_rq_unlock(rq, &rf); 4492 4493 balance_callback(rq); 4494 preempt_enable(); 4495 } 4496 #else 4497 static inline int rt_effective_prio(struct task_struct *p, int prio) 4498 { 4499 return prio; 4500 } 4501 #endif 4502 4503 void set_user_nice(struct task_struct *p, long nice) 4504 { 4505 bool queued, running; 4506 int old_prio; 4507 struct rq_flags rf; 4508 struct rq *rq; 4509 4510 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 4511 return; 4512 /* 4513 * We have to be careful, if called from sys_setpriority(), 4514 * the task might be in the middle of scheduling on another CPU. 
4515 */ 4516 rq = task_rq_lock(p, &rf); 4517 update_rq_clock(rq); 4518 4519 /* 4520 * The RT priorities are set via sched_setscheduler(), but we still 4521 * allow the 'normal' nice value to be set - but as expected 4522 * it wont have any effect on scheduling until the task is 4523 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 4524 */ 4525 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 4526 p->static_prio = NICE_TO_PRIO(nice); 4527 goto out_unlock; 4528 } 4529 queued = task_on_rq_queued(p); 4530 running = task_current(rq, p); 4531 if (queued) 4532 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 4533 if (running) 4534 put_prev_task(rq, p); 4535 4536 p->static_prio = NICE_TO_PRIO(nice); 4537 set_load_weight(p, true); 4538 old_prio = p->prio; 4539 p->prio = effective_prio(p); 4540 4541 if (queued) 4542 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 4543 if (running) 4544 set_next_task(rq, p); 4545 4546 /* 4547 * If the task increased its priority or is running and 4548 * lowered its priority, then reschedule its CPU: 4549 */ 4550 p->sched_class->prio_changed(rq, p, old_prio); 4551 4552 out_unlock: 4553 task_rq_unlock(rq, p, &rf); 4554 } 4555 EXPORT_SYMBOL(set_user_nice); 4556 4557 /* 4558 * can_nice - check if a task can reduce its nice value 4559 * @p: task 4560 * @nice: nice value 4561 */ 4562 int can_nice(const struct task_struct *p, const int nice) 4563 { 4564 /* Convert nice value [19,-20] to rlimit style value [1,40]: */ 4565 int nice_rlim = nice_to_rlimit(nice); 4566 4567 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 4568 capable(CAP_SYS_NICE)); 4569 } 4570 4571 #ifdef __ARCH_WANT_SYS_NICE 4572 4573 /* 4574 * sys_nice - change the priority of the current process. 4575 * @increment: priority increment 4576 * 4577 * sys_setpriority is a more generic, but much slower function that 4578 * does similar things. 4579 */ 4580 SYSCALL_DEFINE1(nice, int, increment) 4581 { 4582 long nice, retval; 4583 4584 /* 4585 * Setpriority might change our priority at the same moment. 4586 * We don't have to worry. Conceptually one call occurs first 4587 * and we have a single winner. 4588 */ 4589 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 4590 nice = task_nice(current) + increment; 4591 4592 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 4593 if (increment < 0 && !can_nice(current, nice)) 4594 return -EPERM; 4595 4596 retval = security_task_setnice(current, nice); 4597 if (retval) 4598 return retval; 4599 4600 set_user_nice(current, nice); 4601 return 0; 4602 } 4603 4604 #endif 4605 4606 /** 4607 * task_prio - return the priority value of a given task. 4608 * @p: the task in question. 4609 * 4610 * Return: The priority value as seen by users in /proc. 4611 * RT tasks are offset by -200. Normal tasks are centered 4612 * around 0, value goes from -16 to +15. 4613 */ 4614 int task_prio(const struct task_struct *p) 4615 { 4616 return p->prio - MAX_RT_PRIO; 4617 } 4618 4619 /** 4620 * idle_cpu - is a given CPU idle currently? 4621 * @cpu: the processor in question. 4622 * 4623 * Return: 1 if the CPU is currently idle. 0 otherwise. 4624 */ 4625 int idle_cpu(int cpu) 4626 { 4627 struct rq *rq = cpu_rq(cpu); 4628 4629 if (rq->curr != rq->idle) 4630 return 0; 4631 4632 if (rq->nr_running) 4633 return 0; 4634 4635 #ifdef CONFIG_SMP 4636 if (!llist_empty(&rq->wake_list)) 4637 return 0; 4638 #endif 4639 4640 return 1; 4641 } 4642 4643 /** 4644 * available_idle_cpu - is a given CPU idle for enqueuing work. 4645 * @cpu: the CPU in question. 
4646 * 4647 * Return: 1 if the CPU is currently idle. 0 otherwise. 4648 */ 4649 int available_idle_cpu(int cpu) 4650 { 4651 if (!idle_cpu(cpu)) 4652 return 0; 4653 4654 if (vcpu_is_preempted(cpu)) 4655 return 0; 4656 4657 return 1; 4658 } 4659 4660 /** 4661 * idle_task - return the idle task for a given CPU. 4662 * @cpu: the processor in question. 4663 * 4664 * Return: The idle task for the CPU @cpu. 4665 */ 4666 struct task_struct *idle_task(int cpu) 4667 { 4668 return cpu_rq(cpu)->idle; 4669 } 4670 4671 /** 4672 * find_process_by_pid - find a process with a matching PID value. 4673 * @pid: the pid in question. 4674 * 4675 * The task of @pid, if found. %NULL otherwise. 4676 */ 4677 static struct task_struct *find_process_by_pid(pid_t pid) 4678 { 4679 return pid ? find_task_by_vpid(pid) : current; 4680 } 4681 4682 /* 4683 * sched_setparam() passes in -1 for its policy, to let the functions 4684 * it calls know not to change it. 4685 */ 4686 #define SETPARAM_POLICY -1 4687 4688 static void __setscheduler_params(struct task_struct *p, 4689 const struct sched_attr *attr) 4690 { 4691 int policy = attr->sched_policy; 4692 4693 if (policy == SETPARAM_POLICY) 4694 policy = p->policy; 4695 4696 p->policy = policy; 4697 4698 if (dl_policy(policy)) 4699 __setparam_dl(p, attr); 4700 else if (fair_policy(policy)) 4701 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 4702 4703 /* 4704 * __sched_setscheduler() ensures attr->sched_priority == 0 when 4705 * !rt_policy. Always setting this ensures that things like 4706 * getparam()/getattr() don't report silly values for !rt tasks. 4707 */ 4708 p->rt_priority = attr->sched_priority; 4709 p->normal_prio = normal_prio(p); 4710 set_load_weight(p, true); 4711 } 4712 4713 /* Actually do priority change: must hold pi & rq lock. */ 4714 static void __setscheduler(struct rq *rq, struct task_struct *p, 4715 const struct sched_attr *attr, bool keep_boost) 4716 { 4717 /* 4718 * If params can't change scheduling class changes aren't allowed 4719 * either. 4720 */ 4721 if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS) 4722 return; 4723 4724 __setscheduler_params(p, attr); 4725 4726 /* 4727 * Keep a potential priority boosting if called from 4728 * sched_setscheduler(). 4729 */ 4730 p->prio = normal_prio(p); 4731 if (keep_boost) 4732 p->prio = rt_effective_prio(p, p->prio); 4733 4734 if (dl_prio(p->prio)) 4735 p->sched_class = &dl_sched_class; 4736 else if (rt_prio(p->prio)) 4737 p->sched_class = &rt_sched_class; 4738 else 4739 p->sched_class = &fair_sched_class; 4740 } 4741 4742 /* 4743 * Check the target process has a UID that matches the current process's: 4744 */ 4745 static bool check_same_owner(struct task_struct *p) 4746 { 4747 const struct cred *cred = current_cred(), *pcred; 4748 bool match; 4749 4750 rcu_read_lock(); 4751 pcred = __task_cred(p); 4752 match = (uid_eq(cred->euid, pcred->euid) || 4753 uid_eq(cred->euid, pcred->uid)); 4754 rcu_read_unlock(); 4755 return match; 4756 } 4757 4758 static int __sched_setscheduler(struct task_struct *p, 4759 const struct sched_attr *attr, 4760 bool user, bool pi) 4761 { 4762 int newprio = dl_policy(attr->sched_policy) ? 
MAX_DL_PRIO - 1 : 4763 MAX_RT_PRIO - 1 - attr->sched_priority; 4764 int retval, oldprio, oldpolicy = -1, queued, running; 4765 int new_effective_prio, policy = attr->sched_policy; 4766 const struct sched_class *prev_class; 4767 struct rq_flags rf; 4768 int reset_on_fork; 4769 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 4770 struct rq *rq; 4771 4772 /* The pi code expects interrupts enabled */ 4773 BUG_ON(pi && in_interrupt()); 4774 recheck: 4775 /* Double check policy once rq lock held: */ 4776 if (policy < 0) { 4777 reset_on_fork = p->sched_reset_on_fork; 4778 policy = oldpolicy = p->policy; 4779 } else { 4780 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 4781 4782 if (!valid_policy(policy)) 4783 return -EINVAL; 4784 } 4785 4786 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) 4787 return -EINVAL; 4788 4789 /* 4790 * Valid priorities for SCHED_FIFO and SCHED_RR are 4791 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 4792 * SCHED_BATCH and SCHED_IDLE is 0. 4793 */ 4794 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || 4795 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) 4796 return -EINVAL; 4797 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 4798 (rt_policy(policy) != (attr->sched_priority != 0))) 4799 return -EINVAL; 4800 4801 /* 4802 * Allow unprivileged RT tasks to decrease priority: 4803 */ 4804 if (user && !capable(CAP_SYS_NICE)) { 4805 if (fair_policy(policy)) { 4806 if (attr->sched_nice < task_nice(p) && 4807 !can_nice(p, attr->sched_nice)) 4808 return -EPERM; 4809 } 4810 4811 if (rt_policy(policy)) { 4812 unsigned long rlim_rtprio = 4813 task_rlimit(p, RLIMIT_RTPRIO); 4814 4815 /* Can't set/change the rt policy: */ 4816 if (policy != p->policy && !rlim_rtprio) 4817 return -EPERM; 4818 4819 /* Can't increase priority: */ 4820 if (attr->sched_priority > p->rt_priority && 4821 attr->sched_priority > rlim_rtprio) 4822 return -EPERM; 4823 } 4824 4825 /* 4826 * Can't set/change SCHED_DEADLINE policy at all for now 4827 * (safest behavior); in the future we would like to allow 4828 * unprivileged DL tasks to increase their relative deadline 4829 * or reduce their runtime (both ways reducing utilization) 4830 */ 4831 if (dl_policy(policy)) 4832 return -EPERM; 4833 4834 /* 4835 * Treat SCHED_IDLE as nice 20. Only allow a switch to 4836 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 4837 */ 4838 if (task_has_idle_policy(p) && !idle_policy(policy)) { 4839 if (!can_nice(p, task_nice(p))) 4840 return -EPERM; 4841 } 4842 4843 /* Can't change other user's priorities: */ 4844 if (!check_same_owner(p)) 4845 return -EPERM; 4846 4847 /* Normal users shall not reset the sched_reset_on_fork flag: */ 4848 if (p->sched_reset_on_fork && !reset_on_fork) 4849 return -EPERM; 4850 } 4851 4852 if (user) { 4853 if (attr->sched_flags & SCHED_FLAG_SUGOV) 4854 return -EINVAL; 4855 4856 retval = security_task_setscheduler(p); 4857 if (retval) 4858 return retval; 4859 } 4860 4861 /* Update task specific "requested" clamps */ 4862 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { 4863 retval = uclamp_validate(p, attr); 4864 if (retval) 4865 return retval; 4866 } 4867 4868 if (pi) 4869 cpuset_read_lock(); 4870 4871 /* 4872 * Make sure no PI-waiters arrive (or leave) while we are 4873 * changing the priority of the task: 4874 * 4875 * To be able to change p->policy safely, the appropriate 4876 * runqueue lock must be held. 
4877 */ 4878 rq = task_rq_lock(p, &rf); 4879 update_rq_clock(rq); 4880 4881 /* 4882 * Changing the policy of the stop threads is a very bad idea: 4883 */ 4884 if (p == rq->stop) { 4885 retval = -EINVAL; 4886 goto unlock; 4887 } 4888 4889 /* 4890 * If not changing anything, there's no need to proceed further, 4891 * but store a possible modification of reset_on_fork. 4892 */ 4893 if (unlikely(policy == p->policy)) { 4894 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 4895 goto change; 4896 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 4897 goto change; 4898 if (dl_policy(policy) && dl_param_changed(p, attr)) 4899 goto change; 4900 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) 4901 goto change; 4902 4903 p->sched_reset_on_fork = reset_on_fork; 4904 retval = 0; 4905 goto unlock; 4906 } 4907 change: 4908 4909 if (user) { 4910 #ifdef CONFIG_RT_GROUP_SCHED 4911 /* 4912 * Do not allow realtime tasks into groups that have no runtime 4913 * assigned. 4914 */ 4915 if (rt_bandwidth_enabled() && rt_policy(policy) && 4916 task_group(p)->rt_bandwidth.rt_runtime == 0 && 4917 !task_group_is_autogroup(task_group(p))) { 4918 retval = -EPERM; 4919 goto unlock; 4920 } 4921 #endif 4922 #ifdef CONFIG_SMP 4923 if (dl_bandwidth_enabled() && dl_policy(policy) && 4924 !(attr->sched_flags & SCHED_FLAG_SUGOV)) { 4925 cpumask_t *span = rq->rd->span; 4926 4927 /* 4928 * Don't allow tasks with an affinity mask smaller than 4929 * the entire root_domain to become SCHED_DEADLINE. We 4930 * will also fail if there's no bandwidth available. 4931 */ 4932 if (!cpumask_subset(span, p->cpus_ptr) || 4933 rq->rd->dl_bw.bw == 0) { 4934 retval = -EPERM; 4935 goto unlock; 4936 } 4937 } 4938 #endif 4939 } 4940 4941 /* Re-check policy now with rq lock held: */ 4942 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 4943 policy = oldpolicy = -1; 4944 task_rq_unlock(rq, p, &rf); 4945 if (pi) 4946 cpuset_read_unlock(); 4947 goto recheck; 4948 } 4949 4950 /* 4951 * If setscheduling to SCHED_DEADLINE (or changing the parameters 4952 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 4953 * is available. 4954 */ 4955 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { 4956 retval = -EBUSY; 4957 goto unlock; 4958 } 4959 4960 p->sched_reset_on_fork = reset_on_fork; 4961 oldprio = p->prio; 4962 4963 if (pi) { 4964 /* 4965 * Take priority-boosted tasks into account. If the new 4966 * effective priority is unchanged, we just store the new 4967 * normal parameters and do not touch the scheduler class and 4968 * the runqueue. This will be done when the task deboosts 4969 * itself. 4970 */ 4971 new_effective_prio = rt_effective_prio(p, newprio); 4972 if (new_effective_prio == oldprio) 4973 queue_flags &= ~DEQUEUE_MOVE; 4974 } 4975 4976 queued = task_on_rq_queued(p); 4977 running = task_current(rq, p); 4978 if (queued) 4979 dequeue_task(rq, p, queue_flags); 4980 if (running) 4981 put_prev_task(rq, p); 4982 4983 prev_class = p->sched_class; 4984 4985 __setscheduler(rq, p, attr, pi); 4986 __setscheduler_uclamp(p, attr); 4987 4988 if (queued) { 4989 /* 4990 * We enqueue to tail when the priority of a task is 4991 * increased (user space view).
4992 */ 4993 if (oldprio < p->prio) 4994 queue_flags |= ENQUEUE_HEAD; 4995 4996 enqueue_task(rq, p, queue_flags); 4997 } 4998 if (running) 4999 set_next_task(rq, p); 5000 5001 check_class_changed(rq, p, prev_class, oldprio); 5002 5003 /* Avoid rq from going away on us: */ 5004 preempt_disable(); 5005 task_rq_unlock(rq, p, &rf); 5006 5007 if (pi) { 5008 cpuset_read_unlock(); 5009 rt_mutex_adjust_pi(p); 5010 } 5011 5012 /* Run balance callbacks after we've adjusted the PI chain: */ 5013 balance_callback(rq); 5014 preempt_enable(); 5015 5016 return 0; 5017 5018 unlock: 5019 task_rq_unlock(rq, p, &rf); 5020 if (pi) 5021 cpuset_read_unlock(); 5022 return retval; 5023 } 5024 5025 static int _sched_setscheduler(struct task_struct *p, int policy, 5026 const struct sched_param *param, bool check) 5027 { 5028 struct sched_attr attr = { 5029 .sched_policy = policy, 5030 .sched_priority = param->sched_priority, 5031 .sched_nice = PRIO_TO_NICE(p->static_prio), 5032 }; 5033 5034 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 5035 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 5036 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 5037 policy &= ~SCHED_RESET_ON_FORK; 5038 attr.sched_policy = policy; 5039 } 5040 5041 return __sched_setscheduler(p, &attr, check, true); 5042 } 5043 /** 5044 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 5045 * @p: the task in question. 5046 * @policy: new policy. 5047 * @param: structure containing the new RT priority. 5048 * 5049 * Return: 0 on success. An error code otherwise. 5050 * 5051 * NOTE that the task may be already dead. 5052 */ 5053 int sched_setscheduler(struct task_struct *p, int policy, 5054 const struct sched_param *param) 5055 { 5056 return _sched_setscheduler(p, policy, param, true); 5057 } 5058 EXPORT_SYMBOL_GPL(sched_setscheduler); 5059 5060 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 5061 { 5062 return __sched_setscheduler(p, attr, true, true); 5063 } 5064 EXPORT_SYMBOL_GPL(sched_setattr); 5065 5066 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) 5067 { 5068 return __sched_setscheduler(p, attr, false, true); 5069 } 5070 5071 /** 5072 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 5073 * @p: the task in question. 5074 * @policy: new policy. 5075 * @param: structure containing the new RT priority. 5076 * 5077 * Just like sched_setscheduler, only don't bother checking if the 5078 * current context has permission. For example, this is needed in 5079 * stop_machine(): we create temporary high priority worker threads, 5080 * but our caller might not have that capability. 5081 * 5082 * Return: 0 on success. An error code otherwise. 
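 *
 * Illustration only (not part of this file): an in-kernel caller, e.g. code
 * that spawns a helper kthread, might raise it to a fixed RT priority roughly
 * as below; the task pointer name and the chosen priority are made up:
 *
 *	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
 *
 *	sched_setscheduler_nocheck(my_kthread, SCHED_FIFO, &param);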
5083 */ 5084 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 5085 const struct sched_param *param) 5086 { 5087 return _sched_setscheduler(p, policy, param, false); 5088 } 5089 EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); 5090 5091 static int 5092 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 5093 { 5094 struct sched_param lparam; 5095 struct task_struct *p; 5096 int retval; 5097 5098 if (!param || pid < 0) 5099 return -EINVAL; 5100 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 5101 return -EFAULT; 5102 5103 rcu_read_lock(); 5104 retval = -ESRCH; 5105 p = find_process_by_pid(pid); 5106 if (likely(p)) 5107 get_task_struct(p); 5108 rcu_read_unlock(); 5109 5110 if (likely(p)) { 5111 retval = sched_setscheduler(p, policy, &lparam); 5112 put_task_struct(p); 5113 } 5114 5115 return retval; 5116 } 5117 5118 /* 5119 * Mimics kernel/events/core.c perf_copy_attr(). 5120 */ 5121 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) 5122 { 5123 u32 size; 5124 int ret; 5125 5126 /* Zero the full structure, so that a short copy will be nice: */ 5127 memset(attr, 0, sizeof(*attr)); 5128 5129 ret = get_user(size, &uattr->size); 5130 if (ret) 5131 return ret; 5132 5133 /* ABI compatibility quirk: */ 5134 if (!size) 5135 size = SCHED_ATTR_SIZE_VER0; 5136 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) 5137 goto err_size; 5138 5139 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); 5140 if (ret) { 5141 if (ret == -E2BIG) 5142 goto err_size; 5143 return ret; 5144 } 5145 5146 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 5147 size < SCHED_ATTR_SIZE_VER1) 5148 return -EINVAL; 5149 5150 /* 5151 * XXX: Do we want to be lenient like existing syscalls; or do we want 5152 * to be strict and return an error on out-of-bounds values? 5153 */ 5154 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 5155 5156 return 0; 5157 5158 err_size: 5159 put_user(sizeof(*attr), &uattr->size); 5160 return -E2BIG; 5161 } 5162 5163 /** 5164 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 5165 * @pid: the pid in question. 5166 * @policy: new policy. 5167 * @param: structure containing the new RT priority. 5168 * 5169 * Return: 0 on success. An error code otherwise. 5170 */ 5171 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) 5172 { 5173 if (policy < 0) 5174 return -EINVAL; 5175 5176 return do_sched_setscheduler(pid, policy, param); 5177 } 5178 5179 /** 5180 * sys_sched_setparam - set/change the RT priority of a thread 5181 * @pid: the pid in question. 5182 * @param: structure containing the new RT priority. 5183 * 5184 * Return: 0 on success. An error code otherwise. 5185 */ 5186 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 5187 { 5188 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 5189 } 5190 5191 /** 5192 * sys_sched_setattr - same as above, but with extended sched_attr 5193 * @pid: the pid in question. 5194 * @uattr: structure containing the extended parameters. 5195 * @flags: for future extension. 
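 *
 * Illustration only: user space typically reaches this through a raw
 * syscall(2), since glibc has not traditionally provided a wrapper, and must
 * define struct sched_attr itself. The numbers below are an arbitrary
 * 10ms/100ms SCHED_DEADLINE reservation, not a recommendation:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10000000,	// 10 ms, in nanoseconds
 *		.sched_deadline	= 100000000,	// 100 ms
 *		.sched_period	= 100000000,	// 100 ms
 *	};
 *
 *	syscall(SYS_sched_setattr, 0, &attr, 0);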
5196 */ 5197 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 5198 unsigned int, flags) 5199 { 5200 struct sched_attr attr; 5201 struct task_struct *p; 5202 int retval; 5203 5204 if (!uattr || pid < 0 || flags) 5205 return -EINVAL; 5206 5207 retval = sched_copy_attr(uattr, &attr); 5208 if (retval) 5209 return retval; 5210 5211 if ((int)attr.sched_policy < 0) 5212 return -EINVAL; 5213 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) 5214 attr.sched_policy = SETPARAM_POLICY; 5215 5216 rcu_read_lock(); 5217 retval = -ESRCH; 5218 p = find_process_by_pid(pid); 5219 if (likely(p)) 5220 get_task_struct(p); 5221 rcu_read_unlock(); 5222 5223 if (likely(p)) { 5224 retval = sched_setattr(p, &attr); 5225 put_task_struct(p); 5226 } 5227 5228 return retval; 5229 } 5230 5231 /** 5232 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 5233 * @pid: the pid in question. 5234 * 5235 * Return: On success, the policy of the thread. Otherwise, a negative error 5236 * code. 5237 */ 5238 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 5239 { 5240 struct task_struct *p; 5241 int retval; 5242 5243 if (pid < 0) 5244 return -EINVAL; 5245 5246 retval = -ESRCH; 5247 rcu_read_lock(); 5248 p = find_process_by_pid(pid); 5249 if (p) { 5250 retval = security_task_getscheduler(p); 5251 if (!retval) 5252 retval = p->policy 5253 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 5254 } 5255 rcu_read_unlock(); 5256 return retval; 5257 } 5258 5259 /** 5260 * sys_sched_getparam - get the RT priority of a thread 5261 * @pid: the pid in question. 5262 * @param: structure containing the RT priority. 5263 * 5264 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 5265 * code. 5266 */ 5267 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 5268 { 5269 struct sched_param lp = { .sched_priority = 0 }; 5270 struct task_struct *p; 5271 int retval; 5272 5273 if (!param || pid < 0) 5274 return -EINVAL; 5275 5276 rcu_read_lock(); 5277 p = find_process_by_pid(pid); 5278 retval = -ESRCH; 5279 if (!p) 5280 goto out_unlock; 5281 5282 retval = security_task_getscheduler(p); 5283 if (retval) 5284 goto out_unlock; 5285 5286 if (task_has_rt_policy(p)) 5287 lp.sched_priority = p->rt_priority; 5288 rcu_read_unlock(); 5289 5290 /* 5291 * This one might sleep, we cannot do it with a spinlock held ... 5292 */ 5293 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 5294 5295 return retval; 5296 5297 out_unlock: 5298 rcu_read_unlock(); 5299 return retval; 5300 } 5301 5302 /* 5303 * Copy the kernel size attribute structure (which might be larger 5304 * than what user-space knows about) to user-space. 5305 * 5306 * Note that all cases are valid: user-space buffer can be larger or 5307 * smaller than the kernel-space buffer. The usual case is that both 5308 * have the same size. 5309 */ 5310 static int 5311 sched_attr_copy_to_user(struct sched_attr __user *uattr, 5312 struct sched_attr *kattr, 5313 unsigned int usize) 5314 { 5315 unsigned int ksize = sizeof(*kattr); 5316 5317 if (!access_ok(uattr, usize)) 5318 return -EFAULT; 5319 5320 /* 5321 * sched_getattr() ABI forwards and backwards compatibility: 5322 * 5323 * If usize == ksize then we just copy everything to user-space and all is good. 5324 * 5325 * If usize < ksize then we only copy as much as user-space has space for, 5326 * this keeps ABI compatibility as well. We skip the rest. 
5327 * 5328 * If usize > ksize then user-space is using a newer version of the ABI, 5329 * which part the kernel doesn't know about. Just ignore it - tooling can 5330 * detect the kernel's knowledge of attributes from the attr->size value 5331 * which is set to ksize in this case. 5332 */ 5333 kattr->size = min(usize, ksize); 5334 5335 if (copy_to_user(uattr, kattr, kattr->size)) 5336 return -EFAULT; 5337 5338 return 0; 5339 } 5340 5341 /** 5342 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 5343 * @pid: the pid in question. 5344 * @uattr: structure containing the extended parameters. 5345 * @usize: sizeof(attr) for fwd/bwd comp. 5346 * @flags: for future extension. 5347 */ 5348 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 5349 unsigned int, usize, unsigned int, flags) 5350 { 5351 struct sched_attr kattr = { }; 5352 struct task_struct *p; 5353 int retval; 5354 5355 if (!uattr || pid < 0 || usize > PAGE_SIZE || 5356 usize < SCHED_ATTR_SIZE_VER0 || flags) 5357 return -EINVAL; 5358 5359 rcu_read_lock(); 5360 p = find_process_by_pid(pid); 5361 retval = -ESRCH; 5362 if (!p) 5363 goto out_unlock; 5364 5365 retval = security_task_getscheduler(p); 5366 if (retval) 5367 goto out_unlock; 5368 5369 kattr.sched_policy = p->policy; 5370 if (p->sched_reset_on_fork) 5371 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 5372 if (task_has_dl_policy(p)) 5373 __getparam_dl(p, &kattr); 5374 else if (task_has_rt_policy(p)) 5375 kattr.sched_priority = p->rt_priority; 5376 else 5377 kattr.sched_nice = task_nice(p); 5378 5379 #ifdef CONFIG_UCLAMP_TASK 5380 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 5381 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 5382 #endif 5383 5384 rcu_read_unlock(); 5385 5386 return sched_attr_copy_to_user(uattr, &kattr, usize); 5387 5388 out_unlock: 5389 rcu_read_unlock(); 5390 return retval; 5391 } 5392 5393 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 5394 { 5395 cpumask_var_t cpus_allowed, new_mask; 5396 struct task_struct *p; 5397 int retval; 5398 5399 rcu_read_lock(); 5400 5401 p = find_process_by_pid(pid); 5402 if (!p) { 5403 rcu_read_unlock(); 5404 return -ESRCH; 5405 } 5406 5407 /* Prevent p going away */ 5408 get_task_struct(p); 5409 rcu_read_unlock(); 5410 5411 if (p->flags & PF_NO_SETAFFINITY) { 5412 retval = -EINVAL; 5413 goto out_put_task; 5414 } 5415 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 5416 retval = -ENOMEM; 5417 goto out_put_task; 5418 } 5419 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 5420 retval = -ENOMEM; 5421 goto out_free_cpus_allowed; 5422 } 5423 retval = -EPERM; 5424 if (!check_same_owner(p)) { 5425 rcu_read_lock(); 5426 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 5427 rcu_read_unlock(); 5428 goto out_free_new_mask; 5429 } 5430 rcu_read_unlock(); 5431 } 5432 5433 retval = security_task_setscheduler(p); 5434 if (retval) 5435 goto out_free_new_mask; 5436 5437 5438 cpuset_cpus_allowed(p, cpus_allowed); 5439 cpumask_and(new_mask, in_mask, cpus_allowed); 5440 5441 /* 5442 * Since bandwidth control happens on root_domain basis, 5443 * if admission test is enabled, we only admit -deadline 5444 * tasks allowed to run on all the CPUs in the task's 5445 * root_domain. 
5446 */ 5447 #ifdef CONFIG_SMP 5448 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 5449 rcu_read_lock(); 5450 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { 5451 retval = -EBUSY; 5452 rcu_read_unlock(); 5453 goto out_free_new_mask; 5454 } 5455 rcu_read_unlock(); 5456 } 5457 #endif 5458 again: 5459 retval = __set_cpus_allowed_ptr(p, new_mask, true); 5460 5461 if (!retval) { 5462 cpuset_cpus_allowed(p, cpus_allowed); 5463 if (!cpumask_subset(new_mask, cpus_allowed)) { 5464 /* 5465 * We must have raced with a concurrent cpuset 5466 * update. Just reset the cpus_allowed to the 5467 * cpuset's cpus_allowed 5468 */ 5469 cpumask_copy(new_mask, cpus_allowed); 5470 goto again; 5471 } 5472 } 5473 out_free_new_mask: 5474 free_cpumask_var(new_mask); 5475 out_free_cpus_allowed: 5476 free_cpumask_var(cpus_allowed); 5477 out_put_task: 5478 put_task_struct(p); 5479 return retval; 5480 } 5481 5482 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 5483 struct cpumask *new_mask) 5484 { 5485 if (len < cpumask_size()) 5486 cpumask_clear(new_mask); 5487 else if (len > cpumask_size()) 5488 len = cpumask_size(); 5489 5490 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 5491 } 5492 5493 /** 5494 * sys_sched_setaffinity - set the CPU affinity of a process 5495 * @pid: pid of the process 5496 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 5497 * @user_mask_ptr: user-space pointer to the new CPU mask 5498 * 5499 * Return: 0 on success. An error code otherwise. 5500 */ 5501 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 5502 unsigned long __user *, user_mask_ptr) 5503 { 5504 cpumask_var_t new_mask; 5505 int retval; 5506 5507 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 5508 return -ENOMEM; 5509 5510 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 5511 if (retval == 0) 5512 retval = sched_setaffinity(pid, new_mask); 5513 free_cpumask_var(new_mask); 5514 return retval; 5515 } 5516 5517 long sched_getaffinity(pid_t pid, struct cpumask *mask) 5518 { 5519 struct task_struct *p; 5520 unsigned long flags; 5521 int retval; 5522 5523 rcu_read_lock(); 5524 5525 retval = -ESRCH; 5526 p = find_process_by_pid(pid); 5527 if (!p) 5528 goto out_unlock; 5529 5530 retval = security_task_getscheduler(p); 5531 if (retval) 5532 goto out_unlock; 5533 5534 raw_spin_lock_irqsave(&p->pi_lock, flags); 5535 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); 5536 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 5537 5538 out_unlock: 5539 rcu_read_unlock(); 5540 5541 return retval; 5542 } 5543 5544 /** 5545 * sys_sched_getaffinity - get the CPU affinity of a process 5546 * @pid: pid of the process 5547 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 5548 * @user_mask_ptr: user-space pointer to hold the current CPU mask 5549 * 5550 * Return: size of CPU mask copied to user_mask_ptr on success. An 5551 * error code otherwise. 
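 *
 * Illustration only: user space normally goes through the glibc wrapper,
 * which hides the "bytes copied" return value and reports 0 on success:
 *
 *	cpu_set_t set;
 *
 *	if (sched_getaffinity(0, sizeof(set), &set) == 0 && CPU_ISSET(3, &set))
 *		printf("this thread may run on CPU 3\n");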
5552 */ 5553 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 5554 unsigned long __user *, user_mask_ptr) 5555 { 5556 int ret; 5557 cpumask_var_t mask; 5558 5559 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 5560 return -EINVAL; 5561 if (len & (sizeof(unsigned long)-1)) 5562 return -EINVAL; 5563 5564 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 5565 return -ENOMEM; 5566 5567 ret = sched_getaffinity(pid, mask); 5568 if (ret == 0) { 5569 unsigned int retlen = min(len, cpumask_size()); 5570 5571 if (copy_to_user(user_mask_ptr, mask, retlen)) 5572 ret = -EFAULT; 5573 else 5574 ret = retlen; 5575 } 5576 free_cpumask_var(mask); 5577 5578 return ret; 5579 } 5580 5581 /** 5582 * sys_sched_yield - yield the current processor to other threads. 5583 * 5584 * This function yields the current CPU to other tasks. If there are no 5585 * other threads running on this CPU then this function will return. 5586 * 5587 * Return: 0. 5588 */ 5589 static void do_sched_yield(void) 5590 { 5591 struct rq_flags rf; 5592 struct rq *rq; 5593 5594 rq = this_rq_lock_irq(&rf); 5595 5596 schedstat_inc(rq->yld_count); 5597 current->sched_class->yield_task(rq); 5598 5599 /* 5600 * Since we are going to call schedule() anyway, there's 5601 * no need to preempt or enable interrupts: 5602 */ 5603 preempt_disable(); 5604 rq_unlock(rq, &rf); 5605 sched_preempt_enable_no_resched(); 5606 5607 schedule(); 5608 } 5609 5610 SYSCALL_DEFINE0(sched_yield) 5611 { 5612 do_sched_yield(); 5613 return 0; 5614 } 5615 5616 #ifndef CONFIG_PREEMPTION 5617 int __sched _cond_resched(void) 5618 { 5619 if (should_resched(0)) { 5620 preempt_schedule_common(); 5621 return 1; 5622 } 5623 rcu_all_qs(); 5624 return 0; 5625 } 5626 EXPORT_SYMBOL(_cond_resched); 5627 #endif 5628 5629 /* 5630 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 5631 * call schedule, and on return reacquire the lock. 5632 * 5633 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level 5634 * operations here to prevent schedule() from being called twice (once via 5635 * spin_unlock(), once by hand). 5636 */ 5637 int __cond_resched_lock(spinlock_t *lock) 5638 { 5639 int resched = should_resched(PREEMPT_LOCK_OFFSET); 5640 int ret = 0; 5641 5642 lockdep_assert_held(lock); 5643 5644 if (spin_needbreak(lock) || resched) { 5645 spin_unlock(lock); 5646 if (resched) 5647 preempt_schedule_common(); 5648 else 5649 cpu_relax(); 5650 ret = 1; 5651 spin_lock(lock); 5652 } 5653 return ret; 5654 } 5655 EXPORT_SYMBOL(__cond_resched_lock); 5656 5657 /** 5658 * yield - yield the current processor to other threads. 5659 * 5660 * Do not ever use this function, there's a 99% chance you're doing it wrong. 5661 * 5662 * The scheduler is at all times free to pick the calling task as the most 5663 * eligible task to run; if removing the yield() call from your code breaks 5664 * it, it's already broken. 5665 * 5666 * Typical broken usage is: 5667 * 5668 * while (!event) 5669 * yield(); 5670 * 5671 * where one assumes that yield() will let 'the other' process run that will 5672 * make event true. If the current task is a SCHED_FIFO task that will never 5673 * happen. Never use yield() as a progress guarantee!! 5674 * 5675 * If you want to use yield() to wait for something, use wait_event(). 5676 * If you want to use yield() to be 'nice' for others, use cond_resched(). 5677 * If you still want to use yield(), do not!
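 *
 * For comparison, a sketch of the wait_event() pattern suggested above; the
 * wait queue 'wq' and the 'event' condition are assumed to be set up by the
 * caller elsewhere:
 *
 *	// sleeper
 *	wait_event(wq, event);
 *
 *	// producer
 *	event = true;
 *	wake_up(&wq);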
5678 */ 5679 void __sched yield(void) 5680 { 5681 set_current_state(TASK_RUNNING); 5682 do_sched_yield(); 5683 } 5684 EXPORT_SYMBOL(yield); 5685 5686 /** 5687 * yield_to - yield the current processor to another thread in 5688 * your thread group, or accelerate that thread toward the 5689 * processor it's on. 5690 * @p: target task 5691 * @preempt: whether task preemption is allowed or not 5692 * 5693 * It's the caller's job to ensure that the target task struct 5694 * can't go away on us before we can do any checks. 5695 * 5696 * Return: 5697 * true (>0) if we indeed boosted the target task. 5698 * false (0) if we failed to boost the target. 5699 * -ESRCH if there's no task to yield to. 5700 */ 5701 int __sched yield_to(struct task_struct *p, bool preempt) 5702 { 5703 struct task_struct *curr = current; 5704 struct rq *rq, *p_rq; 5705 unsigned long flags; 5706 int yielded = 0; 5707 5708 local_irq_save(flags); 5709 rq = this_rq(); 5710 5711 again: 5712 p_rq = task_rq(p); 5713 /* 5714 * If we're the only runnable task on the rq and target rq also 5715 * has only one task, there's absolutely no point in yielding. 5716 */ 5717 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 5718 yielded = -ESRCH; 5719 goto out_irq; 5720 } 5721 5722 double_rq_lock(rq, p_rq); 5723 if (task_rq(p) != p_rq) { 5724 double_rq_unlock(rq, p_rq); 5725 goto again; 5726 } 5727 5728 if (!curr->sched_class->yield_to_task) 5729 goto out_unlock; 5730 5731 if (curr->sched_class != p->sched_class) 5732 goto out_unlock; 5733 5734 if (task_running(p_rq, p) || p->state) 5735 goto out_unlock; 5736 5737 yielded = curr->sched_class->yield_to_task(rq, p, preempt); 5738 if (yielded) { 5739 schedstat_inc(rq->yld_count); 5740 /* 5741 * Make p's CPU reschedule; pick_next_entity takes care of 5742 * fairness. 5743 */ 5744 if (preempt && rq != p_rq) 5745 resched_curr(p_rq); 5746 } 5747 5748 out_unlock: 5749 double_rq_unlock(rq, p_rq); 5750 out_irq: 5751 local_irq_restore(flags); 5752 5753 if (yielded > 0) 5754 schedule(); 5755 5756 return yielded; 5757 } 5758 EXPORT_SYMBOL_GPL(yield_to); 5759 5760 int io_schedule_prepare(void) 5761 { 5762 int old_iowait = current->in_iowait; 5763 5764 current->in_iowait = 1; 5765 blk_schedule_flush_plug(current); 5766 5767 return old_iowait; 5768 } 5769 5770 void io_schedule_finish(int token) 5771 { 5772 current->in_iowait = token; 5773 } 5774 5775 /* 5776 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 5777 * that process accounting knows that this is a task in IO wait state. 5778 */ 5779 long __sched io_schedule_timeout(long timeout) 5780 { 5781 int token; 5782 long ret; 5783 5784 token = io_schedule_prepare(); 5785 ret = schedule_timeout(timeout); 5786 io_schedule_finish(token); 5787 5788 return ret; 5789 } 5790 EXPORT_SYMBOL(io_schedule_timeout); 5791 5792 void __sched io_schedule(void) 5793 { 5794 int token; 5795 5796 token = io_schedule_prepare(); 5797 schedule(); 5798 io_schedule_finish(token); 5799 } 5800 EXPORT_SYMBOL(io_schedule); 5801 5802 /** 5803 * sys_sched_get_priority_max - return maximum RT priority. 5804 * @policy: scheduling class. 5805 * 5806 * Return: On success, this syscall returns the maximum 5807 * rt_priority that can be used by a given scheduling class. 5808 * On failure, a negative error code is returned. 
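 *
 * Illustration only: user space can query the valid range through the POSIX
 * wrappers before calling sched_setscheduler(); the values shown are the
 * usual Linux defaults, not guaranteed:
 *
 *	int max = sched_get_priority_max(SCHED_FIFO);	// typically 99
 *	int min = sched_get_priority_min(SCHED_FIFO);	// typically 1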
5809 */ 5810 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 5811 { 5812 int ret = -EINVAL; 5813 5814 switch (policy) { 5815 case SCHED_FIFO: 5816 case SCHED_RR: 5817 ret = MAX_USER_RT_PRIO-1; 5818 break; 5819 case SCHED_DEADLINE: 5820 case SCHED_NORMAL: 5821 case SCHED_BATCH: 5822 case SCHED_IDLE: 5823 ret = 0; 5824 break; 5825 } 5826 return ret; 5827 } 5828 5829 /** 5830 * sys_sched_get_priority_min - return minimum RT priority. 5831 * @policy: scheduling class. 5832 * 5833 * Return: On success, this syscall returns the minimum 5834 * rt_priority that can be used by a given scheduling class. 5835 * On failure, a negative error code is returned. 5836 */ 5837 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 5838 { 5839 int ret = -EINVAL; 5840 5841 switch (policy) { 5842 case SCHED_FIFO: 5843 case SCHED_RR: 5844 ret = 1; 5845 break; 5846 case SCHED_DEADLINE: 5847 case SCHED_NORMAL: 5848 case SCHED_BATCH: 5849 case SCHED_IDLE: 5850 ret = 0; 5851 } 5852 return ret; 5853 } 5854 5855 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 5856 { 5857 struct task_struct *p; 5858 unsigned int time_slice; 5859 struct rq_flags rf; 5860 struct rq *rq; 5861 int retval; 5862 5863 if (pid < 0) 5864 return -EINVAL; 5865 5866 retval = -ESRCH; 5867 rcu_read_lock(); 5868 p = find_process_by_pid(pid); 5869 if (!p) 5870 goto out_unlock; 5871 5872 retval = security_task_getscheduler(p); 5873 if (retval) 5874 goto out_unlock; 5875 5876 rq = task_rq_lock(p, &rf); 5877 time_slice = 0; 5878 if (p->sched_class->get_rr_interval) 5879 time_slice = p->sched_class->get_rr_interval(rq, p); 5880 task_rq_unlock(rq, p, &rf); 5881 5882 rcu_read_unlock(); 5883 jiffies_to_timespec64(time_slice, t); 5884 return 0; 5885 5886 out_unlock: 5887 rcu_read_unlock(); 5888 return retval; 5889 } 5890 5891 /** 5892 * sys_sched_rr_get_interval - return the default timeslice of a process. 5893 * @pid: pid of the process. 5894 * @interval: userspace pointer to the timeslice value. 5895 * 5896 * this syscall writes the default timeslice value of a given process 5897 * into the user-space timespec buffer. A value of '0' means infinity. 5898 * 5899 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 5900 * an error code. 
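 *
 * Illustration only, via the POSIX wrapper (a pid of 0 means the calling
 * thread):
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("RR timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);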
5901 */ 5902 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 5903 struct __kernel_timespec __user *, interval) 5904 { 5905 struct timespec64 t; 5906 int retval = sched_rr_get_interval(pid, &t); 5907 5908 if (retval == 0) 5909 retval = put_timespec64(&t, interval); 5910 5911 return retval; 5912 } 5913 5914 #ifdef CONFIG_COMPAT_32BIT_TIME 5915 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, 5916 struct old_timespec32 __user *, interval) 5917 { 5918 struct timespec64 t; 5919 int retval = sched_rr_get_interval(pid, &t); 5920 5921 if (retval == 0) 5922 retval = put_old_timespec32(&t, interval); 5923 return retval; 5924 } 5925 #endif 5926 5927 void sched_show_task(struct task_struct *p) 5928 { 5929 unsigned long free = 0; 5930 int ppid; 5931 5932 if (!try_get_task_stack(p)) 5933 return; 5934 5935 printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p)); 5936 5937 if (p->state == TASK_RUNNING) 5938 printk(KERN_CONT " running task "); 5939 #ifdef CONFIG_DEBUG_STACK_USAGE 5940 free = stack_not_used(p); 5941 #endif 5942 ppid = 0; 5943 rcu_read_lock(); 5944 if (pid_alive(p)) 5945 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 5946 rcu_read_unlock(); 5947 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 5948 task_pid_nr(p), ppid, 5949 (unsigned long)task_thread_info(p)->flags); 5950 5951 print_worker_info(KERN_INFO, p); 5952 show_stack(p, NULL); 5953 put_task_stack(p); 5954 } 5955 EXPORT_SYMBOL_GPL(sched_show_task); 5956 5957 static inline bool 5958 state_filter_match(unsigned long state_filter, struct task_struct *p) 5959 { 5960 /* no filter, everything matches */ 5961 if (!state_filter) 5962 return true; 5963 5964 /* filter, but doesn't match */ 5965 if (!(p->state & state_filter)) 5966 return false; 5967 5968 /* 5969 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows 5970 * TASK_KILLABLE). 5971 */ 5972 if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) 5973 return false; 5974 5975 return true; 5976 } 5977 5978 5979 void show_state_filter(unsigned long state_filter) 5980 { 5981 struct task_struct *g, *p; 5982 5983 #if BITS_PER_LONG == 32 5984 printk(KERN_INFO 5985 " task PC stack pid father\n"); 5986 #else 5987 printk(KERN_INFO 5988 " task PC stack pid father\n"); 5989 #endif 5990 rcu_read_lock(); 5991 for_each_process_thread(g, p) { 5992 /* 5993 * reset the NMI-timeout, listing all files on a slow 5994 * console might take a lot of time: 5995 * Also, reset softlockup watchdogs on all CPUs, because 5996 * another CPU might be blocked waiting for us to process 5997 * an IPI. 5998 */ 5999 touch_nmi_watchdog(); 6000 touch_all_softlockup_watchdogs(); 6001 if (state_filter_match(state_filter, p)) 6002 sched_show_task(p); 6003 } 6004 6005 #ifdef CONFIG_SCHED_DEBUG 6006 if (!state_filter) 6007 sysrq_sched_debug_show(); 6008 #endif 6009 rcu_read_unlock(); 6010 /* 6011 * Only show locks if all tasks are dumped: 6012 */ 6013 if (!state_filter) 6014 debug_show_all_locks(); 6015 } 6016 6017 /** 6018 * init_idle - set up an idle thread for a given CPU 6019 * @idle: task in question 6020 * @cpu: CPU the idle task belongs to 6021 * 6022 * NOTE: this function does not set the idle thread's NEED_RESCHED 6023 * flag, to make booting more robust. 
6024 */ 6025 void init_idle(struct task_struct *idle, int cpu) 6026 { 6027 struct rq *rq = cpu_rq(cpu); 6028 unsigned long flags; 6029 6030 __sched_fork(0, idle); 6031 6032 raw_spin_lock_irqsave(&idle->pi_lock, flags); 6033 raw_spin_lock(&rq->lock); 6034 6035 idle->state = TASK_RUNNING; 6036 idle->se.exec_start = sched_clock(); 6037 idle->flags |= PF_IDLE; 6038 6039 kasan_unpoison_task_stack(idle); 6040 6041 #ifdef CONFIG_SMP 6042 /* 6043 * It's possible that init_idle() gets called multiple times on a task; 6044 * in that case do_set_cpus_allowed() will not do the right thing. 6045 * 6046 * And since this is boot we can forgo the serialization. 6047 */ 6048 set_cpus_allowed_common(idle, cpumask_of(cpu)); 6049 #endif 6050 /* 6051 * We're having a chicken-and-egg problem: even though we are 6052 * holding rq->lock, the CPU isn't yet set to this CPU, so the 6053 * lockdep check in task_group() will fail. 6054 * 6055 * Similar case to sched_fork(). / Alternatively we could 6056 * use task_rq_lock() here and obtain the other rq->lock. 6057 * 6058 * Silence PROVE_RCU 6059 */ 6060 rcu_read_lock(); 6061 __set_task_cpu(idle, cpu); 6062 rcu_read_unlock(); 6063 6064 rq->idle = idle; 6065 rcu_assign_pointer(rq->curr, idle); 6066 idle->on_rq = TASK_ON_RQ_QUEUED; 6067 #ifdef CONFIG_SMP 6068 idle->on_cpu = 1; 6069 #endif 6070 raw_spin_unlock(&rq->lock); 6071 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 6072 6073 /* Set the preempt count _outside_ the spinlocks! */ 6074 init_idle_preempt_count(idle, cpu); 6075 6076 /* 6077 * The idle tasks have their own, simple scheduling class: 6078 */ 6079 idle->sched_class = &idle_sched_class; 6080 ftrace_graph_init_idle_task(idle, cpu); 6081 vtime_init_idle(idle, cpu); 6082 #ifdef CONFIG_SMP 6083 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 6084 #endif 6085 } 6086 6087 #ifdef CONFIG_SMP 6088 6089 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 6090 const struct cpumask *trial) 6091 { 6092 int ret = 1; 6093 6094 if (!cpumask_weight(cur)) 6095 return ret; 6096 6097 ret = dl_cpuset_cpumask_can_shrink(cur, trial); 6098 6099 return ret; 6100 } 6101 6102 int task_can_attach(struct task_struct *p, 6103 const struct cpumask *cs_cpus_allowed) 6104 { 6105 int ret = 0; 6106 6107 /* 6108 * Kthreads which disallow setaffinity shouldn't be moved 6109 * to a new cpuset; we don't want to change their CPU 6110 * affinity and isolating such threads by their set of 6111 * allowed nodes is unnecessary. Thus, cpusets are not 6112 * applicable for such threads. This prevents checking for 6113 * success of set_cpus_allowed_ptr() on all attached tasks 6114 * before cpus_mask may be changed.
6115 */ 6116 if (p->flags & PF_NO_SETAFFINITY) { 6117 ret = -EINVAL; 6118 goto out; 6119 } 6120 6121 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 6122 cs_cpus_allowed)) 6123 ret = dl_task_can_attach(p, cs_cpus_allowed); 6124 6125 out: 6126 return ret; 6127 } 6128 6129 bool sched_smp_initialized __read_mostly; 6130 6131 #ifdef CONFIG_NUMA_BALANCING 6132 /* Migrate current task p to target_cpu */ 6133 int migrate_task_to(struct task_struct *p, int target_cpu) 6134 { 6135 struct migration_arg arg = { p, target_cpu }; 6136 int curr_cpu = task_cpu(p); 6137 6138 if (curr_cpu == target_cpu) 6139 return 0; 6140 6141 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 6142 return -EINVAL; 6143 6144 /* TODO: This is not properly updating schedstats */ 6145 6146 trace_sched_move_numa(p, curr_cpu, target_cpu); 6147 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 6148 } 6149 6150 /* 6151 * Requeue a task on a given node and accurately track the number of NUMA 6152 * tasks on the runqueues 6153 */ 6154 void sched_setnuma(struct task_struct *p, int nid) 6155 { 6156 bool queued, running; 6157 struct rq_flags rf; 6158 struct rq *rq; 6159 6160 rq = task_rq_lock(p, &rf); 6161 queued = task_on_rq_queued(p); 6162 running = task_current(rq, p); 6163 6164 if (queued) 6165 dequeue_task(rq, p, DEQUEUE_SAVE); 6166 if (running) 6167 put_prev_task(rq, p); 6168 6169 p->numa_preferred_nid = nid; 6170 6171 if (queued) 6172 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 6173 if (running) 6174 set_next_task(rq, p); 6175 task_rq_unlock(rq, p, &rf); 6176 } 6177 #endif /* CONFIG_NUMA_BALANCING */ 6178 6179 #ifdef CONFIG_HOTPLUG_CPU 6180 /* 6181 * Ensure that the idle task is using init_mm right before its CPU goes 6182 * offline. 6183 */ 6184 void idle_task_exit(void) 6185 { 6186 struct mm_struct *mm = current->active_mm; 6187 6188 BUG_ON(cpu_online(smp_processor_id())); 6189 6190 if (mm != &init_mm) { 6191 switch_mm(mm, &init_mm, current); 6192 current->active_mm = &init_mm; 6193 finish_arch_post_lock_switch(); 6194 } 6195 mmdrop(mm); 6196 } 6197 6198 /* 6199 * Since this CPU is going 'away' for a while, fold any nr_active delta 6200 * we might have. Assumes we're called after migrate_tasks() so that the 6201 * nr_active count is stable. We need to take the teardown thread which 6202 * is calling this into account, so we hand in adjust = 1 to the load 6203 * calculation. 6204 * 6205 * Also see the comment "Global load-average calculations". 6206 */ 6207 static void calc_load_migrate(struct rq *rq) 6208 { 6209 long delta = calc_load_fold_active(rq, 1); 6210 if (delta) 6211 atomic_long_add(delta, &calc_load_tasks); 6212 } 6213 6214 static struct task_struct *__pick_migrate_task(struct rq *rq) 6215 { 6216 const struct sched_class *class; 6217 struct task_struct *next; 6218 6219 for_each_class(class) { 6220 next = class->pick_next_task(rq); 6221 if (next) { 6222 next->sched_class->put_prev_task(rq, next); 6223 return next; 6224 } 6225 } 6226 6227 /* The idle class should always have a runnable task */ 6228 BUG(); 6229 } 6230 6231 /* 6232 * Migrate all tasks from the rq, sleeping tasks will be migrated by 6233 * try_to_wake_up()->select_task_rq(). 6234 * 6235 * Called with rq->lock held even though we'er in stop_machine() and 6236 * there's no concurrency possible, we hold the required locks anyway 6237 * because of lock validation efforts. 
6238 */ 6239 static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) 6240 { 6241 struct rq *rq = dead_rq; 6242 struct task_struct *next, *stop = rq->stop; 6243 struct rq_flags orf = *rf; 6244 int dest_cpu; 6245 6246 /* 6247 * Fudge the rq selection such that the below task selection loop 6248 * doesn't get stuck on the currently eligible stop task. 6249 * 6250 * We're currently inside stop_machine() and the rq is either stuck 6251 * in the stop_machine_cpu_stop() loop, or we're executing this code, 6252 * either way we should never end up calling schedule() until we're 6253 * done here. 6254 */ 6255 rq->stop = NULL; 6256 6257 /* 6258 * put_prev_task() and pick_next_task() sched 6259 * class method both need to have an up-to-date 6260 * value of rq->clock[_task] 6261 */ 6262 update_rq_clock(rq); 6263 6264 for (;;) { 6265 /* 6266 * There's this thread running, bail when that's the only 6267 * remaining thread: 6268 */ 6269 if (rq->nr_running == 1) 6270 break; 6271 6272 next = __pick_migrate_task(rq); 6273 6274 /* 6275 * Rules for changing task_struct::cpus_mask are holding 6276 * both pi_lock and rq->lock, such that holding either 6277 * stabilizes the mask. 6278 * 6279 * Drop rq->lock is not quite as disastrous as it usually is 6280 * because !cpu_active at this point, which means load-balance 6281 * will not interfere. Also, stop-machine. 6282 */ 6283 rq_unlock(rq, rf); 6284 raw_spin_lock(&next->pi_lock); 6285 rq_relock(rq, rf); 6286 6287 /* 6288 * Since we're inside stop-machine, _nothing_ should have 6289 * changed the task, WARN if weird stuff happened, because in 6290 * that case the above rq->lock drop is a fail too. 6291 */ 6292 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { 6293 raw_spin_unlock(&next->pi_lock); 6294 continue; 6295 } 6296 6297 /* Find suitable destination for @next, with force if needed. */ 6298 dest_cpu = select_fallback_rq(dead_rq->cpu, next); 6299 rq = __migrate_task(rq, rf, next, dest_cpu); 6300 if (rq != dead_rq) { 6301 rq_unlock(rq, rf); 6302 rq = dead_rq; 6303 *rf = orf; 6304 rq_relock(rq, rf); 6305 } 6306 raw_spin_unlock(&next->pi_lock); 6307 } 6308 6309 rq->stop = stop; 6310 } 6311 #endif /* CONFIG_HOTPLUG_CPU */ 6312 6313 void set_rq_online(struct rq *rq) 6314 { 6315 if (!rq->online) { 6316 const struct sched_class *class; 6317 6318 cpumask_set_cpu(rq->cpu, rq->rd->online); 6319 rq->online = 1; 6320 6321 for_each_class(class) { 6322 if (class->rq_online) 6323 class->rq_online(rq); 6324 } 6325 } 6326 } 6327 6328 void set_rq_offline(struct rq *rq) 6329 { 6330 if (rq->online) { 6331 const struct sched_class *class; 6332 6333 for_each_class(class) { 6334 if (class->rq_offline) 6335 class->rq_offline(rq); 6336 } 6337 6338 cpumask_clear_cpu(rq->cpu, rq->rd->online); 6339 rq->online = 0; 6340 } 6341 } 6342 6343 /* 6344 * used to mark begin/end of suspend/resume: 6345 */ 6346 static int num_cpus_frozen; 6347 6348 /* 6349 * Update cpusets according to cpu_active mask. If cpusets are 6350 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 6351 * around partition_sched_domains(). 6352 * 6353 * If we come here as part of a suspend/resume, don't touch cpusets because we 6354 * want to restore it back to its original state upon resume anyway. 6355 */ 6356 static void cpuset_cpu_active(void) 6357 { 6358 if (cpuhp_tasks_frozen) { 6359 /* 6360 * num_cpus_frozen tracks how many CPUs are involved in suspend 6361 * resume sequence. 
As long as this is not the last online 6362 * operation in the resume sequence, just build a single sched 6363 * domain, ignoring cpusets. 6364 */ 6365 partition_sched_domains(1, NULL, NULL); 6366 if (--num_cpus_frozen) 6367 return; 6368 /* 6369 * This is the last CPU online operation. So fall through and 6370 * restore the original sched domains by considering the 6371 * cpuset configurations. 6372 */ 6373 cpuset_force_rebuild(); 6374 } 6375 cpuset_update_active_cpus(); 6376 } 6377 6378 static int cpuset_cpu_inactive(unsigned int cpu) 6379 { 6380 if (!cpuhp_tasks_frozen) { 6381 if (dl_cpu_busy(cpu)) 6382 return -EBUSY; 6383 cpuset_update_active_cpus(); 6384 } else { 6385 num_cpus_frozen++; 6386 partition_sched_domains(1, NULL, NULL); 6387 } 6388 return 0; 6389 } 6390 6391 int sched_cpu_activate(unsigned int cpu) 6392 { 6393 struct rq *rq = cpu_rq(cpu); 6394 struct rq_flags rf; 6395 6396 #ifdef CONFIG_SCHED_SMT 6397 /* 6398 * When going up, increment the number of cores with SMT present. 6399 */ 6400 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 6401 static_branch_inc_cpuslocked(&sched_smt_present); 6402 #endif 6403 set_cpu_active(cpu, true); 6404 6405 if (sched_smp_initialized) { 6406 sched_domains_numa_masks_set(cpu); 6407 cpuset_cpu_active(); 6408 } 6409 6410 /* 6411 * Put the rq online, if not already. This happens: 6412 * 6413 * 1) In the early boot process, because we build the real domains 6414 * after all CPUs have been brought up. 6415 * 6416 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the 6417 * domains. 6418 */ 6419 rq_lock_irqsave(rq, &rf); 6420 if (rq->rd) { 6421 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 6422 set_rq_online(rq); 6423 } 6424 rq_unlock_irqrestore(rq, &rf); 6425 6426 return 0; 6427 } 6428 6429 int sched_cpu_deactivate(unsigned int cpu) 6430 { 6431 int ret; 6432 6433 set_cpu_active(cpu, false); 6434 /* 6435 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU 6436 * users of this state to go away such that all new such users will 6437 * observe it. 6438 * 6439 * Do sync before park smpboot threads to take care the rcu boost case. 6440 */ 6441 synchronize_rcu(); 6442 6443 #ifdef CONFIG_SCHED_SMT 6444 /* 6445 * When going down, decrement the number of cores with SMT present. 
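 * (The check against a weight of 2 below works because this CPU is still
 * present in its SMT sibling mask at this point: a weight of exactly two
 * means the core stops running with SMT once this CPU has gone away.)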
6446 */ 6447 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 6448 static_branch_dec_cpuslocked(&sched_smt_present); 6449 #endif 6450 6451 if (!sched_smp_initialized) 6452 return 0; 6453 6454 ret = cpuset_cpu_inactive(cpu); 6455 if (ret) { 6456 set_cpu_active(cpu, true); 6457 return ret; 6458 } 6459 sched_domains_numa_masks_clear(cpu); 6460 return 0; 6461 } 6462 6463 static void sched_rq_cpu_starting(unsigned int cpu) 6464 { 6465 struct rq *rq = cpu_rq(cpu); 6466 6467 rq->calc_load_update = calc_load_update; 6468 update_max_interval(); 6469 } 6470 6471 int sched_cpu_starting(unsigned int cpu) 6472 { 6473 sched_rq_cpu_starting(cpu); 6474 sched_tick_start(cpu); 6475 return 0; 6476 } 6477 6478 #ifdef CONFIG_HOTPLUG_CPU 6479 int sched_cpu_dying(unsigned int cpu) 6480 { 6481 struct rq *rq = cpu_rq(cpu); 6482 struct rq_flags rf; 6483 6484 /* Handle pending wakeups and then migrate everything off */ 6485 sched_ttwu_pending(); 6486 sched_tick_stop(cpu); 6487 6488 rq_lock_irqsave(rq, &rf); 6489 if (rq->rd) { 6490 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 6491 set_rq_offline(rq); 6492 } 6493 migrate_tasks(rq, &rf); 6494 BUG_ON(rq->nr_running != 1); 6495 rq_unlock_irqrestore(rq, &rf); 6496 6497 calc_load_migrate(rq); 6498 update_max_interval(); 6499 nohz_balance_exit_idle(rq); 6500 hrtick_clear(rq); 6501 return 0; 6502 } 6503 #endif 6504 6505 void __init sched_init_smp(void) 6506 { 6507 sched_init_numa(); 6508 6509 /* 6510 * There's no userspace yet to cause hotplug operations; hence all the 6511 * CPU masks are stable and all blatant races in the below code cannot 6512 * happen. 6513 */ 6514 mutex_lock(&sched_domains_mutex); 6515 sched_init_domains(cpu_active_mask); 6516 mutex_unlock(&sched_domains_mutex); 6517 6518 /* Move init over to a non-isolated CPU */ 6519 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) 6520 BUG(); 6521 sched_init_granularity(); 6522 6523 init_sched_rt_class(); 6524 init_sched_dl_class(); 6525 6526 sched_smp_initialized = true; 6527 } 6528 6529 static int __init migration_init(void) 6530 { 6531 sched_cpu_starting(smp_processor_id()); 6532 return 0; 6533 } 6534 early_initcall(migration_init); 6535 6536 #else 6537 void __init sched_init_smp(void) 6538 { 6539 sched_init_granularity(); 6540 } 6541 #endif /* CONFIG_SMP */ 6542 6543 int in_sched_functions(unsigned long addr) 6544 { 6545 return in_lock_functions(addr) || 6546 (addr >= (unsigned long)__sched_text_start 6547 && addr < (unsigned long)__sched_text_end); 6548 } 6549 6550 #ifdef CONFIG_CGROUP_SCHED 6551 /* 6552 * Default task group. 6553 * Every task in system belongs to this group at bootup. 
6554 */ 6555 struct task_group root_task_group; 6556 LIST_HEAD(task_groups); 6557 6558 /* Cacheline aligned slab cache for task_group */ 6559 static struct kmem_cache *task_group_cache __read_mostly; 6560 #endif 6561 6562 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 6563 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask); 6564 6565 void __init sched_init(void) 6566 { 6567 unsigned long ptr = 0; 6568 int i; 6569 6570 wait_bit_init(); 6571 6572 #ifdef CONFIG_FAIR_GROUP_SCHED 6573 ptr += 2 * nr_cpu_ids * sizeof(void **); 6574 #endif 6575 #ifdef CONFIG_RT_GROUP_SCHED 6576 ptr += 2 * nr_cpu_ids * sizeof(void **); 6577 #endif 6578 if (ptr) { 6579 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); 6580 6581 #ifdef CONFIG_FAIR_GROUP_SCHED 6582 root_task_group.se = (struct sched_entity **)ptr; 6583 ptr += nr_cpu_ids * sizeof(void **); 6584 6585 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 6586 ptr += nr_cpu_ids * sizeof(void **); 6587 6588 #endif /* CONFIG_FAIR_GROUP_SCHED */ 6589 #ifdef CONFIG_RT_GROUP_SCHED 6590 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 6591 ptr += nr_cpu_ids * sizeof(void **); 6592 6593 root_task_group.rt_rq = (struct rt_rq **)ptr; 6594 ptr += nr_cpu_ids * sizeof(void **); 6595 6596 #endif /* CONFIG_RT_GROUP_SCHED */ 6597 } 6598 #ifdef CONFIG_CPUMASK_OFFSTACK 6599 for_each_possible_cpu(i) { 6600 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 6601 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 6602 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node( 6603 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 6604 } 6605 #endif /* CONFIG_CPUMASK_OFFSTACK */ 6606 6607 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); 6608 init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime()); 6609 6610 #ifdef CONFIG_SMP 6611 init_defrootdomain(); 6612 #endif 6613 6614 #ifdef CONFIG_RT_GROUP_SCHED 6615 init_rt_bandwidth(&root_task_group.rt_bandwidth, 6616 global_rt_period(), global_rt_runtime()); 6617 #endif /* CONFIG_RT_GROUP_SCHED */ 6618 6619 #ifdef CONFIG_CGROUP_SCHED 6620 task_group_cache = KMEM_CACHE(task_group, 0); 6621 6622 list_add(&root_task_group.list, &task_groups); 6623 INIT_LIST_HEAD(&root_task_group.children); 6624 INIT_LIST_HEAD(&root_task_group.siblings); 6625 autogroup_init(&init_task); 6626 #endif /* CONFIG_CGROUP_SCHED */ 6627 6628 for_each_possible_cpu(i) { 6629 struct rq *rq; 6630 6631 rq = cpu_rq(i); 6632 raw_spin_lock_init(&rq->lock); 6633 rq->nr_running = 0; 6634 rq->calc_load_active = 0; 6635 rq->calc_load_update = jiffies + LOAD_FREQ; 6636 init_cfs_rq(&rq->cfs); 6637 init_rt_rq(&rq->rt); 6638 init_dl_rq(&rq->dl); 6639 #ifdef CONFIG_FAIR_GROUP_SCHED 6640 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 6641 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 6642 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 6643 /* 6644 * How much CPU bandwidth does root_task_group get? 6645 * 6646 * In case of task-groups formed thr' the cgroup filesystem, it 6647 * gets 100% of the CPU resources in the system. This overall 6648 * system CPU resource is divided among the tasks of 6649 * root_task_group and its child task-groups in a fair manner, 6650 * based on each entity's (task or task-group's) weight 6651 * (se->load.weight). 
6652 * 6653 * In other words, if root_task_group has 10 tasks of weight 6654 * 1024) and two child groups A0 and A1 (of weight 1024 each), 6655 * then A0's share of the CPU resource is: 6656 * 6657 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 6658 * 6659 * We achieve this by letting root_task_group's tasks sit 6660 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 6661 */ 6662 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 6663 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 6664 #endif /* CONFIG_FAIR_GROUP_SCHED */ 6665 6666 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 6667 #ifdef CONFIG_RT_GROUP_SCHED 6668 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 6669 #endif 6670 #ifdef CONFIG_SMP 6671 rq->sd = NULL; 6672 rq->rd = NULL; 6673 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 6674 rq->balance_callback = NULL; 6675 rq->active_balance = 0; 6676 rq->next_balance = jiffies; 6677 rq->push_cpu = 0; 6678 rq->cpu = i; 6679 rq->online = 0; 6680 rq->idle_stamp = 0; 6681 rq->avg_idle = 2*sysctl_sched_migration_cost; 6682 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 6683 6684 INIT_LIST_HEAD(&rq->cfs_tasks); 6685 6686 rq_attach_root(rq, &def_root_domain); 6687 #ifdef CONFIG_NO_HZ_COMMON 6688 rq->last_load_update_tick = jiffies; 6689 rq->last_blocked_load_update_tick = jiffies; 6690 atomic_set(&rq->nohz_flags, 0); 6691 #endif 6692 #endif /* CONFIG_SMP */ 6693 hrtick_rq_init(rq); 6694 atomic_set(&rq->nr_iowait, 0); 6695 } 6696 6697 set_load_weight(&init_task, false); 6698 6699 /* 6700 * The boot idle thread does lazy MMU switching as well: 6701 */ 6702 mmgrab(&init_mm); 6703 enter_lazy_tlb(&init_mm, current); 6704 6705 /* 6706 * Make us the idle thread. Technically, schedule() should not be 6707 * called from this thread, however somewhere below it might be, 6708 * but because we are the idle thread, we just pick up running again 6709 * when this runqueue becomes "idle". 6710 */ 6711 init_idle(current, smp_processor_id()); 6712 6713 calc_load_update = jiffies + LOAD_FREQ; 6714 6715 #ifdef CONFIG_SMP 6716 idle_thread_set_boot_cpu(); 6717 #endif 6718 init_sched_fair_class(); 6719 6720 init_schedstats(); 6721 6722 psi_init(); 6723 6724 init_uclamp(); 6725 6726 scheduler_running = 1; 6727 } 6728 6729 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 6730 static inline int preempt_count_equals(int preempt_offset) 6731 { 6732 int nested = preempt_count() + rcu_preempt_depth(); 6733 6734 return (nested == preempt_offset); 6735 } 6736 6737 void __might_sleep(const char *file, int line, int preempt_offset) 6738 { 6739 /* 6740 * Blocking primitives will set (and therefore destroy) current->state, 6741 * since we will exit with TASK_RUNNING make sure we enter with it, 6742 * otherwise we will destroy state. 
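 * (A classic offender is a nested sleeping primitive: calling something that
 * might sleep between set_current_state(TASK_UNINTERRUPTIBLE) and schedule()
 * silently resets the task to TASK_RUNNING, which is exactly what the
 * WARN_ONCE() below catches.)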
6743 */ 6744 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 6745 "do not call blocking ops when !TASK_RUNNING; " 6746 "state=%lx set at [<%p>] %pS\n", 6747 current->state, 6748 (void *)current->task_state_change, 6749 (void *)current->task_state_change); 6750 6751 ___might_sleep(file, line, preempt_offset); 6752 } 6753 EXPORT_SYMBOL(__might_sleep); 6754 6755 void ___might_sleep(const char *file, int line, int preempt_offset) 6756 { 6757 /* Ratelimiting timestamp: */ 6758 static unsigned long prev_jiffy; 6759 6760 unsigned long preempt_disable_ip; 6761 6762 /* WARN_ON_ONCE() by default, no rate limit required: */ 6763 rcu_sleep_check(); 6764 6765 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 6766 !is_idle_task(current) && !current->non_block_count) || 6767 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 6768 oops_in_progress) 6769 return; 6770 6771 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 6772 return; 6773 prev_jiffy = jiffies; 6774 6775 /* Save this before calling printk(), since that will clobber it: */ 6776 preempt_disable_ip = get_preempt_disable_ip(current); 6777 6778 printk(KERN_ERR 6779 "BUG: sleeping function called from invalid context at %s:%d\n", 6780 file, line); 6781 printk(KERN_ERR 6782 "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", 6783 in_atomic(), irqs_disabled(), current->non_block_count, 6784 current->pid, current->comm); 6785 6786 if (task_stack_end_corrupted(current)) 6787 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 6788 6789 debug_show_held_locks(current); 6790 if (irqs_disabled()) 6791 print_irqtrace_events(current); 6792 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 6793 && !preempt_count_equals(preempt_offset)) { 6794 pr_err("Preemption disabled at:"); 6795 print_ip_sym(preempt_disable_ip); 6796 pr_cont("\n"); 6797 } 6798 dump_stack(); 6799 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 6800 } 6801 EXPORT_SYMBOL(___might_sleep); 6802 6803 void __cant_sleep(const char *file, int line, int preempt_offset) 6804 { 6805 static unsigned long prev_jiffy; 6806 6807 if (irqs_disabled()) 6808 return; 6809 6810 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 6811 return; 6812 6813 if (preempt_count() > preempt_offset) 6814 return; 6815 6816 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 6817 return; 6818 prev_jiffy = jiffies; 6819 6820 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 6821 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 6822 in_atomic(), irqs_disabled(), 6823 current->pid, current->comm); 6824 6825 debug_show_held_locks(current); 6826 dump_stack(); 6827 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 6828 } 6829 EXPORT_SYMBOL_GPL(__cant_sleep); 6830 #endif 6831 6832 #ifdef CONFIG_MAGIC_SYSRQ 6833 void normalize_rt_tasks(void) 6834 { 6835 struct task_struct *g, *p; 6836 struct sched_attr attr = { 6837 .sched_policy = SCHED_NORMAL, 6838 }; 6839 6840 read_lock(&tasklist_lock); 6841 for_each_process_thread(g, p) { 6842 /* 6843 * Only normalize user tasks: 6844 */ 6845 if (p->flags & PF_KTHREAD) 6846 continue; 6847 6848 p->se.exec_start = 0; 6849 schedstat_set(p->se.statistics.wait_start, 0); 6850 schedstat_set(p->se.statistics.sleep_start, 0); 6851 schedstat_set(p->se.statistics.block_start, 0); 6852 6853 if (!dl_task(p) && !rt_task(p)) { 6854 /* 6855 * Renice negative nice level userspace 6856 * tasks back to 0: 6857 */ 6858 if (task_nice(p) < 0) 6859 set_user_nice(p, 0); 6860 continue; 6861 } 6862 6863 
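/*
 * Force the remaining RT/DL task back to SCHED_NORMAL using the attr set up
 * above. The two 'false' arguments skip the user permission checks and the
 * PI-boost handling, neither of which is wanted on this sysrq path.
 */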
__sched_setscheduler(p, &attr, false, false); 6864 } 6865 read_unlock(&tasklist_lock); 6866 } 6867 6868 #endif /* CONFIG_MAGIC_SYSRQ */ 6869 6870 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 6871 /* 6872 * These functions are only useful for the IA64 MCA handling, or kdb. 6873 * 6874 * They can only be called when the whole system has been 6875 * stopped - every CPU needs to be quiescent, and no scheduling 6876 * activity can take place. Using them for anything else would 6877 * be a serious bug, and as a result, they aren't even visible 6878 * under any other configuration. 6879 */ 6880 6881 /** 6882 * curr_task - return the current task for a given CPU. 6883 * @cpu: the processor in question. 6884 * 6885 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6886 * 6887 * Return: The current task for @cpu. 6888 */ 6889 struct task_struct *curr_task(int cpu) 6890 { 6891 return cpu_curr(cpu); 6892 } 6893 6894 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 6895 6896 #ifdef CONFIG_IA64 6897 /** 6898 * ia64_set_curr_task - set the current task for a given CPU. 6899 * @cpu: the processor in question. 6900 * @p: the task pointer to set. 6901 * 6902 * Description: This function must only be used when non-maskable interrupts 6903 * are serviced on a separate stack. It allows the architecture to switch the 6904 * notion of the current task on a CPU in a non-blocking manner. This function 6905 * must be called with all CPUs synchronized and interrupts disabled; the 6906 * caller must save the original value of the current task (see 6907 * curr_task() above) and restore that value before reenabling interrupts and 6908 * re-starting the system. 6909 * 6910 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6911 */ 6912 void ia64_set_curr_task(int cpu, struct task_struct *p) 6913 { 6914 cpu_curr(cpu) = p; 6915 } 6916 6917 #endif 6918 6919 #ifdef CONFIG_CGROUP_SCHED 6920 /* task_group_lock serializes the addition/removal of task groups */ 6921 static DEFINE_SPINLOCK(task_group_lock); 6922 6923 static inline void alloc_uclamp_sched_group(struct task_group *tg, 6924 struct task_group *parent) 6925 { 6926 #ifdef CONFIG_UCLAMP_TASK_GROUP 6927 enum uclamp_id clamp_id; 6928 6929 for_each_clamp_id(clamp_id) { 6930 uclamp_se_set(&tg->uclamp_req[clamp_id], 6931 uclamp_none(clamp_id), false); 6932 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; 6933 } 6934 #endif 6935 } 6936 6937 static void sched_free_group(struct task_group *tg) 6938 { 6939 free_fair_sched_group(tg); 6940 free_rt_sched_group(tg); 6941 autogroup_free(tg); 6942 kmem_cache_free(task_group_cache, tg); 6943 } 6944 6945 /* allocate runqueue etc for a new task group */ 6946 struct task_group *sched_create_group(struct task_group *parent) 6947 { 6948 struct task_group *tg; 6949 6950 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 6951 if (!tg) 6952 return ERR_PTR(-ENOMEM); 6953 6954 if (!alloc_fair_sched_group(tg, parent)) 6955 goto err; 6956 6957 if (!alloc_rt_sched_group(tg, parent)) 6958 goto err; 6959 6960 alloc_uclamp_sched_group(tg, parent); 6961 6962 return tg; 6963 6964 err: 6965 sched_free_group(tg); 6966 return ERR_PTR(-ENOMEM); 6967 } 6968 6969 void sched_online_group(struct task_group *tg, struct task_group *parent) 6970 { 6971 unsigned long flags; 6972 6973 spin_lock_irqsave(&task_group_lock, flags); 6974 list_add_rcu(&tg->list, &task_groups); 6975 6976 /* Root should already exist: */ 6977 WARN_ON(!parent); 6978 6979 tg->parent = parent; 6980 INIT_LIST_HEAD(&tg->children); 6981
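/* Publish the new group below its parent; concurrent readers walk these lists under RCU. */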
list_add_rcu(&tg->siblings, &parent->children); 6982 spin_unlock_irqrestore(&task_group_lock, flags); 6983 6984 online_fair_sched_group(tg); 6985 } 6986 6987 /* rcu callback to free various structures associated with a task group */ 6988 static void sched_free_group_rcu(struct rcu_head *rhp) 6989 { 6990 /* Now it should be safe to free those cfs_rqs: */ 6991 sched_free_group(container_of(rhp, struct task_group, rcu)); 6992 } 6993 6994 void sched_destroy_group(struct task_group *tg) 6995 { 6996 /* Wait for possible concurrent references to cfs_rqs complete: */ 6997 call_rcu(&tg->rcu, sched_free_group_rcu); 6998 } 6999 7000 void sched_offline_group(struct task_group *tg) 7001 { 7002 unsigned long flags; 7003 7004 /* End participation in shares distribution: */ 7005 unregister_fair_sched_group(tg); 7006 7007 spin_lock_irqsave(&task_group_lock, flags); 7008 list_del_rcu(&tg->list); 7009 list_del_rcu(&tg->siblings); 7010 spin_unlock_irqrestore(&task_group_lock, flags); 7011 } 7012 7013 static void sched_change_group(struct task_struct *tsk, int type) 7014 { 7015 struct task_group *tg; 7016 7017 /* 7018 * All callers are synchronized by task_rq_lock(); we do not use RCU 7019 * which is pointless here. Thus, we pass "true" to task_css_check() 7020 * to prevent lockdep warnings. 7021 */ 7022 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 7023 struct task_group, css); 7024 tg = autogroup_task_group(tsk, tg); 7025 tsk->sched_task_group = tg; 7026 7027 #ifdef CONFIG_FAIR_GROUP_SCHED 7028 if (tsk->sched_class->task_change_group) 7029 tsk->sched_class->task_change_group(tsk, type); 7030 else 7031 #endif 7032 set_task_rq(tsk, task_cpu(tsk)); 7033 } 7034 7035 /* 7036 * Change task's runqueue when it moves between groups. 7037 * 7038 * The caller of this function should have put the task in its new group by 7039 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect 7040 * its new group. 7041 */ 7042 void sched_move_task(struct task_struct *tsk) 7043 { 7044 int queued, running, queue_flags = 7045 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 7046 struct rq_flags rf; 7047 struct rq *rq; 7048 7049 rq = task_rq_lock(tsk, &rf); 7050 update_rq_clock(rq); 7051 7052 running = task_current(rq, tsk); 7053 queued = task_on_rq_queued(tsk); 7054 7055 if (queued) 7056 dequeue_task(rq, tsk, queue_flags); 7057 if (running) 7058 put_prev_task(rq, tsk); 7059 7060 sched_change_group(tsk, TASK_MOVE_GROUP); 7061 7062 if (queued) 7063 enqueue_task(rq, tsk, queue_flags); 7064 if (running) { 7065 set_next_task(rq, tsk); 7066 /* 7067 * After changing group, the running task may have joined a 7068 * throttled one but it's still the running task. Trigger a 7069 * resched to make sure that task can still run. 7070 */ 7071 resched_curr(rq); 7072 } 7073 7074 task_rq_unlock(rq, tsk, &rf); 7075 } 7076 7077 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 7078 { 7079 return css ? 
container_of(css, struct task_group, css) : NULL; 7080 } 7081 7082 static struct cgroup_subsys_state * 7083 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 7084 { 7085 struct task_group *parent = css_tg(parent_css); 7086 struct task_group *tg; 7087 7088 if (!parent) { 7089 /* This is early initialization for the top cgroup */ 7090 return &root_task_group.css; 7091 } 7092 7093 tg = sched_create_group(parent); 7094 if (IS_ERR(tg)) 7095 return ERR_PTR(-ENOMEM); 7096 7097 return &tg->css; 7098 } 7099 7100 /* Expose task group only after completing cgroup initialization */ 7101 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 7102 { 7103 struct task_group *tg = css_tg(css); 7104 struct task_group *parent = css_tg(css->parent); 7105 7106 if (parent) 7107 sched_online_group(tg, parent); 7108 7109 #ifdef CONFIG_UCLAMP_TASK_GROUP 7110 /* Propagate the effective uclamp value for the new group */ 7111 cpu_util_update_eff(css); 7112 #endif 7113 7114 return 0; 7115 } 7116 7117 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 7118 { 7119 struct task_group *tg = css_tg(css); 7120 7121 sched_offline_group(tg); 7122 } 7123 7124 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 7125 { 7126 struct task_group *tg = css_tg(css); 7127 7128 /* 7129 * Relies on the RCU grace period between css_released() and this. 7130 */ 7131 sched_free_group(tg); 7132 } 7133 7134 /* 7135 * This is called before wake_up_new_task(), therefore we really only 7136 * have to set its group bits, all the other stuff does not apply. 7137 */ 7138 static void cpu_cgroup_fork(struct task_struct *task) 7139 { 7140 struct rq_flags rf; 7141 struct rq *rq; 7142 7143 rq = task_rq_lock(task, &rf); 7144 7145 update_rq_clock(rq); 7146 sched_change_group(task, TASK_SET_GROUP); 7147 7148 task_rq_unlock(rq, task, &rf); 7149 } 7150 7151 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 7152 { 7153 struct task_struct *task; 7154 struct cgroup_subsys_state *css; 7155 int ret = 0; 7156 7157 cgroup_taskset_for_each(task, css, tset) { 7158 #ifdef CONFIG_RT_GROUP_SCHED 7159 if (!sched_rt_can_attach(css_tg(css), task)) 7160 return -EINVAL; 7161 #endif 7162 /* 7163 * Serialize against wake_up_new_task() such that if its 7164 * running, we're sure to observe its full state. 7165 */ 7166 raw_spin_lock_irq(&task->pi_lock); 7167 /* 7168 * Avoid calling sched_move_task() before wake_up_new_task() 7169 * has happened. This would lead to problems with PELT, due to 7170 * move wanting to detach+attach while we're not attached yet. 7171 */ 7172 if (task->state == TASK_NEW) 7173 ret = -EINVAL; 7174 raw_spin_unlock_irq(&task->pi_lock); 7175 7176 if (ret) 7177 break; 7178 } 7179 return ret; 7180 } 7181 7182 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 7183 { 7184 struct task_struct *task; 7185 struct cgroup_subsys_state *css; 7186 7187 cgroup_taskset_for_each(task, css, tset) 7188 sched_move_task(task); 7189 } 7190 7191 #ifdef CONFIG_UCLAMP_TASK_GROUP 7192 static void cpu_util_update_eff(struct cgroup_subsys_state *css) 7193 { 7194 struct cgroup_subsys_state *top_css = css; 7195 struct uclamp_se *uc_parent = NULL; 7196 struct uclamp_se *uc_se = NULL; 7197 unsigned int eff[UCLAMP_CNT]; 7198 enum uclamp_id clamp_id; 7199 unsigned int clamps; 7200 7201 css_for_each_descendant_pre(css, top_css) { 7202 uc_parent = css_tg(css)->parent 7203 ? 
css_tg(css)->parent->uclamp : NULL; 7204 7205 for_each_clamp_id(clamp_id) { 7206 /* Assume effective clamps matches requested clamps */ 7207 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; 7208 /* Cap effective clamps with parent's effective clamps */ 7209 if (uc_parent && 7210 eff[clamp_id] > uc_parent[clamp_id].value) { 7211 eff[clamp_id] = uc_parent[clamp_id].value; 7212 } 7213 } 7214 /* Ensure protection is always capped by limit */ 7215 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); 7216 7217 /* Propagate most restrictive effective clamps */ 7218 clamps = 0x0; 7219 uc_se = css_tg(css)->uclamp; 7220 for_each_clamp_id(clamp_id) { 7221 if (eff[clamp_id] == uc_se[clamp_id].value) 7222 continue; 7223 uc_se[clamp_id].value = eff[clamp_id]; 7224 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); 7225 clamps |= (0x1 << clamp_id); 7226 } 7227 if (!clamps) { 7228 css = css_rightmost_descendant(css); 7229 continue; 7230 } 7231 7232 /* Immediately update descendants RUNNABLE tasks */ 7233 uclamp_update_active_tasks(css, clamps); 7234 } 7235 } 7236 7237 /* 7238 * Integer 10^N with a given N exponent by casting to integer the literal "1eN" 7239 * C expression. Since there is no way to convert a macro argument (N) into a 7240 * character constant, use two levels of macros. 7241 */ 7242 #define _POW10(exp) ((unsigned int)1e##exp) 7243 #define POW10(exp) _POW10(exp) 7244 7245 struct uclamp_request { 7246 #define UCLAMP_PERCENT_SHIFT 2 7247 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) 7248 s64 percent; 7249 u64 util; 7250 int ret; 7251 }; 7252 7253 static inline struct uclamp_request 7254 capacity_from_percent(char *buf) 7255 { 7256 struct uclamp_request req = { 7257 .percent = UCLAMP_PERCENT_SCALE, 7258 .util = SCHED_CAPACITY_SCALE, 7259 .ret = 0, 7260 }; 7261 7262 buf = strim(buf); 7263 if (strcmp(buf, "max")) { 7264 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, 7265 &req.percent); 7266 if (req.ret) 7267 return req; 7268 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { 7269 req.ret = -ERANGE; 7270 return req; 7271 } 7272 7273 req.util = req.percent << SCHED_CAPACITY_SHIFT; 7274 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); 7275 } 7276 7277 return req; 7278 } 7279 7280 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, 7281 size_t nbytes, loff_t off, 7282 enum uclamp_id clamp_id) 7283 { 7284 struct uclamp_request req; 7285 struct task_group *tg; 7286 7287 req = capacity_from_percent(buf); 7288 if (req.ret) 7289 return req.ret; 7290 7291 mutex_lock(&uclamp_mutex); 7292 rcu_read_lock(); 7293 7294 tg = css_tg(of_css(of)); 7295 if (tg->uclamp_req[clamp_id].value != req.util) 7296 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); 7297 7298 /* 7299 * Because of not recoverable conversion rounding we keep track of the 7300 * exact requested value 7301 */ 7302 tg->uclamp_pct[clamp_id] = req.percent; 7303 7304 /* Update effective clamps to track the most restrictive value */ 7305 cpu_util_update_eff(of_css(of)); 7306 7307 rcu_read_unlock(); 7308 mutex_unlock(&uclamp_mutex); 7309 7310 return nbytes; 7311 } 7312 7313 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, 7314 char *buf, size_t nbytes, 7315 loff_t off) 7316 { 7317 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); 7318 } 7319 7320 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of, 7321 char *buf, size_t nbytes, 7322 loff_t off) 7323 { 7324 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); 7325 } 7326 
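/*
 * Illustrative sketch (hypothetical helper, not used anywhere in this file):
 * the percent -> utilization conversion done by capacity_from_percent(),
 * written out for a single value. Writing "12.34" to cpu.uclamp.min gives
 * percent == 1234 (scaled by 10^UCLAMP_PERCENT_SHIFT), and then:
 *
 *	util = DIV_ROUND_CLOSEST_ULL(1234 << SCHED_CAPACITY_SHIFT, 10000)
 *	     = DIV_ROUND_CLOSEST_ULL(1263616, 10000)
 *	     = 126
 *
 * i.e. roughly 126/1024 of the CPU capacity scale.
 */
static inline u64 __maybe_unused uclamp_util_from_percent_sketch(u64 percent)
{
	/* Example helper only; mirrors the math in capacity_from_percent(). */
	return DIV_ROUND_CLOSEST_ULL(percent << SCHED_CAPACITY_SHIFT,
				     UCLAMP_PERCENT_SCALE);
}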
7327 static inline void cpu_uclamp_print(struct seq_file *sf, 7328 enum uclamp_id clamp_id) 7329 { 7330 struct task_group *tg; 7331 u64 util_clamp; 7332 u64 percent; 7333 u32 rem; 7334 7335 rcu_read_lock(); 7336 tg = css_tg(seq_css(sf)); 7337 util_clamp = tg->uclamp_req[clamp_id].value; 7338 rcu_read_unlock(); 7339 7340 if (util_clamp == SCHED_CAPACITY_SCALE) { 7341 seq_puts(sf, "max\n"); 7342 return; 7343 } 7344 7345 percent = tg->uclamp_pct[clamp_id]; 7346 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); 7347 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); 7348 } 7349 7350 static int cpu_uclamp_min_show(struct seq_file *sf, void *v) 7351 { 7352 cpu_uclamp_print(sf, UCLAMP_MIN); 7353 return 0; 7354 } 7355 7356 static int cpu_uclamp_max_show(struct seq_file *sf, void *v) 7357 { 7358 cpu_uclamp_print(sf, UCLAMP_MAX); 7359 return 0; 7360 } 7361 #endif /* CONFIG_UCLAMP_TASK_GROUP */ 7362 7363 #ifdef CONFIG_FAIR_GROUP_SCHED 7364 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 7365 struct cftype *cftype, u64 shareval) 7366 { 7367 if (shareval > scale_load_down(ULONG_MAX)) 7368 shareval = MAX_SHARES; 7369 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 7370 } 7371 7372 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 7373 struct cftype *cft) 7374 { 7375 struct task_group *tg = css_tg(css); 7376 7377 return (u64) scale_load_down(tg->shares); 7378 } 7379 7380 #ifdef CONFIG_CFS_BANDWIDTH 7381 static DEFINE_MUTEX(cfs_constraints_mutex); 7382 7383 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 7384 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 7385 7386 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 7387 7388 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 7389 { 7390 int i, ret = 0, runtime_enabled, runtime_was_enabled; 7391 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7392 7393 if (tg == &root_task_group) 7394 return -EINVAL; 7395 7396 /* 7397 * Ensure we have at least some amount of bandwidth every period. This is 7398 * to prevent reaching a state of large arrears when throttled via 7399 * entity_tick() resulting in prolonged exit starvation. 7400 */ 7401 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 7402 return -EINVAL; 7403 7404 /* 7405 * Likewise, bound things on the other side by preventing insane quota 7406 * periods. This also allows us to normalize in computing quota 7407 * feasibility. 7408 */ 7409 if (period > max_cfs_quota_period) 7410 return -EINVAL; 7411 7412 /* 7413 * Prevent race between setting of cfs_rq->runtime_enabled and 7414 * unthrottle_offline_cfs_rqs().
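 * (get_online_cpus() below takes the CPU hotplug read lock, so the set of
 * online CPUs, and therefore the cfs_rqs whose runtime_enabled is rewritten
 * in the loop further down, stays stable for the whole update.)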
7415 */ 7416 get_online_cpus(); 7417 mutex_lock(&cfs_constraints_mutex); 7418 ret = __cfs_schedulable(tg, period, quota); 7419 if (ret) 7420 goto out_unlock; 7421 7422 runtime_enabled = quota != RUNTIME_INF; 7423 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 7424 /* 7425 * If we need to toggle cfs_bandwidth_used, off->on must occur 7426 * before making related changes, and on->off must occur afterwards 7427 */ 7428 if (runtime_enabled && !runtime_was_enabled) 7429 cfs_bandwidth_usage_inc(); 7430 raw_spin_lock_irq(&cfs_b->lock); 7431 cfs_b->period = ns_to_ktime(period); 7432 cfs_b->quota = quota; 7433 7434 __refill_cfs_bandwidth_runtime(cfs_b); 7435 7436 /* Restart the period timer (if active) to handle new period expiry: */ 7437 if (runtime_enabled) 7438 start_cfs_bandwidth(cfs_b); 7439 7440 raw_spin_unlock_irq(&cfs_b->lock); 7441 7442 for_each_online_cpu(i) { 7443 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 7444 struct rq *rq = cfs_rq->rq; 7445 struct rq_flags rf; 7446 7447 rq_lock_irq(rq, &rf); 7448 cfs_rq->runtime_enabled = runtime_enabled; 7449 cfs_rq->runtime_remaining = 0; 7450 7451 if (cfs_rq->throttled) 7452 unthrottle_cfs_rq(cfs_rq); 7453 rq_unlock_irq(rq, &rf); 7454 } 7455 if (runtime_was_enabled && !runtime_enabled) 7456 cfs_bandwidth_usage_dec(); 7457 out_unlock: 7458 mutex_unlock(&cfs_constraints_mutex); 7459 put_online_cpus(); 7460 7461 return ret; 7462 } 7463 7464 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 7465 { 7466 u64 quota, period; 7467 7468 period = ktime_to_ns(tg->cfs_bandwidth.period); 7469 if (cfs_quota_us < 0) 7470 quota = RUNTIME_INF; 7471 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) 7472 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 7473 else 7474 return -EINVAL; 7475 7476 return tg_set_cfs_bandwidth(tg, period, quota); 7477 } 7478 7479 static long tg_get_cfs_quota(struct task_group *tg) 7480 { 7481 u64 quota_us; 7482 7483 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 7484 return -1; 7485 7486 quota_us = tg->cfs_bandwidth.quota; 7487 do_div(quota_us, NSEC_PER_USEC); 7488 7489 return quota_us; 7490 } 7491 7492 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 7493 { 7494 u64 quota, period; 7495 7496 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 7497 return -EINVAL; 7498 7499 period = (u64)cfs_period_us * NSEC_PER_USEC; 7500 quota = tg->cfs_bandwidth.quota; 7501 7502 return tg_set_cfs_bandwidth(tg, period, quota); 7503 } 7504 7505 static long tg_get_cfs_period(struct task_group *tg) 7506 { 7507 u64 cfs_period_us; 7508 7509 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 7510 do_div(cfs_period_us, NSEC_PER_USEC); 7511 7512 return cfs_period_us; 7513 } 7514 7515 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 7516 struct cftype *cft) 7517 { 7518 return tg_get_cfs_quota(css_tg(css)); 7519 } 7520 7521 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 7522 struct cftype *cftype, s64 cfs_quota_us) 7523 { 7524 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 7525 } 7526 7527 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 7528 struct cftype *cft) 7529 { 7530 return tg_get_cfs_period(css_tg(css)); 7531 } 7532 7533 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 7534 struct cftype *cftype, u64 cfs_period_us) 7535 { 7536 return tg_set_cfs_period(css_tg(css), cfs_period_us); 7537 } 7538 7539 struct cfs_schedulable_data { 7540 struct task_group *tg; 7541 u64 period, quota; 7542 }; 7543 7544 /* 7545 * normalize group quota/period to 
be quota/max_period 7546 * note: units are usecs 7547 */ 7548 static u64 normalize_cfs_quota(struct task_group *tg, 7549 struct cfs_schedulable_data *d) 7550 { 7551 u64 quota, period; 7552 7553 if (tg == d->tg) { 7554 period = d->period; 7555 quota = d->quota; 7556 } else { 7557 period = tg_get_cfs_period(tg); 7558 quota = tg_get_cfs_quota(tg); 7559 } 7560 7561 /* note: these should typically be equivalent */ 7562 if (quota == RUNTIME_INF || quota == -1) 7563 return RUNTIME_INF; 7564 7565 return to_ratio(period, quota); 7566 } 7567 7568 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 7569 { 7570 struct cfs_schedulable_data *d = data; 7571 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7572 s64 quota = 0, parent_quota = -1; 7573 7574 if (!tg->parent) { 7575 quota = RUNTIME_INF; 7576 } else { 7577 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 7578 7579 quota = normalize_cfs_quota(tg, d); 7580 parent_quota = parent_b->hierarchical_quota; 7581 7582 /* 7583 * Ensure max(child_quota) <= parent_quota. On cgroup2, 7584 * always take the min. On cgroup1, only inherit when no 7585 * limit is set: 7586 */ 7587 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 7588 quota = min(quota, parent_quota); 7589 } else { 7590 if (quota == RUNTIME_INF) 7591 quota = parent_quota; 7592 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 7593 return -EINVAL; 7594 } 7595 } 7596 cfs_b->hierarchical_quota = quota; 7597 7598 return 0; 7599 } 7600 7601 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 7602 { 7603 int ret; 7604 struct cfs_schedulable_data data = { 7605 .tg = tg, 7606 .period = period, 7607 .quota = quota, 7608 }; 7609 7610 if (quota != RUNTIME_INF) { 7611 do_div(data.period, NSEC_PER_USEC); 7612 do_div(data.quota, NSEC_PER_USEC); 7613 } 7614 7615 rcu_read_lock(); 7616 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 7617 rcu_read_unlock(); 7618 7619 return ret; 7620 } 7621 7622 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 7623 { 7624 struct task_group *tg = css_tg(seq_css(sf)); 7625 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7626 7627 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 7628 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 7629 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 7630 7631 if (schedstat_enabled() && tg != &root_task_group) { 7632 u64 ws = 0; 7633 int i; 7634 7635 for_each_possible_cpu(i) 7636 ws += schedstat_val(tg->se[i]->statistics.wait_sum); 7637 7638 seq_printf(sf, "wait_sum %llu\n", ws); 7639 } 7640 7641 return 0; 7642 } 7643 #endif /* CONFIG_CFS_BANDWIDTH */ 7644 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7645 7646 #ifdef CONFIG_RT_GROUP_SCHED 7647 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 7648 struct cftype *cft, s64 val) 7649 { 7650 return sched_group_set_rt_runtime(css_tg(css), val); 7651 } 7652 7653 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 7654 struct cftype *cft) 7655 { 7656 return sched_group_rt_runtime(css_tg(css)); 7657 } 7658 7659 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 7660 struct cftype *cftype, u64 rt_period_us) 7661 { 7662 return sched_group_set_rt_period(css_tg(css), rt_period_us); 7663 } 7664 7665 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 7666 struct cftype *cft) 7667 { 7668 return sched_group_rt_period(css_tg(css)); 7669 } 7670 #endif /* CONFIG_RT_GROUP_SCHED */ 7671 7672 static struct cftype cpu_legacy_files[] = { 7673 #ifdef 
CONFIG_FAIR_GROUP_SCHED 7674 { 7675 .name = "shares", 7676 .read_u64 = cpu_shares_read_u64, 7677 .write_u64 = cpu_shares_write_u64, 7678 }, 7679 #endif 7680 #ifdef CONFIG_CFS_BANDWIDTH 7681 { 7682 .name = "cfs_quota_us", 7683 .read_s64 = cpu_cfs_quota_read_s64, 7684 .write_s64 = cpu_cfs_quota_write_s64, 7685 }, 7686 { 7687 .name = "cfs_period_us", 7688 .read_u64 = cpu_cfs_period_read_u64, 7689 .write_u64 = cpu_cfs_period_write_u64, 7690 }, 7691 { 7692 .name = "stat", 7693 .seq_show = cpu_cfs_stat_show, 7694 }, 7695 #endif 7696 #ifdef CONFIG_RT_GROUP_SCHED 7697 { 7698 .name = "rt_runtime_us", 7699 .read_s64 = cpu_rt_runtime_read, 7700 .write_s64 = cpu_rt_runtime_write, 7701 }, 7702 { 7703 .name = "rt_period_us", 7704 .read_u64 = cpu_rt_period_read_uint, 7705 .write_u64 = cpu_rt_period_write_uint, 7706 }, 7707 #endif 7708 #ifdef CONFIG_UCLAMP_TASK_GROUP 7709 { 7710 .name = "uclamp.min", 7711 .flags = CFTYPE_NOT_ON_ROOT, 7712 .seq_show = cpu_uclamp_min_show, 7713 .write = cpu_uclamp_min_write, 7714 }, 7715 { 7716 .name = "uclamp.max", 7717 .flags = CFTYPE_NOT_ON_ROOT, 7718 .seq_show = cpu_uclamp_max_show, 7719 .write = cpu_uclamp_max_write, 7720 }, 7721 #endif 7722 { } /* Terminate */ 7723 }; 7724 7725 static int cpu_extra_stat_show(struct seq_file *sf, 7726 struct cgroup_subsys_state *css) 7727 { 7728 #ifdef CONFIG_CFS_BANDWIDTH 7729 { 7730 struct task_group *tg = css_tg(css); 7731 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7732 u64 throttled_usec; 7733 7734 throttled_usec = cfs_b->throttled_time; 7735 do_div(throttled_usec, NSEC_PER_USEC); 7736 7737 seq_printf(sf, "nr_periods %d\n" 7738 "nr_throttled %d\n" 7739 "throttled_usec %llu\n", 7740 cfs_b->nr_periods, cfs_b->nr_throttled, 7741 throttled_usec); 7742 } 7743 #endif 7744 return 0; 7745 } 7746 7747 #ifdef CONFIG_FAIR_GROUP_SCHED 7748 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 7749 struct cftype *cft) 7750 { 7751 struct task_group *tg = css_tg(css); 7752 u64 weight = scale_load_down(tg->shares); 7753 7754 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); 7755 } 7756 7757 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 7758 struct cftype *cft, u64 weight) 7759 { 7760 /* 7761 * cgroup weight knobs should use the common MIN, DFL and MAX 7762 * values which are 1, 100 and 10000 respectively. While it loses 7763 * a bit of range on both ends, it maps pretty well onto the shares 7764 * value used by scheduler and the round-trip conversions preserve 7765 * the original value over the entire range. 
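 *
 * A couple of round-trip examples, using the stock cgroup constants
 * (CGROUP_WEIGHT_MIN == 1, CGROUP_WEIGHT_DFL == 100, CGROUP_WEIGHT_MAX == 10000):
 *
 *	weight   100 -> shares DIV_ROUND_CLOSEST_ULL(  100 * 1024, 100) =   1024
 *	weight     1 -> shares DIV_ROUND_CLOSEST_ULL(    1 * 1024, 100) =     10
 *	weight 10000 -> shares DIV_ROUND_CLOSEST_ULL(10000 * 1024, 100) = 102400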
7766 */ 7767 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) 7768 return -ERANGE; 7769 7770 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); 7771 7772 return sched_group_set_shares(css_tg(css), scale_load(weight)); 7773 } 7774 7775 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 7776 struct cftype *cft) 7777 { 7778 unsigned long weight = scale_load_down(css_tg(css)->shares); 7779 int last_delta = INT_MAX; 7780 int prio, delta; 7781 7782 /* find the closest nice value to the current weight */ 7783 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 7784 delta = abs(sched_prio_to_weight[prio] - weight); 7785 if (delta >= last_delta) 7786 break; 7787 last_delta = delta; 7788 } 7789 7790 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 7791 } 7792 7793 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 7794 struct cftype *cft, s64 nice) 7795 { 7796 unsigned long weight; 7797 int idx; 7798 7799 if (nice < MIN_NICE || nice > MAX_NICE) 7800 return -ERANGE; 7801 7802 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 7803 idx = array_index_nospec(idx, 40); 7804 weight = sched_prio_to_weight[idx]; 7805 7806 return sched_group_set_shares(css_tg(css), scale_load(weight)); 7807 } 7808 #endif 7809 7810 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 7811 long period, long quota) 7812 { 7813 if (quota < 0) 7814 seq_puts(sf, "max"); 7815 else 7816 seq_printf(sf, "%ld", quota); 7817 7818 seq_printf(sf, " %ld\n", period); 7819 } 7820 7821 /* caller should put the current value in *@periodp before calling */ 7822 static int __maybe_unused cpu_period_quota_parse(char *buf, 7823 u64 *periodp, u64 *quotap) 7824 { 7825 char tok[21]; /* U64_MAX */ 7826 7827 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 7828 return -EINVAL; 7829 7830 *periodp *= NSEC_PER_USEC; 7831 7832 if (sscanf(tok, "%llu", quotap)) 7833 *quotap *= NSEC_PER_USEC; 7834 else if (!strcmp(tok, "max")) 7835 *quotap = RUNTIME_INF; 7836 else 7837 return -EINVAL; 7838 7839 return 0; 7840 } 7841 7842 #ifdef CONFIG_CFS_BANDWIDTH 7843 static int cpu_max_show(struct seq_file *sf, void *v) 7844 { 7845 struct task_group *tg = css_tg(seq_css(sf)); 7846 7847 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); 7848 return 0; 7849 } 7850 7851 static ssize_t cpu_max_write(struct kernfs_open_file *of, 7852 char *buf, size_t nbytes, loff_t off) 7853 { 7854 struct task_group *tg = css_tg(of_css(of)); 7855 u64 period = tg_get_cfs_period(tg); 7856 u64 quota; 7857 int ret; 7858 7859 ret = cpu_period_quota_parse(buf, &period, "a); 7860 if (!ret) 7861 ret = tg_set_cfs_bandwidth(tg, period, quota); 7862 return ret ?: nbytes; 7863 } 7864 #endif 7865 7866 static struct cftype cpu_files[] = { 7867 #ifdef CONFIG_FAIR_GROUP_SCHED 7868 { 7869 .name = "weight", 7870 .flags = CFTYPE_NOT_ON_ROOT, 7871 .read_u64 = cpu_weight_read_u64, 7872 .write_u64 = cpu_weight_write_u64, 7873 }, 7874 { 7875 .name = "weight.nice", 7876 .flags = CFTYPE_NOT_ON_ROOT, 7877 .read_s64 = cpu_weight_nice_read_s64, 7878 .write_s64 = cpu_weight_nice_write_s64, 7879 }, 7880 #endif 7881 #ifdef CONFIG_CFS_BANDWIDTH 7882 { 7883 .name = "max", 7884 .flags = CFTYPE_NOT_ON_ROOT, 7885 .seq_show = cpu_max_show, 7886 .write = cpu_max_write, 7887 }, 7888 #endif 7889 #ifdef CONFIG_UCLAMP_TASK_GROUP 7890 { 7891 .name = "uclamp.min", 7892 .flags = CFTYPE_NOT_ON_ROOT, 7893 .seq_show = cpu_uclamp_min_show, 7894 .write = cpu_uclamp_min_write, 7895 }, 7896 { 7897 .name = "uclamp.max", 7898 .flags = 
CFTYPE_NOT_ON_ROOT, 7899 .seq_show = cpu_uclamp_max_show, 7900 .write = cpu_uclamp_max_write, 7901 }, 7902 #endif 7903 { } /* terminate */ 7904 }; 7905 7906 struct cgroup_subsys cpu_cgrp_subsys = { 7907 .css_alloc = cpu_cgroup_css_alloc, 7908 .css_online = cpu_cgroup_css_online, 7909 .css_released = cpu_cgroup_css_released, 7910 .css_free = cpu_cgroup_css_free, 7911 .css_extra_stat_show = cpu_extra_stat_show, 7912 .fork = cpu_cgroup_fork, 7913 .can_attach = cpu_cgroup_can_attach, 7914 .attach = cpu_cgroup_attach, 7915 .legacy_cftypes = cpu_legacy_files, 7916 .dfl_cftypes = cpu_files, 7917 .early_init = true, 7918 .threaded = true, 7919 }; 7920 7921 #endif /* CONFIG_CGROUP_SCHED */ 7922 7923 void dump_cpu_task(int cpu) 7924 { 7925 pr_info("Task dump for CPU %d:\n", cpu); 7926 sched_show_task(cpu_curr(cpu)); 7927 } 7928 7929 /* 7930 * Nice levels are multiplicative, with a gentle 10% change for every 7931 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 7932 * nice 1, it will get ~10% less CPU time than another CPU-bound task 7933 * that remained on nice 0. 7934 * 7935 * The "10% effect" is relative and cumulative: from _any_ nice level, 7936 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 7937 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 7938 * If a task goes up by ~10% and another task goes down by ~10% then 7939 * the relative distance between them is ~25%.) 7940 */ 7941 const int sched_prio_to_weight[40] = { 7942 /* -20 */ 88761, 71755, 56483, 46273, 36291, 7943 /* -15 */ 29154, 23254, 18705, 14949, 11916, 7944 /* -10 */ 9548, 7620, 6100, 4904, 3906, 7945 /* -5 */ 3121, 2501, 1991, 1586, 1277, 7946 /* 0 */ 1024, 820, 655, 526, 423, 7947 /* 5 */ 335, 272, 215, 172, 137, 7948 /* 10 */ 110, 87, 70, 56, 45, 7949 /* 15 */ 36, 29, 23, 18, 15, 7950 }; 7951 7952 /* 7953 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. 7954 * 7955 * In cases where the weight does not change often, we can use the 7956 * precalculated inverse to speed up arithmetics by turning divisions 7957 * into multiplications: 7958 */ 7959 const u32 sched_prio_to_wmult[40] = { 7960 /* -20 */ 48388, 59856, 76040, 92818, 118348, 7961 /* -15 */ 147320, 184698, 229616, 287308, 360437, 7962 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 7963 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 7964 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 7965 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 7966 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 7967 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 7968 }; 7969 7970 #undef CREATE_TRACE_POINTS 7971
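/*
 * Worked example (weights from sched_prio_to_weight[] above): two CPU-bound
 * tasks sharing one CPU, one at nice 0 (weight 1024) and one at nice 5
 * (weight 335), end up with roughly
 *
 *	nice 0: 1024 / (1024 + 335) ~ 75% of the CPU
 *	nice 5:  335 / (1024 + 335) ~ 25% of the CPU
 *
 * which matches the ~1.25x-per-nice-level rule described above:
 * 1.25^5 ~ 3.05 and 1024 / 335 ~ 3.06.
 */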