1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * kernel/sched/core.c 4 * 5 * Core kernel scheduler code and related syscalls 6 * 7 * Copyright (C) 1991-2002 Linus Torvalds 8 */ 9 #include "sched.h" 10 11 #include <linux/nospec.h> 12 13 #include <linux/kcov.h> 14 15 #include <asm/switch_to.h> 16 #include <asm/tlb.h> 17 18 #include "../workqueue_internal.h" 19 #include "../smpboot.h" 20 21 #include "pelt.h" 22 23 #define CREATE_TRACE_POINTS 24 #include <trace/events/sched.h> 25 26 /* 27 * Export tracepoints that act as a bare tracehook (ie: have no trace event 28 * associated with them) to allow external modules to probe them. 29 */ 30 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp); 31 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp); 32 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp); 33 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp); 34 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp); 35 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp); 36 37 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 38 39 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) 40 /* 41 * Debugging: various feature bits 42 * 43 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of 44 * sysctl_sched_features, defined in sched.h, to allow constants propagation 45 * at compile time and compiler optimization based on features default. 46 */ 47 #define SCHED_FEAT(name, enabled) \ 48 (1UL << __SCHED_FEAT_##name) * enabled | 49 const_debug unsigned int sysctl_sched_features = 50 #include "features.h" 51 0; 52 #undef SCHED_FEAT 53 #endif 54 55 /* 56 * Number of tasks to iterate in a single balance run. 57 * Limited because this is done with IRQs disabled. 58 */ 59 const_debug unsigned int sysctl_sched_nr_migrate = 32; 60 61 /* 62 * period over which we measure -rt task CPU usage in us. 63 * default: 1s 64 */ 65 unsigned int sysctl_sched_rt_period = 1000000; 66 67 __read_mostly int scheduler_running; 68 69 /* 70 * part of the period that we allow rt tasks to run in us. 71 * default: 0.95s 72 */ 73 int sysctl_sched_rt_runtime = 950000; 74 75 /* 76 * __task_rq_lock - lock the rq @p resides on. 77 */ 78 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 79 __acquires(rq->lock) 80 { 81 struct rq *rq; 82 83 lockdep_assert_held(&p->pi_lock); 84 85 for (;;) { 86 rq = task_rq(p); 87 raw_spin_lock(&rq->lock); 88 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { 89 rq_pin_lock(rq, rf); 90 return rq; 91 } 92 raw_spin_unlock(&rq->lock); 93 94 while (unlikely(task_on_rq_migrating(p))) 95 cpu_relax(); 96 } 97 } 98 99 /* 100 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. 101 */ 102 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 103 __acquires(p->pi_lock) 104 __acquires(rq->lock) 105 { 106 struct rq *rq; 107 108 for (;;) { 109 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); 110 rq = task_rq(p); 111 raw_spin_lock(&rq->lock); 112 /* 113 * move_queued_task() task_rq_lock() 114 * 115 * ACQUIRE (rq->lock) 116 * [S] ->on_rq = MIGRATING [L] rq = task_rq() 117 * WMB (__set_task_cpu()) ACQUIRE (rq->lock); 118 * [S] ->cpu = new_cpu [L] task_rq() 119 * [L] ->on_rq 120 * RELEASE (rq->lock) 121 * 122 * If we observe the old CPU in task_rq_lock(), the acquire of 123 * the old rq->lock will fully serialize against the stores. 124 * 125 * If we observe the new CPU in task_rq_lock(), the address 126 * dependency headed by '[L] rq = task_rq()' and the acquire 127 * will pair with the WMB to ensure we then also see migrating. 
128 */ 129 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { 130 rq_pin_lock(rq, rf); 131 return rq; 132 } 133 raw_spin_unlock(&rq->lock); 134 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 135 136 while (unlikely(task_on_rq_migrating(p))) 137 cpu_relax(); 138 } 139 } 140 141 /* 142 * RQ-clock updating methods: 143 */ 144 145 static void update_rq_clock_task(struct rq *rq, s64 delta) 146 { 147 /* 148 * In theory, the compiler should just see 0 here, and optimize out the call 149 * to sched_rt_avg_update. But I don't trust it... 150 */ 151 s64 __maybe_unused steal = 0, irq_delta = 0; 152 153 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 154 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; 155 156 /* 157 * Since irq_time is only updated on {soft,}irq_exit, we might run into 158 * this case when a previous update_rq_clock() happened inside a 159 * {soft,}irq region. 160 * 161 * When this happens, we stop ->clock_task and only update the 162 * prev_irq_time stamp to account for the part that fit, so that a next 163 * update will consume the rest. This ensures ->clock_task is 164 * monotonic. 165 * 166 * It does however cause some slight mis-attribution of {soft,}irq 167 * time, a more accurate solution would be to update the irq_time using 168 * the current rq->clock timestamp, except that would require using 169 * atomic ops. 170 */ 171 if (irq_delta > delta) 172 irq_delta = delta; 173 174 rq->prev_irq_time += irq_delta; 175 delta -= irq_delta; 176 #endif 177 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 178 if (static_key_false((&paravirt_steal_rq_enabled))) { 179 steal = paravirt_steal_clock(cpu_of(rq)); 180 steal -= rq->prev_steal_time_rq; 181 182 if (unlikely(steal > delta)) 183 steal = delta; 184 185 rq->prev_steal_time_rq += steal; 186 delta -= steal; 187 } 188 #endif 189 190 rq->clock_task += delta; 191 192 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 193 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) 194 update_irq_load_avg(rq, irq_delta + steal); 195 #endif 196 update_rq_clock_pelt(rq, delta); 197 } 198 199 void update_rq_clock(struct rq *rq) 200 { 201 s64 delta; 202 203 lockdep_assert_held(&rq->lock); 204 205 if (rq->clock_update_flags & RQCF_ACT_SKIP) 206 return; 207 208 #ifdef CONFIG_SCHED_DEBUG 209 if (sched_feat(WARN_DOUBLE_CLOCK)) 210 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); 211 rq->clock_update_flags |= RQCF_UPDATED; 212 #endif 213 214 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; 215 if (delta < 0) 216 return; 217 rq->clock += delta; 218 update_rq_clock_task(rq, delta); 219 } 220 221 222 #ifdef CONFIG_SCHED_HRTICK 223 /* 224 * Use HR-timers to deliver accurate preemption points. 225 */ 226 227 static void hrtick_clear(struct rq *rq) 228 { 229 if (hrtimer_active(&rq->hrtick_timer)) 230 hrtimer_cancel(&rq->hrtick_timer); 231 } 232 233 /* 234 * High-resolution timer tick. 235 * Runs from hardirq context with interrupts disabled.
236 */ 237 static enum hrtimer_restart hrtick(struct hrtimer *timer) 238 { 239 struct rq *rq = container_of(timer, struct rq, hrtick_timer); 240 struct rq_flags rf; 241 242 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 243 244 rq_lock(rq, &rf); 245 update_rq_clock(rq); 246 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 247 rq_unlock(rq, &rf); 248 249 return HRTIMER_NORESTART; 250 } 251 252 #ifdef CONFIG_SMP 253 254 static void __hrtick_restart(struct rq *rq) 255 { 256 struct hrtimer *timer = &rq->hrtick_timer; 257 258 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); 259 } 260 261 /* 262 * called from hardirq (IPI) context 263 */ 264 static void __hrtick_start(void *arg) 265 { 266 struct rq *rq = arg; 267 struct rq_flags rf; 268 269 rq_lock(rq, &rf); 270 __hrtick_restart(rq); 271 rq->hrtick_csd_pending = 0; 272 rq_unlock(rq, &rf); 273 } 274 275 /* 276 * Called to set the hrtick timer state. 277 * 278 * called with rq->lock held and irqs disabled 279 */ 280 void hrtick_start(struct rq *rq, u64 delay) 281 { 282 struct hrtimer *timer = &rq->hrtick_timer; 283 ktime_t time; 284 s64 delta; 285 286 /* 287 * Don't schedule slices shorter than 10000ns, that just 288 * doesn't make sense and can cause timer DoS. 289 */ 290 delta = max_t(s64, delay, 10000LL); 291 time = ktime_add_ns(timer->base->get_time(), delta); 292 293 hrtimer_set_expires(timer, time); 294 295 if (rq == this_rq()) { 296 __hrtick_restart(rq); 297 } else if (!rq->hrtick_csd_pending) { 298 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); 299 rq->hrtick_csd_pending = 1; 300 } 301 } 302 303 #else 304 /* 305 * Called to set the hrtick timer state. 306 * 307 * called with rq->lock held and irqs disabled 308 */ 309 void hrtick_start(struct rq *rq, u64 delay) 310 { 311 /* 312 * Don't schedule slices shorter than 10000ns, that just 313 * doesn't make sense. Rely on vruntime for fairness. 314 */ 315 delay = max_t(u64, delay, 10000LL); 316 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), 317 HRTIMER_MODE_REL_PINNED); 318 } 319 #endif /* CONFIG_SMP */ 320 321 static void hrtick_rq_init(struct rq *rq) 322 { 323 #ifdef CONFIG_SMP 324 rq->hrtick_csd_pending = 0; 325 326 rq->hrtick_csd.flags = 0; 327 rq->hrtick_csd.func = __hrtick_start; 328 rq->hrtick_csd.info = rq; 329 #endif 330 331 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 332 rq->hrtick_timer.function = hrtick; 333 } 334 #else /* CONFIG_SCHED_HRTICK */ 335 static inline void hrtick_clear(struct rq *rq) 336 { 337 } 338 339 static inline void hrtick_rq_init(struct rq *rq) 340 { 341 } 342 #endif /* CONFIG_SCHED_HRTICK */ 343 344 /* 345 * cmpxchg based fetch_or, macro so it works for different integer types 346 */ 347 #define fetch_or(ptr, mask) \ 348 ({ \ 349 typeof(ptr) _ptr = (ptr); \ 350 typeof(mask) _mask = (mask); \ 351 typeof(*_ptr) _old, _val = *_ptr; \ 352 \ 353 for (;;) { \ 354 _old = cmpxchg(_ptr, _val, _val | _mask); \ 355 if (_old == _val) \ 356 break; \ 357 _val = _old; \ 358 } \ 359 _old; \ 360 }) 361 362 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) 363 /* 364 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, 365 * this avoids any races wrt polling state changes and thereby avoids 366 * spurious IPIs. 367 */ 368 static bool set_nr_and_not_polling(struct task_struct *p) 369 { 370 struct thread_info *ti = task_thread_info(p); 371 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); 372 } 373 374 /* 375 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. 
376 * 377 * If this returns true, then the idle task promises to call 378 * sched_ttwu_pending() and reschedule soon. 379 */ 380 static bool set_nr_if_polling(struct task_struct *p) 381 { 382 struct thread_info *ti = task_thread_info(p); 383 typeof(ti->flags) old, val = READ_ONCE(ti->flags); 384 385 for (;;) { 386 if (!(val & _TIF_POLLING_NRFLAG)) 387 return false; 388 if (val & _TIF_NEED_RESCHED) 389 return true; 390 old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED); 391 if (old == val) 392 break; 393 val = old; 394 } 395 return true; 396 } 397 398 #else 399 static bool set_nr_and_not_polling(struct task_struct *p) 400 { 401 set_tsk_need_resched(p); 402 return true; 403 } 404 405 #ifdef CONFIG_SMP 406 static bool set_nr_if_polling(struct task_struct *p) 407 { 408 return false; 409 } 410 #endif 411 #endif 412 413 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) 414 { 415 struct wake_q_node *node = &task->wake_q; 416 417 /* 418 * Atomically grab the task, if ->wake_q is !nil already it means 419 * its already queued (either by us or someone else) and will get the 420 * wakeup due to that. 421 * 422 * In order to ensure that a pending wakeup will observe our pending 423 * state, even in the failed case, an explicit smp_mb() must be used. 424 */ 425 smp_mb__before_atomic(); 426 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) 427 return false; 428 429 /* 430 * The head is context local, there can be no concurrency. 431 */ 432 *head->lastp = node; 433 head->lastp = &node->next; 434 return true; 435 } 436 437 /** 438 * wake_q_add() - queue a wakeup for 'later' waking. 439 * @head: the wake_q_head to add @task to 440 * @task: the task to queue for 'later' wakeup 441 * 442 * Queue a task for later wakeup, most likely by the wake_up_q() call in the 443 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come 444 * instantly. 445 * 446 * This function must be used as-if it were wake_up_process(); IOW the task 447 * must be ready to be woken at this location. 448 */ 449 void wake_q_add(struct wake_q_head *head, struct task_struct *task) 450 { 451 if (__wake_q_add(head, task)) 452 get_task_struct(task); 453 } 454 455 /** 456 * wake_q_add_safe() - safely queue a wakeup for 'later' waking. 457 * @head: the wake_q_head to add @task to 458 * @task: the task to queue for 'later' wakeup 459 * 460 * Queue a task for later wakeup, most likely by the wake_up_q() call in the 461 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come 462 * instantly. 463 * 464 * This function must be used as-if it were wake_up_process(); IOW the task 465 * must be ready to be woken at this location. 466 * 467 * This function is essentially a task-safe equivalent to wake_q_add(). Callers 468 * that already hold reference to @task can call the 'safe' version and trust 469 * wake_q to do the right thing depending whether or not the @task is already 470 * queued for wakeup. 
471 */ 472 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) 473 { 474 if (!__wake_q_add(head, task)) 475 put_task_struct(task); 476 } 477 478 void wake_up_q(struct wake_q_head *head) 479 { 480 struct wake_q_node *node = head->first; 481 482 while (node != WAKE_Q_TAIL) { 483 struct task_struct *task; 484 485 task = container_of(node, struct task_struct, wake_q); 486 BUG_ON(!task); 487 /* Task can safely be re-inserted now: */ 488 node = node->next; 489 task->wake_q.next = NULL; 490 491 /* 492 * wake_up_process() executes a full barrier, which pairs with 493 * the queueing in wake_q_add() so as not to miss wakeups. 494 */ 495 wake_up_process(task); 496 put_task_struct(task); 497 } 498 } 499 500 /* 501 * resched_curr - mark rq's current task 'to be rescheduled now'. 502 * 503 * On UP this means the setting of the need_resched flag, on SMP it 504 * might also involve a cross-CPU call to trigger the scheduler on 505 * the target CPU. 506 */ 507 void resched_curr(struct rq *rq) 508 { 509 struct task_struct *curr = rq->curr; 510 int cpu; 511 512 lockdep_assert_held(&rq->lock); 513 514 if (test_tsk_need_resched(curr)) 515 return; 516 517 cpu = cpu_of(rq); 518 519 if (cpu == smp_processor_id()) { 520 set_tsk_need_resched(curr); 521 set_preempt_need_resched(); 522 return; 523 } 524 525 if (set_nr_and_not_polling(curr)) 526 smp_send_reschedule(cpu); 527 else 528 trace_sched_wake_idle_without_ipi(cpu); 529 } 530 531 void resched_cpu(int cpu) 532 { 533 struct rq *rq = cpu_rq(cpu); 534 unsigned long flags; 535 536 raw_spin_lock_irqsave(&rq->lock, flags); 537 if (cpu_online(cpu) || cpu == smp_processor_id()) 538 resched_curr(rq); 539 raw_spin_unlock_irqrestore(&rq->lock, flags); 540 } 541 542 #ifdef CONFIG_SMP 543 #ifdef CONFIG_NO_HZ_COMMON 544 /* 545 * In the semi idle case, use the nearest busy CPU for migrating timers 546 * from an idle CPU. This is good for power-savings. 547 * 548 * We don't do similar optimization for completely idle system, as 549 * selecting an idle CPU will add more delays to the timers than intended 550 * (as that CPU's timer base may not be uptodate wrt jiffies etc). 551 */ 552 int get_nohz_timer_target(void) 553 { 554 int i, cpu = smp_processor_id(); 555 struct sched_domain *sd; 556 557 if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER)) 558 return cpu; 559 560 rcu_read_lock(); 561 for_each_domain(cpu, sd) { 562 for_each_cpu(i, sched_domain_span(sd)) { 563 if (cpu == i) 564 continue; 565 566 if (!idle_cpu(i) && housekeeping_cpu(i, HK_FLAG_TIMER)) { 567 cpu = i; 568 goto unlock; 569 } 570 } 571 } 572 573 if (!housekeeping_cpu(cpu, HK_FLAG_TIMER)) 574 cpu = housekeeping_any_cpu(HK_FLAG_TIMER); 575 unlock: 576 rcu_read_unlock(); 577 return cpu; 578 } 579 580 /* 581 * When add_timer_on() enqueues a timer into the timer wheel of an 582 * idle CPU then this timer might expire before the next timer event 583 * which is scheduled to wake up that CPU. In case of a completely 584 * idle system the next event might even be infinite time into the 585 * future. wake_up_idle_cpu() ensures that the CPU is woken up and 586 * leaves the inner idle loop so the newly added timer is taken into 587 * account when the CPU goes back to idle and evaluates the timer 588 * wheel for the next timer event. 
589 */ 590 static void wake_up_idle_cpu(int cpu) 591 { 592 struct rq *rq = cpu_rq(cpu); 593 594 if (cpu == smp_processor_id()) 595 return; 596 597 if (set_nr_and_not_polling(rq->idle)) 598 smp_send_reschedule(cpu); 599 else 600 trace_sched_wake_idle_without_ipi(cpu); 601 } 602 603 static bool wake_up_full_nohz_cpu(int cpu) 604 { 605 /* 606 * We just need the target to call irq_exit() and re-evaluate 607 * the next tick. The nohz full kick at least implies that. 608 * If needed we can still optimize that later with an 609 * empty IRQ. 610 */ 611 if (cpu_is_offline(cpu)) 612 return true; /* Don't try to wake offline CPUs. */ 613 if (tick_nohz_full_cpu(cpu)) { 614 if (cpu != smp_processor_id() || 615 tick_nohz_tick_stopped()) 616 tick_nohz_full_kick_cpu(cpu); 617 return true; 618 } 619 620 return false; 621 } 622 623 /* 624 * Wake up the specified CPU. If the CPU is going offline, it is the 625 * caller's responsibility to deal with the lost wakeup, for example, 626 * by hooking into the CPU_DEAD notifier like timers and hrtimers do. 627 */ 628 void wake_up_nohz_cpu(int cpu) 629 { 630 if (!wake_up_full_nohz_cpu(cpu)) 631 wake_up_idle_cpu(cpu); 632 } 633 634 static inline bool got_nohz_idle_kick(void) 635 { 636 int cpu = smp_processor_id(); 637 638 if (!(atomic_read(nohz_flags(cpu)) & NOHZ_KICK_MASK)) 639 return false; 640 641 if (idle_cpu(cpu) && !need_resched()) 642 return true; 643 644 /* 645 * We can't run Idle Load Balance on this CPU for this time so we 646 * cancel it and clear NOHZ_BALANCE_KICK 647 */ 648 atomic_andnot(NOHZ_KICK_MASK, nohz_flags(cpu)); 649 return false; 650 } 651 652 #else /* CONFIG_NO_HZ_COMMON */ 653 654 static inline bool got_nohz_idle_kick(void) 655 { 656 return false; 657 } 658 659 #endif /* CONFIG_NO_HZ_COMMON */ 660 661 #ifdef CONFIG_NO_HZ_FULL 662 bool sched_can_stop_tick(struct rq *rq) 663 { 664 int fifo_nr_running; 665 666 /* Deadline tasks, even if single, need the tick */ 667 if (rq->dl.dl_nr_running) 668 return false; 669 670 /* 671 * If there are more than one RR tasks, we need the tick to effect the 672 * actual RR behaviour. 673 */ 674 if (rq->rt.rr_nr_running) { 675 if (rq->rt.rr_nr_running == 1) 676 return true; 677 else 678 return false; 679 } 680 681 /* 682 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no 683 * forced preemption between FIFO tasks. 684 */ 685 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; 686 if (fifo_nr_running) 687 return true; 688 689 /* 690 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left; 691 * if there's more than one we need the tick for involuntary 692 * preemption. 693 */ 694 if (rq->nr_running > 1) 695 return false; 696 697 return true; 698 } 699 #endif /* CONFIG_NO_HZ_FULL */ 700 #endif /* CONFIG_SMP */ 701 702 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ 703 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) 704 /* 705 * Iterate task_group tree rooted at *from, calling @down when first entering a 706 * node and @up when leaving it for the final time. 707 * 708 * Caller must hold rcu_lock or sufficient equivalent. 
709 */ 710 int walk_tg_tree_from(struct task_group *from, 711 tg_visitor down, tg_visitor up, void *data) 712 { 713 struct task_group *parent, *child; 714 int ret; 715 716 parent = from; 717 718 down: 719 ret = (*down)(parent, data); 720 if (ret) 721 goto out; 722 list_for_each_entry_rcu(child, &parent->children, siblings) { 723 parent = child; 724 goto down; 725 726 up: 727 continue; 728 } 729 ret = (*up)(parent, data); 730 if (ret || parent == from) 731 goto out; 732 733 child = parent; 734 parent = parent->parent; 735 if (parent) 736 goto up; 737 out: 738 return ret; 739 } 740 741 int tg_nop(struct task_group *tg, void *data) 742 { 743 return 0; 744 } 745 #endif 746 747 static void set_load_weight(struct task_struct *p, bool update_load) 748 { 749 int prio = p->static_prio - MAX_RT_PRIO; 750 struct load_weight *load = &p->se.load; 751 752 /* 753 * SCHED_IDLE tasks get minimal weight: 754 */ 755 if (task_has_idle_policy(p)) { 756 load->weight = scale_load(WEIGHT_IDLEPRIO); 757 load->inv_weight = WMULT_IDLEPRIO; 758 p->se.runnable_weight = load->weight; 759 return; 760 } 761 762 /* 763 * SCHED_OTHER tasks have to update their load when changing their 764 * weight 765 */ 766 if (update_load && p->sched_class == &fair_sched_class) { 767 reweight_task(p, prio); 768 } else { 769 load->weight = scale_load(sched_prio_to_weight[prio]); 770 load->inv_weight = sched_prio_to_wmult[prio]; 771 p->se.runnable_weight = load->weight; 772 } 773 } 774 775 #ifdef CONFIG_UCLAMP_TASK 776 /* Max allowed minimum utilization */ 777 unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE; 778 779 /* Max allowed maximum utilization */ 780 unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE; 781 782 /* All clamps are required to be less or equal than these values */ 783 static struct uclamp_se uclamp_default[UCLAMP_CNT]; 784 785 /* Integer rounded range for each bucket */ 786 #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS) 787 788 #define for_each_clamp_id(clamp_id) \ 789 for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++) 790 791 static inline unsigned int uclamp_bucket_id(unsigned int clamp_value) 792 { 793 return clamp_value / UCLAMP_BUCKET_DELTA; 794 } 795 796 static inline unsigned int uclamp_bucket_base_value(unsigned int clamp_value) 797 { 798 return UCLAMP_BUCKET_DELTA * uclamp_bucket_id(clamp_value); 799 } 800 801 static inline unsigned int uclamp_none(int clamp_id) 802 { 803 if (clamp_id == UCLAMP_MIN) 804 return 0; 805 return SCHED_CAPACITY_SCALE; 806 } 807 808 static inline void uclamp_se_set(struct uclamp_se *uc_se, 809 unsigned int value, bool user_defined) 810 { 811 uc_se->value = value; 812 uc_se->bucket_id = uclamp_bucket_id(value); 813 uc_se->user_defined = user_defined; 814 } 815 816 static inline unsigned int 817 uclamp_idle_value(struct rq *rq, unsigned int clamp_id, 818 unsigned int clamp_value) 819 { 820 /* 821 * Avoid blocked utilization pushing up the frequency when we go 822 * idle (which drops the max-clamp) by retaining the last known 823 * max-clamp. 
824 */ 825 if (clamp_id == UCLAMP_MAX) { 826 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; 827 return clamp_value; 828 } 829 830 return uclamp_none(UCLAMP_MIN); 831 } 832 833 static inline void uclamp_idle_reset(struct rq *rq, unsigned int clamp_id, 834 unsigned int clamp_value) 835 { 836 /* Reset max-clamp retention only on idle exit */ 837 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) 838 return; 839 840 WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value); 841 } 842 843 static inline 844 unsigned int uclamp_rq_max_value(struct rq *rq, unsigned int clamp_id, 845 unsigned int clamp_value) 846 { 847 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; 848 int bucket_id = UCLAMP_BUCKETS - 1; 849 850 /* 851 * Since both min and max clamps are max aggregated, find the 852 * top most bucket with tasks in. 853 */ 854 for ( ; bucket_id >= 0; bucket_id--) { 855 if (!bucket[bucket_id].tasks) 856 continue; 857 return bucket[bucket_id].value; 858 } 859 860 /* No tasks -- default clamp values */ 861 return uclamp_idle_value(rq, clamp_id, clamp_value); 862 } 863 864 /* 865 * The effective clamp bucket index of a task depends on, by increasing 866 * priority: 867 * - the task specific clamp value, when explicitly requested from userspace 868 * - the system default clamp value, defined by the sysadmin 869 */ 870 static inline struct uclamp_se 871 uclamp_eff_get(struct task_struct *p, unsigned int clamp_id) 872 { 873 struct uclamp_se uc_req = p->uclamp_req[clamp_id]; 874 struct uclamp_se uc_max = uclamp_default[clamp_id]; 875 876 /* System default restrictions always apply */ 877 if (unlikely(uc_req.value > uc_max.value)) 878 return uc_max; 879 880 return uc_req; 881 } 882 883 unsigned int uclamp_eff_value(struct task_struct *p, unsigned int clamp_id) 884 { 885 struct uclamp_se uc_eff; 886 887 /* Task currently refcounted: use back-annotated (effective) value */ 888 if (p->uclamp[clamp_id].active) 889 return p->uclamp[clamp_id].value; 890 891 uc_eff = uclamp_eff_get(p, clamp_id); 892 893 return uc_eff.value; 894 } 895 896 /* 897 * When a task is enqueued on a rq, the clamp bucket currently defined by the 898 * task's uclamp::bucket_id is refcounted on that rq. This also immediately 899 * updates the rq's clamp value if required. 900 * 901 * Tasks can have a task-specific value requested from user-space, track 902 * within each bucket the maximum value for tasks refcounted in it. 903 * This "local max aggregation" allows to track the exact "requested" value 904 * for each bucket when all its RUNNABLE tasks require the same clamp. 905 */ 906 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, 907 unsigned int clamp_id) 908 { 909 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; 910 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; 911 struct uclamp_bucket *bucket; 912 913 lockdep_assert_held(&rq->lock); 914 915 /* Update task effective clamp */ 916 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); 917 918 bucket = &uc_rq->bucket[uc_se->bucket_id]; 919 bucket->tasks++; 920 uc_se->active = true; 921 922 uclamp_idle_reset(rq, clamp_id, uc_se->value); 923 924 /* 925 * Local max aggregation: rq buckets always track the max 926 * "requested" clamp value of its RUNNABLE tasks. 927 */ 928 if (bucket->tasks == 1 || uc_se->value > bucket->value) 929 bucket->value = uc_se->value; 930 931 if (uc_se->value > READ_ONCE(uc_rq->value)) 932 WRITE_ONCE(uc_rq->value, uc_se->value); 933 } 934 935 /* 936 * When a task is dequeued from a rq, the clamp bucket refcounted by the task 937 * is released. 
If this is the last task reference counting the rq's max 938 * active clamp value, then the rq's clamp value is updated. 939 * 940 * Both refcounted tasks and rq's cached clamp values are expected to be 941 * always valid. If it's detected they are not, as defensive programming, 942 * enforce the expected state and warn. 943 */ 944 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, 945 unsigned int clamp_id) 946 { 947 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; 948 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; 949 struct uclamp_bucket *bucket; 950 unsigned int bkt_clamp; 951 unsigned int rq_clamp; 952 953 lockdep_assert_held(&rq->lock); 954 955 bucket = &uc_rq->bucket[uc_se->bucket_id]; 956 SCHED_WARN_ON(!bucket->tasks); 957 if (likely(bucket->tasks)) 958 bucket->tasks--; 959 uc_se->active = false; 960 961 /* 962 * Keep "local max aggregation" simple and accept to (possibly) 963 * overboost some RUNNABLE tasks in the same bucket. 964 * The rq clamp bucket value is reset to its base value whenever 965 * there are no more RUNNABLE tasks refcounting it. 966 */ 967 if (likely(bucket->tasks)) 968 return; 969 970 rq_clamp = READ_ONCE(uc_rq->value); 971 /* 972 * Defensive programming: this should never happen. If it happens, 973 * e.g. due to future modification, warn and fixup the expected value. 974 */ 975 SCHED_WARN_ON(bucket->value > rq_clamp); 976 if (bucket->value >= rq_clamp) { 977 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); 978 WRITE_ONCE(uc_rq->value, bkt_clamp); 979 } 980 } 981 982 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) 983 { 984 unsigned int clamp_id; 985 986 if (unlikely(!p->sched_class->uclamp_enabled)) 987 return; 988 989 for_each_clamp_id(clamp_id) 990 uclamp_rq_inc_id(rq, p, clamp_id); 991 992 /* Reset clamp idle holding when there is one RUNNABLE task */ 993 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) 994 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; 995 } 996 997 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) 998 { 999 unsigned int clamp_id; 1000 1001 if (unlikely(!p->sched_class->uclamp_enabled)) 1002 return; 1003 1004 for_each_clamp_id(clamp_id) 1005 uclamp_rq_dec_id(rq, p, clamp_id); 1006 } 1007 1008 int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, 1009 void __user *buffer, size_t *lenp, 1010 loff_t *ppos) 1011 { 1012 int old_min, old_max; 1013 static DEFINE_MUTEX(mutex); 1014 int result; 1015 1016 mutex_lock(&mutex); 1017 old_min = sysctl_sched_uclamp_util_min; 1018 old_max = sysctl_sched_uclamp_util_max; 1019 1020 result = proc_dointvec(table, write, buffer, lenp, ppos); 1021 if (result) 1022 goto undo; 1023 if (!write) 1024 goto done; 1025 1026 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max || 1027 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE) { 1028 result = -EINVAL; 1029 goto undo; 1030 } 1031 1032 if (old_min != sysctl_sched_uclamp_util_min) { 1033 uclamp_se_set(&uclamp_default[UCLAMP_MIN], 1034 sysctl_sched_uclamp_util_min, false); 1035 } 1036 if (old_max != sysctl_sched_uclamp_util_max) { 1037 uclamp_se_set(&uclamp_default[UCLAMP_MAX], 1038 sysctl_sched_uclamp_util_max, false); 1039 } 1040 1041 /* 1042 * Updating all the RUNNABLE task is expensive, keep it simple and do 1043 * just a lazy update at each next enqueue time. 
1044 */ 1045 goto done; 1046 1047 undo: 1048 sysctl_sched_uclamp_util_min = old_min; 1049 sysctl_sched_uclamp_util_max = old_max; 1050 done: 1051 mutex_unlock(&mutex); 1052 1053 return result; 1054 } 1055 1056 static int uclamp_validate(struct task_struct *p, 1057 const struct sched_attr *attr) 1058 { 1059 unsigned int lower_bound = p->uclamp_req[UCLAMP_MIN].value; 1060 unsigned int upper_bound = p->uclamp_req[UCLAMP_MAX].value; 1061 1062 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) 1063 lower_bound = attr->sched_util_min; 1064 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) 1065 upper_bound = attr->sched_util_max; 1066 1067 if (lower_bound > upper_bound) 1068 return -EINVAL; 1069 if (upper_bound > SCHED_CAPACITY_SCALE) 1070 return -EINVAL; 1071 1072 return 0; 1073 } 1074 1075 static void __setscheduler_uclamp(struct task_struct *p, 1076 const struct sched_attr *attr) 1077 { 1078 unsigned int clamp_id; 1079 1080 /* 1081 * On scheduling class change, reset to default clamps for tasks 1082 * without a task-specific value. 1083 */ 1084 for_each_clamp_id(clamp_id) { 1085 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id]; 1086 unsigned int clamp_value = uclamp_none(clamp_id); 1087 1088 /* Keep using defined clamps across class changes */ 1089 if (uc_se->user_defined) 1090 continue; 1091 1092 /* By default, RT tasks always get 100% boost */ 1093 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN)) 1094 clamp_value = uclamp_none(UCLAMP_MAX); 1095 1096 uclamp_se_set(uc_se, clamp_value, false); 1097 } 1098 1099 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP))) 1100 return; 1101 1102 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) { 1103 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN], 1104 attr->sched_util_min, true); 1105 } 1106 1107 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) { 1108 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX], 1109 attr->sched_util_max, true); 1110 } 1111 } 1112 1113 static void uclamp_fork(struct task_struct *p) 1114 { 1115 unsigned int clamp_id; 1116 1117 for_each_clamp_id(clamp_id) 1118 p->uclamp[clamp_id].active = false; 1119 1120 if (likely(!p->sched_reset_on_fork)) 1121 return; 1122 1123 for_each_clamp_id(clamp_id) { 1124 unsigned int clamp_value = uclamp_none(clamp_id); 1125 1126 /* By default, RT tasks always get 100% boost */ 1127 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN)) 1128 clamp_value = uclamp_none(UCLAMP_MAX); 1129 1130 uclamp_se_set(&p->uclamp_req[clamp_id], clamp_value, false); 1131 } 1132 } 1133 1134 static void __init init_uclamp(void) 1135 { 1136 struct uclamp_se uc_max = {}; 1137 unsigned int clamp_id; 1138 int cpu; 1139 1140 for_each_possible_cpu(cpu) { 1141 memset(&cpu_rq(cpu)->uclamp, 0, sizeof(struct uclamp_rq)); 1142 cpu_rq(cpu)->uclamp_flags = 0; 1143 } 1144 1145 for_each_clamp_id(clamp_id) { 1146 uclamp_se_set(&init_task.uclamp_req[clamp_id], 1147 uclamp_none(clamp_id), false); 1148 } 1149 1150 /* System defaults allow max clamp values for both indexes */ 1151 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false); 1152 for_each_clamp_id(clamp_id) 1153 uclamp_default[clamp_id] = uc_max; 1154 } 1155 1156 #else /* CONFIG_UCLAMP_TASK */ 1157 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } 1158 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } 1159 static inline int uclamp_validate(struct task_struct *p, 1160 const struct sched_attr *attr) 1161 { 1162 return -EOPNOTSUPP; 1163 } 1164 static void __setscheduler_uclamp(struct task_struct *p, 1165 const struct sched_attr *attr) { } 
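/*
 * Example (illustrative sketch, not part of the original sources; values
 * are arbitrary): the per-task clamps validated by uclamp_validate() and
 * applied by __setscheduler_uclamp() in this file are normally requested
 * from userspace through the sched_setattr() syscall, roughly like so:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_flags	= SCHED_FLAG_UTIL_CLAMP_MIN |
 *				  SCHED_FLAG_UTIL_CLAMP_MAX,
 *		.sched_util_min	= 128,
 *		.sched_util_max	= 512,
 *	};
 *
 *	syscall(__NR_sched_setattr, 0, &attr, 0);
 *
 * Both values are expressed on the SCHED_CAPACITY_SCALE (0..1024) range
 * and must satisfy util_min <= util_max, as enforced by uclamp_validate().
 */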
1166 static inline void uclamp_fork(struct task_struct *p) { } 1167 static inline void init_uclamp(void) { } 1168 #endif /* CONFIG_UCLAMP_TASK */ 1169 1170 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 1171 { 1172 if (!(flags & ENQUEUE_NOCLOCK)) 1173 update_rq_clock(rq); 1174 1175 if (!(flags & ENQUEUE_RESTORE)) { 1176 sched_info_queued(rq, p); 1177 psi_enqueue(p, flags & ENQUEUE_WAKEUP); 1178 } 1179 1180 uclamp_rq_inc(rq, p); 1181 p->sched_class->enqueue_task(rq, p, flags); 1182 } 1183 1184 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 1185 { 1186 if (!(flags & DEQUEUE_NOCLOCK)) 1187 update_rq_clock(rq); 1188 1189 if (!(flags & DEQUEUE_SAVE)) { 1190 sched_info_dequeued(rq, p); 1191 psi_dequeue(p, flags & DEQUEUE_SLEEP); 1192 } 1193 1194 uclamp_rq_dec(rq, p); 1195 p->sched_class->dequeue_task(rq, p, flags); 1196 } 1197 1198 void activate_task(struct rq *rq, struct task_struct *p, int flags) 1199 { 1200 if (task_contributes_to_load(p)) 1201 rq->nr_uninterruptible--; 1202 1203 enqueue_task(rq, p, flags); 1204 1205 p->on_rq = TASK_ON_RQ_QUEUED; 1206 } 1207 1208 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 1209 { 1210 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING; 1211 1212 if (task_contributes_to_load(p)) 1213 rq->nr_uninterruptible++; 1214 1215 dequeue_task(rq, p, flags); 1216 } 1217 1218 /* 1219 * __normal_prio - return the priority that is based on the static prio 1220 */ 1221 static inline int __normal_prio(struct task_struct *p) 1222 { 1223 return p->static_prio; 1224 } 1225 1226 /* 1227 * Calculate the expected normal priority: i.e. priority 1228 * without taking RT-inheritance into account. Might be 1229 * boosted by interactivity modifiers. Changes upon fork, 1230 * setprio syscalls, and whenever the interactivity 1231 * estimator recalculates. 1232 */ 1233 static inline int normal_prio(struct task_struct *p) 1234 { 1235 int prio; 1236 1237 if (task_has_dl_policy(p)) 1238 prio = MAX_DL_PRIO-1; 1239 else if (task_has_rt_policy(p)) 1240 prio = MAX_RT_PRIO-1 - p->rt_priority; 1241 else 1242 prio = __normal_prio(p); 1243 return prio; 1244 } 1245 1246 /* 1247 * Calculate the current priority, i.e. the priority 1248 * taken into account by the scheduler. This value might 1249 * be boosted by RT tasks, or might be boosted by 1250 * interactivity modifiers. Will be RT if the task got 1251 * RT-boosted. If not then it returns p->normal_prio. 1252 */ 1253 static int effective_prio(struct task_struct *p) 1254 { 1255 p->normal_prio = normal_prio(p); 1256 /* 1257 * If we are RT tasks or we were boosted to RT priority, 1258 * keep the priority unchanged. Otherwise, update priority 1259 * to the normal priority: 1260 */ 1261 if (!rt_prio(p->prio)) 1262 return p->normal_prio; 1263 return p->prio; 1264 } 1265 1266 /** 1267 * task_curr - is this task currently executing on a CPU? 1268 * @p: the task in question. 1269 * 1270 * Return: 1 if the task is currently executing. 0 otherwise. 1271 */ 1272 inline int task_curr(const struct task_struct *p) 1273 { 1274 return cpu_curr(task_cpu(p)) == p; 1275 } 1276 1277 /* 1278 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, 1279 * use the balance_callback list if you want balancing. 1280 * 1281 * this means any call to check_class_changed() must be followed by a call to 1282 * balance_callback(). 
1283 */ 1284 static inline void check_class_changed(struct rq *rq, struct task_struct *p, 1285 const struct sched_class *prev_class, 1286 int oldprio) 1287 { 1288 if (prev_class != p->sched_class) { 1289 if (prev_class->switched_from) 1290 prev_class->switched_from(rq, p); 1291 1292 p->sched_class->switched_to(rq, p); 1293 } else if (oldprio != p->prio || dl_task(p)) 1294 p->sched_class->prio_changed(rq, p, oldprio); 1295 } 1296 1297 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) 1298 { 1299 const struct sched_class *class; 1300 1301 if (p->sched_class == rq->curr->sched_class) { 1302 rq->curr->sched_class->check_preempt_curr(rq, p, flags); 1303 } else { 1304 for_each_class(class) { 1305 if (class == rq->curr->sched_class) 1306 break; 1307 if (class == p->sched_class) { 1308 resched_curr(rq); 1309 break; 1310 } 1311 } 1312 } 1313 1314 /* 1315 * A queue event has occurred, and we're going to schedule. In 1316 * this case, we can save a useless back to back clock update. 1317 */ 1318 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) 1319 rq_clock_skip_update(rq); 1320 } 1321 1322 #ifdef CONFIG_SMP 1323 1324 static inline bool is_per_cpu_kthread(struct task_struct *p) 1325 { 1326 if (!(p->flags & PF_KTHREAD)) 1327 return false; 1328 1329 if (p->nr_cpus_allowed != 1) 1330 return false; 1331 1332 return true; 1333 } 1334 1335 /* 1336 * Per-CPU kthreads are allowed to run on !active && online CPUs, see 1337 * __set_cpus_allowed_ptr() and select_fallback_rq(). 1338 */ 1339 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) 1340 { 1341 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 1342 return false; 1343 1344 if (is_per_cpu_kthread(p)) 1345 return cpu_online(cpu); 1346 1347 return cpu_active(cpu); 1348 } 1349 1350 /* 1351 * This is how migration works: 1352 * 1353 * 1) we invoke migration_cpu_stop() on the target CPU using 1354 * stop_one_cpu(). 1355 * 2) stopper starts to run (implicitly forcing the migrated thread 1356 * off the CPU) 1357 * 3) it checks whether the migrated task is still in the wrong runqueue. 1358 * 4) if it's in the wrong runqueue then the migration thread removes 1359 * it and puts it into the right queue. 1360 * 5) stopper completes and stop_one_cpu() returns and the migration 1361 * is done. 1362 */ 1363 1364 /* 1365 * move_queued_task - move a queued task to new rq. 1366 * 1367 * Returns (locked) new rq. Old rq's lock is released. 1368 */ 1369 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, 1370 struct task_struct *p, int new_cpu) 1371 { 1372 lockdep_assert_held(&rq->lock); 1373 1374 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); 1375 dequeue_task(rq, p, DEQUEUE_NOCLOCK); 1376 set_task_cpu(p, new_cpu); 1377 rq_unlock(rq, rf); 1378 1379 rq = cpu_rq(new_cpu); 1380 1381 rq_lock(rq, rf); 1382 BUG_ON(task_cpu(p) != new_cpu); 1383 enqueue_task(rq, p, 0); 1384 p->on_rq = TASK_ON_RQ_QUEUED; 1385 check_preempt_curr(rq, p, 0); 1386 1387 return rq; 1388 } 1389 1390 struct migration_arg { 1391 struct task_struct *task; 1392 int dest_cpu; 1393 }; 1394 1395 /* 1396 * Move (not current) task off this CPU, onto the destination CPU. We're doing 1397 * this because either it can't run here any more (set_cpus_allowed() 1398 * away from this CPU, or CPU going down), or because we're 1399 * attempting to rebalance this task on exec (sched_exec). 1400 * 1401 * So we race with normal scheduler movements, but that's OK, as long 1402 * as the task is no longer on this CPU. 
1403 */ 1404 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, 1405 struct task_struct *p, int dest_cpu) 1406 { 1407 /* Affinity changed (again). */ 1408 if (!is_cpu_allowed(p, dest_cpu)) 1409 return rq; 1410 1411 update_rq_clock(rq); 1412 rq = move_queued_task(rq, rf, p, dest_cpu); 1413 1414 return rq; 1415 } 1416 1417 /* 1418 * migration_cpu_stop - this will be executed by a highprio stopper thread 1419 * and performs thread migration by bumping thread off CPU then 1420 * 'pushing' onto another runqueue. 1421 */ 1422 static int migration_cpu_stop(void *data) 1423 { 1424 struct migration_arg *arg = data; 1425 struct task_struct *p = arg->task; 1426 struct rq *rq = this_rq(); 1427 struct rq_flags rf; 1428 1429 /* 1430 * The original target CPU might have gone down and we might 1431 * be on another CPU but it doesn't matter. 1432 */ 1433 local_irq_disable(); 1434 /* 1435 * We need to explicitly wake pending tasks before running 1436 * __migrate_task() such that we will not miss enforcing cpus_ptr 1437 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 1438 */ 1439 sched_ttwu_pending(); 1440 1441 raw_spin_lock(&p->pi_lock); 1442 rq_lock(rq, &rf); 1443 /* 1444 * If task_rq(p) != rq, it cannot be migrated here, because we're 1445 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because 1446 * we're holding p->pi_lock. 1447 */ 1448 if (task_rq(p) == rq) { 1449 if (task_on_rq_queued(p)) 1450 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); 1451 else 1452 p->wake_cpu = arg->dest_cpu; 1453 } 1454 rq_unlock(rq, &rf); 1455 raw_spin_unlock(&p->pi_lock); 1456 1457 local_irq_enable(); 1458 return 0; 1459 } 1460 1461 /* 1462 * sched_class::set_cpus_allowed must do the below, but is not required to 1463 * actually call this function. 1464 */ 1465 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) 1466 { 1467 cpumask_copy(&p->cpus_mask, new_mask); 1468 p->nr_cpus_allowed = cpumask_weight(new_mask); 1469 } 1470 1471 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 1472 { 1473 struct rq *rq = task_rq(p); 1474 bool queued, running; 1475 1476 lockdep_assert_held(&p->pi_lock); 1477 1478 queued = task_on_rq_queued(p); 1479 running = task_current(rq, p); 1480 1481 if (queued) { 1482 /* 1483 * Because __kthread_bind() calls this on blocked tasks without 1484 * holding rq->lock. 1485 */ 1486 lockdep_assert_held(&rq->lock); 1487 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 1488 } 1489 if (running) 1490 put_prev_task(rq, p); 1491 1492 p->sched_class->set_cpus_allowed(p, new_mask); 1493 1494 if (queued) 1495 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 1496 if (running) 1497 set_curr_task(rq, p); 1498 } 1499 1500 /* 1501 * Change a given task's CPU affinity. Migrate the thread to a 1502 * proper CPU and schedule it away if the CPU it's executing on 1503 * is removed from the allowed bitmask. 1504 * 1505 * NOTE: the caller must have a valid reference to the task, the 1506 * task must not exit() & deallocate itself prematurely. The 1507 * call is not atomic; no spinlocks may be held. 
1508 */ 1509 static int __set_cpus_allowed_ptr(struct task_struct *p, 1510 const struct cpumask *new_mask, bool check) 1511 { 1512 const struct cpumask *cpu_valid_mask = cpu_active_mask; 1513 unsigned int dest_cpu; 1514 struct rq_flags rf; 1515 struct rq *rq; 1516 int ret = 0; 1517 1518 rq = task_rq_lock(p, &rf); 1519 update_rq_clock(rq); 1520 1521 if (p->flags & PF_KTHREAD) { 1522 /* 1523 * Kernel threads are allowed on online && !active CPUs 1524 */ 1525 cpu_valid_mask = cpu_online_mask; 1526 } 1527 1528 /* 1529 * Must re-check here, to close a race against __kthread_bind(), 1530 * sched_setaffinity() is not guaranteed to observe the flag. 1531 */ 1532 if (check && (p->flags & PF_NO_SETAFFINITY)) { 1533 ret = -EINVAL; 1534 goto out; 1535 } 1536 1537 if (cpumask_equal(p->cpus_ptr, new_mask)) 1538 goto out; 1539 1540 if (!cpumask_intersects(new_mask, cpu_valid_mask)) { 1541 ret = -EINVAL; 1542 goto out; 1543 } 1544 1545 do_set_cpus_allowed(p, new_mask); 1546 1547 if (p->flags & PF_KTHREAD) { 1548 /* 1549 * For kernel threads that do indeed end up on online && 1550 * !active we want to ensure they are strict per-CPU threads. 1551 */ 1552 WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) && 1553 !cpumask_intersects(new_mask, cpu_active_mask) && 1554 p->nr_cpus_allowed != 1); 1555 } 1556 1557 /* Can the task run on the task's current CPU? If so, we're done */ 1558 if (cpumask_test_cpu(task_cpu(p), new_mask)) 1559 goto out; 1560 1561 dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); 1562 if (task_running(rq, p) || p->state == TASK_WAKING) { 1563 struct migration_arg arg = { p, dest_cpu }; 1564 /* Need help from migration thread: drop lock and wait. */ 1565 task_rq_unlock(rq, p, &rf); 1566 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); 1567 return 0; 1568 } else if (task_on_rq_queued(p)) { 1569 /* 1570 * OK, since we're going to drop the lock immediately 1571 * afterwards anyway. 1572 */ 1573 rq = move_queued_task(rq, &rf, p, dest_cpu); 1574 } 1575 out: 1576 task_rq_unlock(rq, p, &rf); 1577 1578 return ret; 1579 } 1580 1581 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 1582 { 1583 return __set_cpus_allowed_ptr(p, new_mask, false); 1584 } 1585 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 1586 1587 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 1588 { 1589 #ifdef CONFIG_SCHED_DEBUG 1590 /* 1591 * We should never call set_task_cpu() on a blocked task, 1592 * ttwu() will sort out the placement. 1593 */ 1594 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && 1595 !p->on_rq); 1596 1597 /* 1598 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, 1599 * because schedstat_wait_{start,end} rebase migrating task's wait_start 1600 * time relying on p->on_rq. 1601 */ 1602 WARN_ON_ONCE(p->state == TASK_RUNNING && 1603 p->sched_class == &fair_sched_class && 1604 (p->on_rq && !task_on_rq_migrating(p))); 1605 1606 #ifdef CONFIG_LOCKDEP 1607 /* 1608 * The caller should hold either p->pi_lock or rq->lock, when changing 1609 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 1610 * 1611 * sched_move_task() holds both and thus holding either pins the cgroup, 1612 * see task_group(). 1613 * 1614 * Furthermore, all task_rq users should acquire both locks, see 1615 * task_rq_lock(). 1616 */ 1617 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 1618 lockdep_is_held(&task_rq(p)->lock))); 1619 #endif 1620 /* 1621 * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 
1622 */ 1623 WARN_ON_ONCE(!cpu_online(new_cpu)); 1624 #endif 1625 1626 trace_sched_migrate_task(p, new_cpu); 1627 1628 if (task_cpu(p) != new_cpu) { 1629 if (p->sched_class->migrate_task_rq) 1630 p->sched_class->migrate_task_rq(p, new_cpu); 1631 p->se.nr_migrations++; 1632 rseq_migrate(p); 1633 perf_event_task_migrate(p); 1634 } 1635 1636 __set_task_cpu(p, new_cpu); 1637 } 1638 1639 #ifdef CONFIG_NUMA_BALANCING 1640 static void __migrate_swap_task(struct task_struct *p, int cpu) 1641 { 1642 if (task_on_rq_queued(p)) { 1643 struct rq *src_rq, *dst_rq; 1644 struct rq_flags srf, drf; 1645 1646 src_rq = task_rq(p); 1647 dst_rq = cpu_rq(cpu); 1648 1649 rq_pin_lock(src_rq, &srf); 1650 rq_pin_lock(dst_rq, &drf); 1651 1652 deactivate_task(src_rq, p, 0); 1653 set_task_cpu(p, cpu); 1654 activate_task(dst_rq, p, 0); 1655 check_preempt_curr(dst_rq, p, 0); 1656 1657 rq_unpin_lock(dst_rq, &drf); 1658 rq_unpin_lock(src_rq, &srf); 1659 1660 } else { 1661 /* 1662 * Task isn't running anymore; make it appear like we migrated 1663 * it before it went to sleep. This means on wakeup we make the 1664 * previous CPU our target instead of where it really is. 1665 */ 1666 p->wake_cpu = cpu; 1667 } 1668 } 1669 1670 struct migration_swap_arg { 1671 struct task_struct *src_task, *dst_task; 1672 int src_cpu, dst_cpu; 1673 }; 1674 1675 static int migrate_swap_stop(void *data) 1676 { 1677 struct migration_swap_arg *arg = data; 1678 struct rq *src_rq, *dst_rq; 1679 int ret = -EAGAIN; 1680 1681 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) 1682 return -EAGAIN; 1683 1684 src_rq = cpu_rq(arg->src_cpu); 1685 dst_rq = cpu_rq(arg->dst_cpu); 1686 1687 double_raw_lock(&arg->src_task->pi_lock, 1688 &arg->dst_task->pi_lock); 1689 double_rq_lock(src_rq, dst_rq); 1690 1691 if (task_cpu(arg->dst_task) != arg->dst_cpu) 1692 goto unlock; 1693 1694 if (task_cpu(arg->src_task) != arg->src_cpu) 1695 goto unlock; 1696 1697 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) 1698 goto unlock; 1699 1700 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) 1701 goto unlock; 1702 1703 __migrate_swap_task(arg->src_task, arg->dst_cpu); 1704 __migrate_swap_task(arg->dst_task, arg->src_cpu); 1705 1706 ret = 0; 1707 1708 unlock: 1709 double_rq_unlock(src_rq, dst_rq); 1710 raw_spin_unlock(&arg->dst_task->pi_lock); 1711 raw_spin_unlock(&arg->src_task->pi_lock); 1712 1713 return ret; 1714 } 1715 1716 /* 1717 * Cross migrate two tasks 1718 */ 1719 int migrate_swap(struct task_struct *cur, struct task_struct *p, 1720 int target_cpu, int curr_cpu) 1721 { 1722 struct migration_swap_arg arg; 1723 int ret = -EINVAL; 1724 1725 arg = (struct migration_swap_arg){ 1726 .src_task = cur, 1727 .src_cpu = curr_cpu, 1728 .dst_task = p, 1729 .dst_cpu = target_cpu, 1730 }; 1731 1732 if (arg.src_cpu == arg.dst_cpu) 1733 goto out; 1734 1735 /* 1736 * These three tests are all lockless; this is OK since all of them 1737 * will be re-checked with proper locks held further down the line. 1738 */ 1739 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 1740 goto out; 1741 1742 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) 1743 goto out; 1744 1745 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) 1746 goto out; 1747 1748 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 1749 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 1750 1751 out: 1752 return ret; 1753 } 1754 #endif /* CONFIG_NUMA_BALANCING */ 1755 1756 /* 1757 * wait_task_inactive - wait for a thread to unschedule. 
1758 * 1759 * If @match_state is nonzero, it's the @p->state value just checked and 1760 * not expected to change. If it changes, i.e. @p might have woken up, 1761 * then return zero. When we succeed in waiting for @p to be off its CPU, 1762 * we return a positive number (its total switch count). If a second call 1763 * a short while later returns the same number, the caller can be sure that 1764 * @p has remained unscheduled the whole time. 1765 * 1766 * The caller must ensure that the task *will* unschedule sometime soon, 1767 * else this function might spin for a *long* time. This function can't 1768 * be called with interrupts off, or it may introduce deadlock with 1769 * smp_call_function() if an IPI is sent by the same process we are 1770 * waiting to become inactive. 1771 */ 1772 unsigned long wait_task_inactive(struct task_struct *p, long match_state) 1773 { 1774 int running, queued; 1775 struct rq_flags rf; 1776 unsigned long ncsw; 1777 struct rq *rq; 1778 1779 for (;;) { 1780 /* 1781 * We do the initial early heuristics without holding 1782 * any task-queue locks at all. We'll only try to get 1783 * the runqueue lock when things look like they will 1784 * work out! 1785 */ 1786 rq = task_rq(p); 1787 1788 /* 1789 * If the task is actively running on another CPU 1790 * still, just relax and busy-wait without holding 1791 * any locks. 1792 * 1793 * NOTE! Since we don't hold any locks, it's not 1794 * even sure that "rq" stays as the right runqueue! 1795 * But we don't care, since "task_running()" will 1796 * return false if the runqueue has changed and p 1797 * is actually now running somewhere else! 1798 */ 1799 while (task_running(rq, p)) { 1800 if (match_state && unlikely(p->state != match_state)) 1801 return 0; 1802 cpu_relax(); 1803 } 1804 1805 /* 1806 * Ok, time to look more closely! We need the rq 1807 * lock now, to be *sure*. If we're wrong, we'll 1808 * just go back and repeat. 1809 */ 1810 rq = task_rq_lock(p, &rf); 1811 trace_sched_wait_task(p); 1812 running = task_running(rq, p); 1813 queued = task_on_rq_queued(p); 1814 ncsw = 0; 1815 if (!match_state || p->state == match_state) 1816 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 1817 task_rq_unlock(rq, p, &rf); 1818 1819 /* 1820 * If it changed from the expected state, bail out now. 1821 */ 1822 if (unlikely(!ncsw)) 1823 break; 1824 1825 /* 1826 * Was it really running after all now that we 1827 * checked with the proper locks actually held? 1828 * 1829 * Oops. Go back and try again.. 1830 */ 1831 if (unlikely(running)) { 1832 cpu_relax(); 1833 continue; 1834 } 1835 1836 /* 1837 * It's not enough that it's not actively running, 1838 * it must be off the runqueue _entirely_, and not 1839 * preempted! 1840 * 1841 * So if it was still runnable (but just not actively 1842 * running right now), it's preempted, and we should 1843 * yield - it could be a while. 1844 */ 1845 if (unlikely(queued)) { 1846 ktime_t to = NSEC_PER_SEC / HZ; 1847 1848 set_current_state(TASK_UNINTERRUPTIBLE); 1849 schedule_hrtimeout(&to, HRTIMER_MODE_REL); 1850 continue; 1851 } 1852 1853 /* 1854 * Ahh, all good. It wasn't running, and it wasn't 1855 * runnable, which means that it will never become 1856 * running in the future either. We're all done! 1857 */ 1858 break; 1859 } 1860 1861 return ncsw; 1862 } 1863 1864 /*** 1865 * kick_process - kick a running thread to enter/exit the kernel 1866 * @p: the to-be-kicked thread 1867 * 1868 * Cause a process which is running on another CPU to enter 1869 * kernel-mode, without any delay. 
(to get signals handled.) 1870 * 1871 * NOTE: this function doesn't have to take the runqueue lock, 1872 * because all it wants to ensure is that the remote task enters 1873 * the kernel. If the IPI races and the task has been migrated 1874 * to another CPU then no harm is done and the purpose has been 1875 * achieved as well. 1876 */ 1877 void kick_process(struct task_struct *p) 1878 { 1879 int cpu; 1880 1881 preempt_disable(); 1882 cpu = task_cpu(p); 1883 if ((cpu != smp_processor_id()) && task_curr(p)) 1884 smp_send_reschedule(cpu); 1885 preempt_enable(); 1886 } 1887 EXPORT_SYMBOL_GPL(kick_process); 1888 1889 /* 1890 * ->cpus_ptr is protected by both rq->lock and p->pi_lock 1891 * 1892 * A few notes on cpu_active vs cpu_online: 1893 * 1894 * - cpu_active must be a subset of cpu_online 1895 * 1896 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, 1897 * see __set_cpus_allowed_ptr(). At this point the newly online 1898 * CPU isn't yet part of the sched domains, and balancing will not 1899 * see it. 1900 * 1901 * - on CPU-down we clear cpu_active() to mask the sched domains and 1902 * avoid the load balancer to place new tasks on the to be removed 1903 * CPU. Existing tasks will remain running there and will be taken 1904 * off. 1905 * 1906 * This means that fallback selection must not select !active CPUs. 1907 * And can assume that any active CPU must be online. Conversely 1908 * select_task_rq() below may allow selection of !active CPUs in order 1909 * to satisfy the above rules. 1910 */ 1911 static int select_fallback_rq(int cpu, struct task_struct *p) 1912 { 1913 int nid = cpu_to_node(cpu); 1914 const struct cpumask *nodemask = NULL; 1915 enum { cpuset, possible, fail } state = cpuset; 1916 int dest_cpu; 1917 1918 /* 1919 * If the node that the CPU is on has been offlined, cpu_to_node() 1920 * will return -1. There is no CPU on the node, and we should 1921 * select the CPU on the other node. 1922 */ 1923 if (nid != -1) { 1924 nodemask = cpumask_of_node(nid); 1925 1926 /* Look for allowed, online CPU in same node. */ 1927 for_each_cpu(dest_cpu, nodemask) { 1928 if (!cpu_active(dest_cpu)) 1929 continue; 1930 if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) 1931 return dest_cpu; 1932 } 1933 } 1934 1935 for (;;) { 1936 /* Any allowed, online CPU? */ 1937 for_each_cpu(dest_cpu, p->cpus_ptr) { 1938 if (!is_cpu_allowed(p, dest_cpu)) 1939 continue; 1940 1941 goto out; 1942 } 1943 1944 /* No more Mr. Nice Guy. */ 1945 switch (state) { 1946 case cpuset: 1947 if (IS_ENABLED(CONFIG_CPUSETS)) { 1948 cpuset_cpus_allowed_fallback(p); 1949 state = possible; 1950 break; 1951 } 1952 /* Fall-through */ 1953 case possible: 1954 do_set_cpus_allowed(p, cpu_possible_mask); 1955 state = fail; 1956 break; 1957 1958 case fail: 1959 BUG(); 1960 break; 1961 } 1962 } 1963 1964 out: 1965 if (state != cpuset) { 1966 /* 1967 * Don't tell them about moving exiting tasks or 1968 * kernel threads (both mm NULL), since they never 1969 * leave kernel. 1970 */ 1971 if (p->mm && printk_ratelimit()) { 1972 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 1973 task_pid_nr(p), p->comm, cpu); 1974 } 1975 } 1976 1977 return dest_cpu; 1978 } 1979 1980 /* 1981 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 
1982 */ 1983 static inline 1984 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) 1985 { 1986 lockdep_assert_held(&p->pi_lock); 1987 1988 if (p->nr_cpus_allowed > 1) 1989 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); 1990 else 1991 cpu = cpumask_any(p->cpus_ptr); 1992 1993 /* 1994 * In order not to call set_task_cpu() on a blocking task we need 1995 * to rely on ttwu() to place the task on a valid ->cpus_ptr 1996 * CPU. 1997 * 1998 * Since this is common to all placement strategies, this lives here. 1999 * 2000 * [ this allows ->select_task() to simply return task_cpu(p) and 2001 * not worry about this generic constraint ] 2002 */ 2003 if (unlikely(!is_cpu_allowed(p, cpu))) 2004 cpu = select_fallback_rq(task_cpu(p), p); 2005 2006 return cpu; 2007 } 2008 2009 static void update_avg(u64 *avg, u64 sample) 2010 { 2011 s64 diff = sample - *avg; 2012 *avg += diff >> 3; 2013 } 2014 2015 void sched_set_stop_task(int cpu, struct task_struct *stop) 2016 { 2017 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 2018 struct task_struct *old_stop = cpu_rq(cpu)->stop; 2019 2020 if (stop) { 2021 /* 2022 * Make it appear like a SCHED_FIFO task, it's something 2023 * userspace knows about and won't get confused about. 2024 * 2025 * Also, it will make PI more or less work without too 2026 * much confusion -- but then, stop work should not 2027 * rely on PI working anyway. 2028 */ 2029 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param); 2030 2031 stop->sched_class = &stop_sched_class; 2032 } 2033 2034 cpu_rq(cpu)->stop = stop; 2035 2036 if (old_stop) { 2037 /* 2038 * Reset it back to a normal scheduling class so that 2039 * it can die in pieces. 2040 */ 2041 old_stop->sched_class = &rt_sched_class; 2042 } 2043 } 2044 2045 #else 2046 2047 static inline int __set_cpus_allowed_ptr(struct task_struct *p, 2048 const struct cpumask *new_mask, bool check) 2049 { 2050 return set_cpus_allowed_ptr(p, new_mask); 2051 } 2052 2053 #endif /* CONFIG_SMP */ 2054 2055 static void 2056 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 2057 { 2058 struct rq *rq; 2059 2060 if (!schedstat_enabled()) 2061 return; 2062 2063 rq = this_rq(); 2064 2065 #ifdef CONFIG_SMP 2066 if (cpu == rq->cpu) { 2067 __schedstat_inc(rq->ttwu_local); 2068 __schedstat_inc(p->se.statistics.nr_wakeups_local); 2069 } else { 2070 struct sched_domain *sd; 2071 2072 __schedstat_inc(p->se.statistics.nr_wakeups_remote); 2073 rcu_read_lock(); 2074 for_each_domain(rq->cpu, sd) { 2075 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 2076 __schedstat_inc(sd->ttwu_wake_remote); 2077 break; 2078 } 2079 } 2080 rcu_read_unlock(); 2081 } 2082 2083 if (wake_flags & WF_MIGRATED) 2084 __schedstat_inc(p->se.statistics.nr_wakeups_migrate); 2085 #endif /* CONFIG_SMP */ 2086 2087 __schedstat_inc(rq->ttwu_count); 2088 __schedstat_inc(p->se.statistics.nr_wakeups); 2089 2090 if (wake_flags & WF_SYNC) 2091 __schedstat_inc(p->se.statistics.nr_wakeups_sync); 2092 } 2093 2094 /* 2095 * Mark the task runnable and perform wakeup-preemption. 2096 */ 2097 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, 2098 struct rq_flags *rf) 2099 { 2100 check_preempt_curr(rq, p, wake_flags); 2101 p->state = TASK_RUNNING; 2102 trace_sched_wakeup(p); 2103 2104 #ifdef CONFIG_SMP 2105 if (p->sched_class->task_woken) { 2106 /* 2107 * Our task @p is fully woken up and running; so it's safe to 2108 * drop the rq->lock, hereafter rq is only used for statistics.
2109 */ 2110 rq_unpin_lock(rq, rf); 2111 p->sched_class->task_woken(rq, p); 2112 rq_repin_lock(rq, rf); 2113 } 2114 2115 if (rq->idle_stamp) { 2116 u64 delta = rq_clock(rq) - rq->idle_stamp; 2117 u64 max = 2*rq->max_idle_balance_cost; 2118 2119 update_avg(&rq->avg_idle, delta); 2120 2121 if (rq->avg_idle > max) 2122 rq->avg_idle = max; 2123 2124 rq->idle_stamp = 0; 2125 } 2126 #endif 2127 } 2128 2129 static void 2130 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 2131 struct rq_flags *rf) 2132 { 2133 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 2134 2135 lockdep_assert_held(&rq->lock); 2136 2137 #ifdef CONFIG_SMP 2138 if (p->sched_contributes_to_load) 2139 rq->nr_uninterruptible--; 2140 2141 if (wake_flags & WF_MIGRATED) 2142 en_flags |= ENQUEUE_MIGRATED; 2143 #endif 2144 2145 activate_task(rq, p, en_flags); 2146 ttwu_do_wakeup(rq, p, wake_flags, rf); 2147 } 2148 2149 /* 2150 * Called in case the task @p isn't fully descheduled from its runqueue, 2151 * in this case we must do a remote wakeup. Its a 'light' wakeup though, 2152 * since all we need to do is flip p->state to TASK_RUNNING, since 2153 * the task is still ->on_rq. 2154 */ 2155 static int ttwu_remote(struct task_struct *p, int wake_flags) 2156 { 2157 struct rq_flags rf; 2158 struct rq *rq; 2159 int ret = 0; 2160 2161 rq = __task_rq_lock(p, &rf); 2162 if (task_on_rq_queued(p)) { 2163 /* check_preempt_curr() may use rq clock */ 2164 update_rq_clock(rq); 2165 ttwu_do_wakeup(rq, p, wake_flags, &rf); 2166 ret = 1; 2167 } 2168 __task_rq_unlock(rq, &rf); 2169 2170 return ret; 2171 } 2172 2173 #ifdef CONFIG_SMP 2174 void sched_ttwu_pending(void) 2175 { 2176 struct rq *rq = this_rq(); 2177 struct llist_node *llist = llist_del_all(&rq->wake_list); 2178 struct task_struct *p, *t; 2179 struct rq_flags rf; 2180 2181 if (!llist) 2182 return; 2183 2184 rq_lock_irqsave(rq, &rf); 2185 update_rq_clock(rq); 2186 2187 llist_for_each_entry_safe(p, t, llist, wake_entry) 2188 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); 2189 2190 rq_unlock_irqrestore(rq, &rf); 2191 } 2192 2193 void scheduler_ipi(void) 2194 { 2195 /* 2196 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting 2197 * TIF_NEED_RESCHED remotely (for the first time) will also send 2198 * this IPI. 2199 */ 2200 preempt_fold_need_resched(); 2201 2202 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) 2203 return; 2204 2205 /* 2206 * Not all reschedule IPI handlers call irq_enter/irq_exit, since 2207 * traditionally all their work was done from the interrupt return 2208 * path. Now that we actually do some work, we need to make sure 2209 * we do call them. 2210 * 2211 * Some archs already do call them, luckily irq_enter/exit nest 2212 * properly. 2213 * 2214 * Arguably we should visit all archs and update all handlers, 2215 * however a fair share of IPIs are still resched only so this would 2216 * somewhat pessimize the simple resched case. 2217 */ 2218 irq_enter(); 2219 sched_ttwu_pending(); 2220 2221 /* 2222 * Check if someone kicked us for doing the nohz idle load balance. 
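 *
 * If so, raise SCHED_SOFTIRQ so the balancing itself runs from softirq
 * context once irq_exit() below processes pending softirqs.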
2223 */ 2224 if (unlikely(got_nohz_idle_kick())) { 2225 this_rq()->idle_balance = 1; 2226 raise_softirq_irqoff(SCHED_SOFTIRQ); 2227 } 2228 irq_exit(); 2229 } 2230 2231 static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags) 2232 { 2233 struct rq *rq = cpu_rq(cpu); 2234 2235 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 2236 2237 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) { 2238 if (!set_nr_if_polling(rq->idle)) 2239 smp_send_reschedule(cpu); 2240 else 2241 trace_sched_wake_idle_without_ipi(cpu); 2242 } 2243 } 2244 2245 void wake_up_if_idle(int cpu) 2246 { 2247 struct rq *rq = cpu_rq(cpu); 2248 struct rq_flags rf; 2249 2250 rcu_read_lock(); 2251 2252 if (!is_idle_task(rcu_dereference(rq->curr))) 2253 goto out; 2254 2255 if (set_nr_if_polling(rq->idle)) { 2256 trace_sched_wake_idle_without_ipi(cpu); 2257 } else { 2258 rq_lock_irqsave(rq, &rf); 2259 if (is_idle_task(rq->curr)) 2260 smp_send_reschedule(cpu); 2261 /* Else CPU is not idle, do nothing here: */ 2262 rq_unlock_irqrestore(rq, &rf); 2263 } 2264 2265 out: 2266 rcu_read_unlock(); 2267 } 2268 2269 bool cpus_share_cache(int this_cpu, int that_cpu) 2270 { 2271 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 2272 } 2273 #endif /* CONFIG_SMP */ 2274 2275 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 2276 { 2277 struct rq *rq = cpu_rq(cpu); 2278 struct rq_flags rf; 2279 2280 #if defined(CONFIG_SMP) 2281 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { 2282 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ 2283 ttwu_queue_remote(p, cpu, wake_flags); 2284 return; 2285 } 2286 #endif 2287 2288 rq_lock(rq, &rf); 2289 update_rq_clock(rq); 2290 ttwu_do_activate(rq, p, wake_flags, &rf); 2291 rq_unlock(rq, &rf); 2292 } 2293 2294 /* 2295 * Notes on Program-Order guarantees on SMP systems. 2296 * 2297 * MIGRATION 2298 * 2299 * The basic program-order guarantee on SMP systems is that when a task [t] 2300 * migrates, all its activity on its old CPU [c0] happens-before any subsequent 2301 * execution on its new CPU [c1]. 2302 * 2303 * For migration (of runnable tasks) this is provided by the following means: 2304 * 2305 * A) UNLOCK of the rq(c0)->lock scheduling out task t 2306 * B) migration for t is required to synchronize *both* rq(c0)->lock and 2307 * rq(c1)->lock (if not at the same time, then in that order). 2308 * C) LOCK of the rq(c1)->lock scheduling in task 2309 * 2310 * Release/acquire chaining guarantees that B happens after A and C after B. 2311 * Note: the CPU doing B need not be c0 or c1 2312 * 2313 * Example: 2314 * 2315 * CPU0 CPU1 CPU2 2316 * 2317 * LOCK rq(0)->lock 2318 * sched-out X 2319 * sched-in Y 2320 * UNLOCK rq(0)->lock 2321 * 2322 * LOCK rq(0)->lock // orders against CPU0 2323 * dequeue X 2324 * UNLOCK rq(0)->lock 2325 * 2326 * LOCK rq(1)->lock 2327 * enqueue X 2328 * UNLOCK rq(1)->lock 2329 * 2330 * LOCK rq(1)->lock // orders against CPU2 2331 * sched-out Z 2332 * sched-in X 2333 * UNLOCK rq(1)->lock 2334 * 2335 * 2336 * BLOCKING -- aka. SLEEP + WAKEUP 2337 * 2338 * For blocking we (obviously) need to provide the same guarantee as for 2339 * migration. However the means are completely different as there is no lock 2340 * chain to provide order. 
Instead we do: 2341 * 2342 * 1) smp_store_release(X->on_cpu, 0) 2343 * 2) smp_cond_load_acquire(!X->on_cpu) 2344 * 2345 * Example: 2346 * 2347 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 2348 * 2349 * LOCK rq(0)->lock LOCK X->pi_lock 2350 * dequeue X 2351 * sched-out X 2352 * smp_store_release(X->on_cpu, 0); 2353 * 2354 * smp_cond_load_acquire(&X->on_cpu, !VAL); 2355 * X->state = WAKING 2356 * set_task_cpu(X,2) 2357 * 2358 * LOCK rq(2)->lock 2359 * enqueue X 2360 * X->state = RUNNING 2361 * UNLOCK rq(2)->lock 2362 * 2363 * LOCK rq(2)->lock // orders against CPU1 2364 * sched-out Z 2365 * sched-in X 2366 * UNLOCK rq(2)->lock 2367 * 2368 * UNLOCK X->pi_lock 2369 * UNLOCK rq(0)->lock 2370 * 2371 * 2372 * However, for wakeups there is a second guarantee we must provide, namely we 2373 * must ensure that CONDITION=1 done by the caller can not be reordered with 2374 * accesses to the task state; see try_to_wake_up() and set_current_state(). 2375 */ 2376 2377 /** 2378 * try_to_wake_up - wake up a thread 2379 * @p: the thread to be awakened 2380 * @state: the mask of task states that can be woken 2381 * @wake_flags: wake modifier flags (WF_*) 2382 * 2383 * If (@state & @p->state) @p->state = TASK_RUNNING. 2384 * 2385 * If the task was not queued/runnable, also place it back on a runqueue. 2386 * 2387 * Atomic against schedule() which would dequeue a task, also see 2388 * set_current_state(). 2389 * 2390 * This function executes a full memory barrier before accessing the task 2391 * state; see set_current_state(). 2392 * 2393 * Return: %true if @p->state changes (an actual wakeup was done), 2394 * %false otherwise. 2395 */ 2396 static int 2397 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 2398 { 2399 unsigned long flags; 2400 int cpu, success = 0; 2401 2402 preempt_disable(); 2403 if (p == current) { 2404 /* 2405 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) 2406 * == smp_processor_id()'. Together this means we can special 2407 * case the whole 'p->on_rq && ttwu_remote()' case below 2408 * without taking any locks. 2409 * 2410 * In particular: 2411 * - we rely on Program-Order guarantees for all the ordering, 2412 * - we're serialized against set_special_state() by virtue of 2413 * it disabling IRQs (this allows not taking ->pi_lock). 2414 */ 2415 if (!(p->state & state)) 2416 goto out; 2417 2418 success = 1; 2419 cpu = task_cpu(p); 2420 trace_sched_waking(p); 2421 p->state = TASK_RUNNING; 2422 trace_sched_wakeup(p); 2423 goto out; 2424 } 2425 2426 /* 2427 * If we are going to wake up a thread waiting for CONDITION we 2428 * need to ensure that CONDITION=1 done by the caller can not be 2429 * reordered with p->state check below. This pairs with mb() in 2430 * set_current_state() the waiting thread does. 2431 */ 2432 raw_spin_lock_irqsave(&p->pi_lock, flags); 2433 smp_mb__after_spinlock(); 2434 if (!(p->state & state)) 2435 goto unlock; 2436 2437 trace_sched_waking(p); 2438 2439 /* We're going to change ->state: */ 2440 success = 1; 2441 cpu = task_cpu(p); 2442 2443 /* 2444 * Ensure we load p->on_rq _after_ p->state, otherwise it would 2445 * be possible to, falsely, observe p->on_rq == 0 and get stuck 2446 * in smp_cond_load_acquire() below. 
2447 * 2448 * sched_ttwu_pending() try_to_wake_up() 2449 * STORE p->on_rq = 1 LOAD p->state 2450 * UNLOCK rq->lock 2451 * 2452 * __schedule() (switch to task 'p') 2453 * LOCK rq->lock smp_rmb(); 2454 * smp_mb__after_spinlock(); 2455 * UNLOCK rq->lock 2456 * 2457 * [task p] 2458 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq 2459 * 2460 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 2461 * __schedule(). See the comment for smp_mb__after_spinlock(). 2462 */ 2463 smp_rmb(); 2464 if (p->on_rq && ttwu_remote(p, wake_flags)) 2465 goto unlock; 2466 2467 #ifdef CONFIG_SMP 2468 /* 2469 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be 2470 * possible to, falsely, observe p->on_cpu == 0. 2471 * 2472 * One must be running (->on_cpu == 1) in order to remove oneself 2473 * from the runqueue. 2474 * 2475 * __schedule() (switch to task 'p') try_to_wake_up() 2476 * STORE p->on_cpu = 1 LOAD p->on_rq 2477 * UNLOCK rq->lock 2478 * 2479 * __schedule() (put 'p' to sleep) 2480 * LOCK rq->lock smp_rmb(); 2481 * smp_mb__after_spinlock(); 2482 * STORE p->on_rq = 0 LOAD p->on_cpu 2483 * 2484 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 2485 * __schedule(). See the comment for smp_mb__after_spinlock(). 2486 */ 2487 smp_rmb(); 2488 2489 /* 2490 * If the owning (remote) CPU is still in the middle of schedule() with 2491 * this task as prev, wait until its done referencing the task. 2492 * 2493 * Pairs with the smp_store_release() in finish_task(). 2494 * 2495 * This ensures that tasks getting woken will be fully ordered against 2496 * their previous state and preserve Program Order. 2497 */ 2498 smp_cond_load_acquire(&p->on_cpu, !VAL); 2499 2500 p->sched_contributes_to_load = !!task_contributes_to_load(p); 2501 p->state = TASK_WAKING; 2502 2503 if (p->in_iowait) { 2504 delayacct_blkio_end(p); 2505 atomic_dec(&task_rq(p)->nr_iowait); 2506 } 2507 2508 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); 2509 if (task_cpu(p) != cpu) { 2510 wake_flags |= WF_MIGRATED; 2511 psi_ttwu_dequeue(p); 2512 set_task_cpu(p, cpu); 2513 } 2514 2515 #else /* CONFIG_SMP */ 2516 2517 if (p->in_iowait) { 2518 delayacct_blkio_end(p); 2519 atomic_dec(&task_rq(p)->nr_iowait); 2520 } 2521 2522 #endif /* CONFIG_SMP */ 2523 2524 ttwu_queue(p, cpu, wake_flags); 2525 unlock: 2526 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2527 out: 2528 if (success) 2529 ttwu_stat(p, cpu, wake_flags); 2530 preempt_enable(); 2531 2532 return success; 2533 } 2534 2535 /** 2536 * wake_up_process - Wake up a specific process 2537 * @p: The process to be woken up. 2538 * 2539 * Attempt to wake up the nominated process and move it to the set of runnable 2540 * processes. 2541 * 2542 * Return: 1 if the process was woken up, 0 if it was already running. 2543 * 2544 * This function executes a full memory barrier before accessing the task state. 2545 */ 2546 int wake_up_process(struct task_struct *p) 2547 { 2548 return try_to_wake_up(p, TASK_NORMAL, 0); 2549 } 2550 EXPORT_SYMBOL(wake_up_process); 2551 2552 int wake_up_state(struct task_struct *p, unsigned int state) 2553 { 2554 return try_to_wake_up(p, state, 0); 2555 } 2556 2557 /* 2558 * Perform scheduler related setup for a newly forked process p. 2559 * p is forked by current. 
2560 * 2561 * __sched_fork() is basic setup used by init_idle() too: 2562 */ 2563 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 2564 { 2565 p->on_rq = 0; 2566 2567 p->se.on_rq = 0; 2568 p->se.exec_start = 0; 2569 p->se.sum_exec_runtime = 0; 2570 p->se.prev_sum_exec_runtime = 0; 2571 p->se.nr_migrations = 0; 2572 p->se.vruntime = 0; 2573 INIT_LIST_HEAD(&p->se.group_node); 2574 2575 #ifdef CONFIG_FAIR_GROUP_SCHED 2576 p->se.cfs_rq = NULL; 2577 #endif 2578 2579 #ifdef CONFIG_SCHEDSTATS 2580 /* Even if schedstat is disabled, there should not be garbage */ 2581 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 2582 #endif 2583 2584 RB_CLEAR_NODE(&p->dl.rb_node); 2585 init_dl_task_timer(&p->dl); 2586 init_dl_inactive_task_timer(&p->dl); 2587 __dl_clear_params(p); 2588 2589 INIT_LIST_HEAD(&p->rt.run_list); 2590 p->rt.timeout = 0; 2591 p->rt.time_slice = sched_rr_timeslice; 2592 p->rt.on_rq = 0; 2593 p->rt.on_list = 0; 2594 2595 #ifdef CONFIG_PREEMPT_NOTIFIERS 2596 INIT_HLIST_HEAD(&p->preempt_notifiers); 2597 #endif 2598 2599 #ifdef CONFIG_COMPACTION 2600 p->capture_control = NULL; 2601 #endif 2602 init_numa_balancing(clone_flags, p); 2603 } 2604 2605 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 2606 2607 #ifdef CONFIG_NUMA_BALANCING 2608 2609 void set_numabalancing_state(bool enabled) 2610 { 2611 if (enabled) 2612 static_branch_enable(&sched_numa_balancing); 2613 else 2614 static_branch_disable(&sched_numa_balancing); 2615 } 2616 2617 #ifdef CONFIG_PROC_SYSCTL 2618 int sysctl_numa_balancing(struct ctl_table *table, int write, 2619 void __user *buffer, size_t *lenp, loff_t *ppos) 2620 { 2621 struct ctl_table t; 2622 int err; 2623 int state = static_branch_likely(&sched_numa_balancing); 2624 2625 if (write && !capable(CAP_SYS_ADMIN)) 2626 return -EPERM; 2627 2628 t = *table; 2629 t.data = &state; 2630 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2631 if (err < 0) 2632 return err; 2633 if (write) 2634 set_numabalancing_state(state); 2635 return err; 2636 } 2637 #endif 2638 #endif 2639 2640 #ifdef CONFIG_SCHEDSTATS 2641 2642 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 2643 static bool __initdata __sched_schedstats = false; 2644 2645 static void set_schedstats(bool enabled) 2646 { 2647 if (enabled) 2648 static_branch_enable(&sched_schedstats); 2649 else 2650 static_branch_disable(&sched_schedstats); 2651 } 2652 2653 void force_schedstat_enabled(void) 2654 { 2655 if (!schedstat_enabled()) { 2656 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 2657 static_branch_enable(&sched_schedstats); 2658 } 2659 } 2660 2661 static int __init setup_schedstats(char *str) 2662 { 2663 int ret = 0; 2664 if (!str) 2665 goto out; 2666 2667 /* 2668 * This code is called before jump labels have been set up, so we can't 2669 * change the static branch directly just yet. Instead set a temporary 2670 * variable so init_schedstats() can do it later. 
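 *
 * E.g. booting with "schedstats=enable" only records the request here;
 * init_schedstats() applies it once jump labels are working.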
2671 */ 2672 if (!strcmp(str, "enable")) { 2673 __sched_schedstats = true; 2674 ret = 1; 2675 } else if (!strcmp(str, "disable")) { 2676 __sched_schedstats = false; 2677 ret = 1; 2678 } 2679 out: 2680 if (!ret) 2681 pr_warn("Unable to parse schedstats=\n"); 2682 2683 return ret; 2684 } 2685 __setup("schedstats=", setup_schedstats); 2686 2687 static void __init init_schedstats(void) 2688 { 2689 set_schedstats(__sched_schedstats); 2690 } 2691 2692 #ifdef CONFIG_PROC_SYSCTL 2693 int sysctl_schedstats(struct ctl_table *table, int write, 2694 void __user *buffer, size_t *lenp, loff_t *ppos) 2695 { 2696 struct ctl_table t; 2697 int err; 2698 int state = static_branch_likely(&sched_schedstats); 2699 2700 if (write && !capable(CAP_SYS_ADMIN)) 2701 return -EPERM; 2702 2703 t = *table; 2704 t.data = &state; 2705 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2706 if (err < 0) 2707 return err; 2708 if (write) 2709 set_schedstats(state); 2710 return err; 2711 } 2712 #endif /* CONFIG_PROC_SYSCTL */ 2713 #else /* !CONFIG_SCHEDSTATS */ 2714 static inline void init_schedstats(void) {} 2715 #endif /* CONFIG_SCHEDSTATS */ 2716 2717 /* 2718 * fork()/clone()-time setup: 2719 */ 2720 int sched_fork(unsigned long clone_flags, struct task_struct *p) 2721 { 2722 unsigned long flags; 2723 2724 __sched_fork(clone_flags, p); 2725 /* 2726 * We mark the process as NEW here. This guarantees that 2727 * nobody will actually run it, and a signal or other external 2728 * event cannot wake it up and insert it on the runqueue either. 2729 */ 2730 p->state = TASK_NEW; 2731 2732 /* 2733 * Make sure we do not leak PI boosting priority to the child. 2734 */ 2735 p->prio = current->normal_prio; 2736 2737 uclamp_fork(p); 2738 2739 /* 2740 * Revert to default priority/policy on fork if requested. 2741 */ 2742 if (unlikely(p->sched_reset_on_fork)) { 2743 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 2744 p->policy = SCHED_NORMAL; 2745 p->static_prio = NICE_TO_PRIO(0); 2746 p->rt_priority = 0; 2747 } else if (PRIO_TO_NICE(p->static_prio) < 0) 2748 p->static_prio = NICE_TO_PRIO(0); 2749 2750 p->prio = p->normal_prio = __normal_prio(p); 2751 set_load_weight(p, false); 2752 2753 /* 2754 * We don't need the reset flag anymore after the fork. It has 2755 * fulfilled its duty: 2756 */ 2757 p->sched_reset_on_fork = 0; 2758 } 2759 2760 if (dl_prio(p->prio)) 2761 return -EAGAIN; 2762 else if (rt_prio(p->prio)) 2763 p->sched_class = &rt_sched_class; 2764 else 2765 p->sched_class = &fair_sched_class; 2766 2767 init_entity_runnable_average(&p->se); 2768 2769 /* 2770 * The child is not yet in the pid-hash so no cgroup attach races, 2771 * and the cgroup is pinned to this child due to cgroup_fork() 2772 * is ran before sched_fork(). 2773 * 2774 * Silence PROVE_RCU. 2775 */ 2776 raw_spin_lock_irqsave(&p->pi_lock, flags); 2777 /* 2778 * We're setting the CPU for the first time, we don't migrate, 2779 * so use __set_task_cpu(). 
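 *
 * (set_task_cpu() would additionally run the migration callbacks and
 * statistics, which make no sense for a task that has never run.)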
2780 */ 2781 __set_task_cpu(p, smp_processor_id()); 2782 if (p->sched_class->task_fork) 2783 p->sched_class->task_fork(p); 2784 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2785 2786 #ifdef CONFIG_SCHED_INFO 2787 if (likely(sched_info_on())) 2788 memset(&p->sched_info, 0, sizeof(p->sched_info)); 2789 #endif 2790 #if defined(CONFIG_SMP) 2791 p->on_cpu = 0; 2792 #endif 2793 init_task_preempt_count(p); 2794 #ifdef CONFIG_SMP 2795 plist_node_init(&p->pushable_tasks, MAX_PRIO); 2796 RB_CLEAR_NODE(&p->pushable_dl_tasks); 2797 #endif 2798 return 0; 2799 } 2800 2801 unsigned long to_ratio(u64 period, u64 runtime) 2802 { 2803 if (runtime == RUNTIME_INF) 2804 return BW_UNIT; 2805 2806 /* 2807 * Doing this here saves a lot of checks in all 2808 * the calling paths, and returning zero seems 2809 * safe for them anyway. 2810 */ 2811 if (period == 0) 2812 return 0; 2813 2814 return div64_u64(runtime << BW_SHIFT, period); 2815 } 2816 2817 /* 2818 * wake_up_new_task - wake up a newly created task for the first time. 2819 * 2820 * This function will do some initial scheduler statistics housekeeping 2821 * that must be done for every newly created context, then puts the task 2822 * on the runqueue and wakes it. 2823 */ 2824 void wake_up_new_task(struct task_struct *p) 2825 { 2826 struct rq_flags rf; 2827 struct rq *rq; 2828 2829 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 2830 p->state = TASK_RUNNING; 2831 #ifdef CONFIG_SMP 2832 /* 2833 * Fork balancing, do it here and not earlier because: 2834 * - cpus_ptr can change in the fork path 2835 * - any previously selected CPU might disappear through hotplug 2836 * 2837 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 2838 * as we're not fully set-up yet. 2839 */ 2840 p->recent_used_cpu = task_cpu(p); 2841 __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 2842 #endif 2843 rq = __task_rq_lock(p, &rf); 2844 update_rq_clock(rq); 2845 post_init_entity_util_avg(p); 2846 2847 activate_task(rq, p, ENQUEUE_NOCLOCK); 2848 trace_sched_wakeup_new(p); 2849 check_preempt_curr(rq, p, WF_FORK); 2850 #ifdef CONFIG_SMP 2851 if (p->sched_class->task_woken) { 2852 /* 2853 * Nothing relies on rq->lock after this, so its fine to 2854 * drop it. 2855 */ 2856 rq_unpin_lock(rq, &rf); 2857 p->sched_class->task_woken(rq, p); 2858 rq_repin_lock(rq, &rf); 2859 } 2860 #endif 2861 task_rq_unlock(rq, p, &rf); 2862 } 2863 2864 #ifdef CONFIG_PREEMPT_NOTIFIERS 2865 2866 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); 2867 2868 void preempt_notifier_inc(void) 2869 { 2870 static_branch_inc(&preempt_notifier_key); 2871 } 2872 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 2873 2874 void preempt_notifier_dec(void) 2875 { 2876 static_branch_dec(&preempt_notifier_key); 2877 } 2878 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 2879 2880 /** 2881 * preempt_notifier_register - tell me when current is being preempted & rescheduled 2882 * @notifier: notifier struct to register 2883 */ 2884 void preempt_notifier_register(struct preempt_notifier *notifier) 2885 { 2886 if (!static_branch_unlikely(&preempt_notifier_key)) 2887 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 2888 2889 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 2890 } 2891 EXPORT_SYMBOL_GPL(preempt_notifier_register); 2892 2893 /** 2894 * preempt_notifier_unregister - no longer interested in preemption notifications 2895 * @notifier: notifier struct to unregister 2896 * 2897 * This is *not* safe to call from within a preemption notifier. 
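 *
 * (The fire_sched_{in,out}_preempt_notifiers() paths below iterate the
 * list with plain hlist_for_each_entry(), so unregistering the notifier
 * that is currently being fired would break the walk.)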
2898 */ 2899 void preempt_notifier_unregister(struct preempt_notifier *notifier) 2900 { 2901 hlist_del(¬ifier->link); 2902 } 2903 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 2904 2905 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 2906 { 2907 struct preempt_notifier *notifier; 2908 2909 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2910 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 2911 } 2912 2913 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2914 { 2915 if (static_branch_unlikely(&preempt_notifier_key)) 2916 __fire_sched_in_preempt_notifiers(curr); 2917 } 2918 2919 static void 2920 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 2921 struct task_struct *next) 2922 { 2923 struct preempt_notifier *notifier; 2924 2925 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2926 notifier->ops->sched_out(notifier, next); 2927 } 2928 2929 static __always_inline void 2930 fire_sched_out_preempt_notifiers(struct task_struct *curr, 2931 struct task_struct *next) 2932 { 2933 if (static_branch_unlikely(&preempt_notifier_key)) 2934 __fire_sched_out_preempt_notifiers(curr, next); 2935 } 2936 2937 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 2938 2939 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2940 { 2941 } 2942 2943 static inline void 2944 fire_sched_out_preempt_notifiers(struct task_struct *curr, 2945 struct task_struct *next) 2946 { 2947 } 2948 2949 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 2950 2951 static inline void prepare_task(struct task_struct *next) 2952 { 2953 #ifdef CONFIG_SMP 2954 /* 2955 * Claim the task as running, we do this before switching to it 2956 * such that any running task will have this set. 2957 */ 2958 next->on_cpu = 1; 2959 #endif 2960 } 2961 2962 static inline void finish_task(struct task_struct *prev) 2963 { 2964 #ifdef CONFIG_SMP 2965 /* 2966 * After ->on_cpu is cleared, the task can be moved to a different CPU. 2967 * We must ensure this doesn't happen until the switch is completely 2968 * finished. 2969 * 2970 * In particular, the load of prev->state in finish_task_switch() must 2971 * happen before this. 2972 * 2973 * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 
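 *
 * I.e. the pairing is:
 *
 *   finish_task()                          try_to_wake_up()
 *     smp_store_release(&prev->on_cpu, 0);
 *                                          smp_cond_load_acquire(&p->on_cpu, !VAL);
 *
 * so the waker observes all of this CPU's prior accesses to @prev once
 * it sees ->on_cpu == 0.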
2974 */ 2975 smp_store_release(&prev->on_cpu, 0); 2976 #endif 2977 } 2978 2979 static inline void 2980 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) 2981 { 2982 /* 2983 * Since the runqueue lock will be released by the next 2984 * task (which is an invalid locking op but in the case 2985 * of the scheduler it's an obvious special-case), so we 2986 * do an early lockdep release here: 2987 */ 2988 rq_unpin_lock(rq, rf); 2989 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 2990 #ifdef CONFIG_DEBUG_SPINLOCK 2991 /* this is a valid case when another task releases the spinlock */ 2992 rq->lock.owner = next; 2993 #endif 2994 } 2995 2996 static inline void finish_lock_switch(struct rq *rq) 2997 { 2998 /* 2999 * If we are tracking spinlock dependencies then we have to 3000 * fix up the runqueue lock - which gets 'carried over' from 3001 * prev into current: 3002 */ 3003 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 3004 raw_spin_unlock_irq(&rq->lock); 3005 } 3006 3007 /* 3008 * NOP if the arch has not defined these: 3009 */ 3010 3011 #ifndef prepare_arch_switch 3012 # define prepare_arch_switch(next) do { } while (0) 3013 #endif 3014 3015 #ifndef finish_arch_post_lock_switch 3016 # define finish_arch_post_lock_switch() do { } while (0) 3017 #endif 3018 3019 /** 3020 * prepare_task_switch - prepare to switch tasks 3021 * @rq: the runqueue preparing to switch 3022 * @prev: the current task that is being switched out 3023 * @next: the task we are going to switch to. 3024 * 3025 * This is called with the rq lock held and interrupts off. It must 3026 * be paired with a subsequent finish_task_switch after the context 3027 * switch. 3028 * 3029 * prepare_task_switch sets up locking and calls architecture specific 3030 * hooks. 3031 */ 3032 static inline void 3033 prepare_task_switch(struct rq *rq, struct task_struct *prev, 3034 struct task_struct *next) 3035 { 3036 kcov_prepare_switch(prev); 3037 sched_info_switch(rq, prev, next); 3038 perf_event_task_sched_out(prev, next); 3039 rseq_preempt(prev); 3040 fire_sched_out_preempt_notifiers(prev, next); 3041 prepare_task(next); 3042 prepare_arch_switch(next); 3043 } 3044 3045 /** 3046 * finish_task_switch - clean up after a task-switch 3047 * @prev: the thread we just switched away from. 3048 * 3049 * finish_task_switch must be called after the context switch, paired 3050 * with a prepare_task_switch call before the context switch. 3051 * finish_task_switch will reconcile locking set up by prepare_task_switch, 3052 * and do any other architecture-specific cleanup actions. 3053 * 3054 * Note that we may have delayed dropping an mm in context_switch(). If 3055 * so, we finish that here outside of the runqueue lock. (Doing it 3056 * with the lock held can cause deadlocks; see schedule() for 3057 * details.) 3058 * 3059 * The context switch have flipped the stack from under us and restored the 3060 * local variables which were saved when this task called schedule() in the 3061 * past. prev == current is still correct but we need to recalculate this_rq 3062 * because prev may have moved to another CPU. 
3063 */ 3064 static struct rq *finish_task_switch(struct task_struct *prev) 3065 __releases(rq->lock) 3066 { 3067 struct rq *rq = this_rq(); 3068 struct mm_struct *mm = rq->prev_mm; 3069 long prev_state; 3070 3071 /* 3072 * The previous task will have left us with a preempt_count of 2 3073 * because it left us after: 3074 * 3075 * schedule() 3076 * preempt_disable(); // 1 3077 * __schedule() 3078 * raw_spin_lock_irq(&rq->lock) // 2 3079 * 3080 * Also, see FORK_PREEMPT_COUNT. 3081 */ 3082 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, 3083 "corrupted preempt_count: %s/%d/0x%x\n", 3084 current->comm, current->pid, preempt_count())) 3085 preempt_count_set(FORK_PREEMPT_COUNT); 3086 3087 rq->prev_mm = NULL; 3088 3089 /* 3090 * A task struct has one reference for the use as "current". 3091 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 3092 * schedule one last time. The schedule call will never return, and 3093 * the scheduled task must drop that reference. 3094 * 3095 * We must observe prev->state before clearing prev->on_cpu (in 3096 * finish_task), otherwise a concurrent wakeup can get prev 3097 * running on another CPU and we could rave with its RUNNING -> DEAD 3098 * transition, resulting in a double drop. 3099 */ 3100 prev_state = prev->state; 3101 vtime_task_switch(prev); 3102 perf_event_task_sched_in(prev, current); 3103 finish_task(prev); 3104 finish_lock_switch(rq); 3105 finish_arch_post_lock_switch(); 3106 kcov_finish_switch(current); 3107 3108 fire_sched_in_preempt_notifiers(current); 3109 /* 3110 * When switching through a kernel thread, the loop in 3111 * membarrier_{private,global}_expedited() may have observed that 3112 * kernel thread and not issued an IPI. It is therefore possible to 3113 * schedule between user->kernel->user threads without passing though 3114 * switch_mm(). Membarrier requires a barrier after storing to 3115 * rq->curr, before returning to userspace, so provide them here: 3116 * 3117 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly 3118 * provided by mmdrop(), 3119 * - a sync_core for SYNC_CORE. 3120 */ 3121 if (mm) { 3122 membarrier_mm_sync_core_before_usermode(mm); 3123 mmdrop(mm); 3124 } 3125 if (unlikely(prev_state == TASK_DEAD)) { 3126 if (prev->sched_class->task_dead) 3127 prev->sched_class->task_dead(prev); 3128 3129 /* 3130 * Remove function-return probe instances associated with this 3131 * task and put them back on the free list. 3132 */ 3133 kprobe_flush_task(prev); 3134 3135 /* Task is done with its stack. 
*/ 3136 put_task_stack(prev); 3137 3138 put_task_struct(prev); 3139 } 3140 3141 tick_nohz_task_switch(); 3142 return rq; 3143 } 3144 3145 #ifdef CONFIG_SMP 3146 3147 /* rq->lock is NOT held, but preemption is disabled */ 3148 static void __balance_callback(struct rq *rq) 3149 { 3150 struct callback_head *head, *next; 3151 void (*func)(struct rq *rq); 3152 unsigned long flags; 3153 3154 raw_spin_lock_irqsave(&rq->lock, flags); 3155 head = rq->balance_callback; 3156 rq->balance_callback = NULL; 3157 while (head) { 3158 func = (void (*)(struct rq *))head->func; 3159 next = head->next; 3160 head->next = NULL; 3161 head = next; 3162 3163 func(rq); 3164 } 3165 raw_spin_unlock_irqrestore(&rq->lock, flags); 3166 } 3167 3168 static inline void balance_callback(struct rq *rq) 3169 { 3170 if (unlikely(rq->balance_callback)) 3171 __balance_callback(rq); 3172 } 3173 3174 #else 3175 3176 static inline void balance_callback(struct rq *rq) 3177 { 3178 } 3179 3180 #endif 3181 3182 /** 3183 * schedule_tail - first thing a freshly forked thread must call. 3184 * @prev: the thread we just switched away from. 3185 */ 3186 asmlinkage __visible void schedule_tail(struct task_struct *prev) 3187 __releases(rq->lock) 3188 { 3189 struct rq *rq; 3190 3191 /* 3192 * New tasks start with FORK_PREEMPT_COUNT, see there and 3193 * finish_task_switch() for details. 3194 * 3195 * finish_task_switch() will drop rq->lock() and lower preempt_count 3196 * and the preempt_enable() will end up enabling preemption (on 3197 * PREEMPT_COUNT kernels). 3198 */ 3199 3200 rq = finish_task_switch(prev); 3201 balance_callback(rq); 3202 preempt_enable(); 3203 3204 if (current->set_child_tid) 3205 put_user(task_pid_vnr(current), current->set_child_tid); 3206 3207 calculate_sigpending(); 3208 } 3209 3210 /* 3211 * context_switch - switch to the new MM and the new thread's register state. 3212 */ 3213 static __always_inline struct rq * 3214 context_switch(struct rq *rq, struct task_struct *prev, 3215 struct task_struct *next, struct rq_flags *rf) 3216 { 3217 struct mm_struct *mm, *oldmm; 3218 3219 prepare_task_switch(rq, prev, next); 3220 3221 mm = next->mm; 3222 oldmm = prev->active_mm; 3223 /* 3224 * For paravirt, this is coupled with an exit in switch_to to 3225 * combine the page table reload and the switch backend into 3226 * one hypercall. 3227 */ 3228 arch_start_context_switch(prev); 3229 3230 /* 3231 * If mm is non-NULL, we pass through switch_mm(). If mm is 3232 * NULL, we will pass through mmdrop() in finish_task_switch(). 3233 * Both of these contain the full memory barrier required by 3234 * membarrier after storing to rq->curr, before returning to 3235 * user-space. 3236 */ 3237 if (!mm) { 3238 next->active_mm = oldmm; 3239 mmgrab(oldmm); 3240 enter_lazy_tlb(oldmm, next); 3241 } else 3242 switch_mm_irqs_off(oldmm, mm, next); 3243 3244 if (!prev->mm) { 3245 prev->active_mm = NULL; 3246 rq->prev_mm = oldmm; 3247 } 3248 3249 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 3250 3251 prepare_lock_switch(rq, next, rf); 3252 3253 /* Here we just switch the register state and the stack. */ 3254 switch_to(prev, next, prev); 3255 barrier(); 3256 3257 return finish_task_switch(prev); 3258 } 3259 3260 /* 3261 * nr_running and nr_context_switches: 3262 * 3263 * externally visible scheduler statistics: current number of runnable 3264 * threads, total number of context switches performed since bootup. 
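 *
 * (e.g. /proc/loadavg and /proc/stat report these numbers.)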
3265 */ 3266 unsigned long nr_running(void) 3267 { 3268 unsigned long i, sum = 0; 3269 3270 for_each_online_cpu(i) 3271 sum += cpu_rq(i)->nr_running; 3272 3273 return sum; 3274 } 3275 3276 /* 3277 * Check if only the current task is running on the CPU. 3278 * 3279 * Caution: this function does not check that the caller has disabled 3280 * preemption, thus the result might have a time-of-check-to-time-of-use 3281 * race. The caller is responsible to use it correctly, for example: 3282 * 3283 * - from a non-preemptible section (of course) 3284 * 3285 * - from a thread that is bound to a single CPU 3286 * 3287 * - in a loop with very short iterations (e.g. a polling loop) 3288 */ 3289 bool single_task_running(void) 3290 { 3291 return raw_rq()->nr_running == 1; 3292 } 3293 EXPORT_SYMBOL(single_task_running); 3294 3295 unsigned long long nr_context_switches(void) 3296 { 3297 int i; 3298 unsigned long long sum = 0; 3299 3300 for_each_possible_cpu(i) 3301 sum += cpu_rq(i)->nr_switches; 3302 3303 return sum; 3304 } 3305 3306 /* 3307 * Consumers of these two interfaces, like for example the cpuidle menu 3308 * governor, are using nonsensical data. Preferring shallow idle state selection 3309 * for a CPU that has IO-wait which might not even end up running the task when 3310 * it does become runnable. 3311 */ 3312 3313 unsigned long nr_iowait_cpu(int cpu) 3314 { 3315 return atomic_read(&cpu_rq(cpu)->nr_iowait); 3316 } 3317 3318 /* 3319 * IO-wait accounting, and how its mostly bollocks (on SMP). 3320 * 3321 * The idea behind IO-wait account is to account the idle time that we could 3322 * have spend running if it were not for IO. That is, if we were to improve the 3323 * storage performance, we'd have a proportional reduction in IO-wait time. 3324 * 3325 * This all works nicely on UP, where, when a task blocks on IO, we account 3326 * idle time as IO-wait, because if the storage were faster, it could've been 3327 * running and we'd not be idle. 3328 * 3329 * This has been extended to SMP, by doing the same for each CPU. This however 3330 * is broken. 3331 * 3332 * Imagine for instance the case where two tasks block on one CPU, only the one 3333 * CPU will have IO-wait accounted, while the other has regular idle. Even 3334 * though, if the storage were faster, both could've ran at the same time, 3335 * utilising both CPUs. 3336 * 3337 * This means, that when looking globally, the current IO-wait accounting on 3338 * SMP is a lower bound, by reason of under accounting. 3339 * 3340 * Worse, since the numbers are provided per CPU, they are sometimes 3341 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly 3342 * associated with any one particular CPU, it can wake to another CPU than it 3343 * blocked on. This means the per CPU IO-wait number is meaningless. 3344 * 3345 * Task CPU affinities can make all that even more 'interesting'. 3346 */ 3347 3348 unsigned long nr_iowait(void) 3349 { 3350 unsigned long i, sum = 0; 3351 3352 for_each_possible_cpu(i) 3353 sum += nr_iowait_cpu(i); 3354 3355 return sum; 3356 } 3357 3358 #ifdef CONFIG_SMP 3359 3360 /* 3361 * sched_exec - execve() is a valuable balancing opportunity, because at 3362 * this point the task has the smallest effective memory and cache footprint. 
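 *
 * So ask the task's class for an SD_BALANCE_EXEC placement and, if it
 * differs from the current CPU, migrate via stop_one_cpu() running
 * migration_cpu_stop() below.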
3363 */ 3364 void sched_exec(void) 3365 { 3366 struct task_struct *p = current; 3367 unsigned long flags; 3368 int dest_cpu; 3369 3370 raw_spin_lock_irqsave(&p->pi_lock, flags); 3371 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); 3372 if (dest_cpu == smp_processor_id()) 3373 goto unlock; 3374 3375 if (likely(cpu_active(dest_cpu))) { 3376 struct migration_arg arg = { p, dest_cpu }; 3377 3378 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3379 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 3380 return; 3381 } 3382 unlock: 3383 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3384 } 3385 3386 #endif 3387 3388 DEFINE_PER_CPU(struct kernel_stat, kstat); 3389 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 3390 3391 EXPORT_PER_CPU_SYMBOL(kstat); 3392 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 3393 3394 /* 3395 * The function fair_sched_class.update_curr accesses the struct curr 3396 * and its field curr->exec_start; when called from task_sched_runtime(), 3397 * we observe a high rate of cache misses in practice. 3398 * Prefetching this data results in improved performance. 3399 */ 3400 static inline void prefetch_curr_exec_start(struct task_struct *p) 3401 { 3402 #ifdef CONFIG_FAIR_GROUP_SCHED 3403 struct sched_entity *curr = (&p->se)->cfs_rq->curr; 3404 #else 3405 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; 3406 #endif 3407 prefetch(curr); 3408 prefetch(&curr->exec_start); 3409 } 3410 3411 /* 3412 * Return accounted runtime for the task. 3413 * In case the task is currently running, return the runtime plus current's 3414 * pending runtime that have not been accounted yet. 3415 */ 3416 unsigned long long task_sched_runtime(struct task_struct *p) 3417 { 3418 struct rq_flags rf; 3419 struct rq *rq; 3420 u64 ns; 3421 3422 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 3423 /* 3424 * 64-bit doesn't need locks to atomically read a 64-bit value. 3425 * So we have a optimization chance when the task's delta_exec is 0. 3426 * Reading ->on_cpu is racy, but this is ok. 3427 * 3428 * If we race with it leaving CPU, we'll take a lock. So we're correct. 3429 * If we race with it entering CPU, unaccounted time is 0. This is 3430 * indistinguishable from the read occurring a few cycles earlier. 3431 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 3432 * been accounted, so we're correct here as well. 3433 */ 3434 if (!p->on_cpu || !task_on_rq_queued(p)) 3435 return p->se.sum_exec_runtime; 3436 #endif 3437 3438 rq = task_rq_lock(p, &rf); 3439 /* 3440 * Must be ->curr _and_ ->on_rq. If dequeued, we would 3441 * project cycles that may never be accounted to this 3442 * thread, breaking clock_gettime(). 3443 */ 3444 if (task_current(rq, p) && task_on_rq_queued(p)) { 3445 prefetch_curr_exec_start(p); 3446 update_rq_clock(rq); 3447 p->sched_class->update_curr(rq); 3448 } 3449 ns = p->se.sum_exec_runtime; 3450 task_rq_unlock(rq, p, &rf); 3451 3452 return ns; 3453 } 3454 3455 /* 3456 * This function gets called by the timer code, with HZ frequency. 3457 * We call it with interrupts disabled. 
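 *
 * Each tick updates the rq clock, runs the current class's task_tick(),
 * folds this CPU into the global load average, updates PSI, and on SMP
 * may trigger load balancing; see below.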
3458 */ 3459 void scheduler_tick(void) 3460 { 3461 int cpu = smp_processor_id(); 3462 struct rq *rq = cpu_rq(cpu); 3463 struct task_struct *curr = rq->curr; 3464 struct rq_flags rf; 3465 3466 sched_clock_tick(); 3467 3468 rq_lock(rq, &rf); 3469 3470 update_rq_clock(rq); 3471 curr->sched_class->task_tick(rq, curr, 0); 3472 calc_global_load_tick(rq); 3473 psi_task_tick(rq); 3474 3475 rq_unlock(rq, &rf); 3476 3477 perf_event_task_tick(); 3478 3479 #ifdef CONFIG_SMP 3480 rq->idle_balance = idle_cpu(cpu); 3481 trigger_load_balance(rq); 3482 #endif 3483 } 3484 3485 #ifdef CONFIG_NO_HZ_FULL 3486 3487 struct tick_work { 3488 int cpu; 3489 struct delayed_work work; 3490 }; 3491 3492 static struct tick_work __percpu *tick_work_cpu; 3493 3494 static void sched_tick_remote(struct work_struct *work) 3495 { 3496 struct delayed_work *dwork = to_delayed_work(work); 3497 struct tick_work *twork = container_of(dwork, struct tick_work, work); 3498 int cpu = twork->cpu; 3499 struct rq *rq = cpu_rq(cpu); 3500 struct task_struct *curr; 3501 struct rq_flags rf; 3502 u64 delta; 3503 3504 /* 3505 * Handle the tick only if it appears the remote CPU is running in full 3506 * dynticks mode. The check is racy by nature, but missing a tick or 3507 * having one too much is no big deal because the scheduler tick updates 3508 * statistics and checks timeslices in a time-independent way, regardless 3509 * of when exactly it is running. 3510 */ 3511 if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu)) 3512 goto out_requeue; 3513 3514 rq_lock_irq(rq, &rf); 3515 curr = rq->curr; 3516 if (is_idle_task(curr)) 3517 goto out_unlock; 3518 3519 update_rq_clock(rq); 3520 delta = rq_clock_task(rq) - curr->se.exec_start; 3521 3522 /* 3523 * Make sure the next tick runs within a reasonable 3524 * amount of time. 3525 */ 3526 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); 3527 curr->sched_class->task_tick(rq, curr, 0); 3528 3529 out_unlock: 3530 rq_unlock_irq(rq, &rf); 3531 3532 out_requeue: 3533 /* 3534 * Run the remote tick once per second (1Hz). This arbitrary 3535 * frequency is large enough to avoid overload but short enough 3536 * to keep scheduler internal stats reasonably up to date. 
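 *
 * (queue_delayed_work() below re-arms the work with a delay of HZ
 * jiffies, i.e. one second.)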
3537 */ 3538 queue_delayed_work(system_unbound_wq, dwork, HZ); 3539 } 3540 3541 static void sched_tick_start(int cpu) 3542 { 3543 struct tick_work *twork; 3544 3545 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 3546 return; 3547 3548 WARN_ON_ONCE(!tick_work_cpu); 3549 3550 twork = per_cpu_ptr(tick_work_cpu, cpu); 3551 twork->cpu = cpu; 3552 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); 3553 queue_delayed_work(system_unbound_wq, &twork->work, HZ); 3554 } 3555 3556 #ifdef CONFIG_HOTPLUG_CPU 3557 static void sched_tick_stop(int cpu) 3558 { 3559 struct tick_work *twork; 3560 3561 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 3562 return; 3563 3564 WARN_ON_ONCE(!tick_work_cpu); 3565 3566 twork = per_cpu_ptr(tick_work_cpu, cpu); 3567 cancel_delayed_work_sync(&twork->work); 3568 } 3569 #endif /* CONFIG_HOTPLUG_CPU */ 3570 3571 int __init sched_tick_offload_init(void) 3572 { 3573 tick_work_cpu = alloc_percpu(struct tick_work); 3574 BUG_ON(!tick_work_cpu); 3575 3576 return 0; 3577 } 3578 3579 #else /* !CONFIG_NO_HZ_FULL */ 3580 static inline void sched_tick_start(int cpu) { } 3581 static inline void sched_tick_stop(int cpu) { } 3582 #endif 3583 3584 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ 3585 defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 3586 /* 3587 * If the value passed in is equal to the current preempt count 3588 * then we just disabled preemption. Start timing the latency. 3589 */ 3590 static inline void preempt_latency_start(int val) 3591 { 3592 if (preempt_count() == val) { 3593 unsigned long ip = get_lock_parent_ip(); 3594 #ifdef CONFIG_DEBUG_PREEMPT 3595 current->preempt_disable_ip = ip; 3596 #endif 3597 trace_preempt_off(CALLER_ADDR0, ip); 3598 } 3599 } 3600 3601 void preempt_count_add(int val) 3602 { 3603 #ifdef CONFIG_DEBUG_PREEMPT 3604 /* 3605 * Underflow? 3606 */ 3607 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 3608 return; 3609 #endif 3610 __preempt_count_add(val); 3611 #ifdef CONFIG_DEBUG_PREEMPT 3612 /* 3613 * Spinlock count overflowing soon? 3614 */ 3615 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 3616 PREEMPT_MASK - 10); 3617 #endif 3618 preempt_latency_start(val); 3619 } 3620 EXPORT_SYMBOL(preempt_count_add); 3621 NOKPROBE_SYMBOL(preempt_count_add); 3622 3623 /* 3624 * If the value passed in equals to the current preempt count 3625 * then we just enabled preemption. Stop timing the latency. 3626 */ 3627 static inline void preempt_latency_stop(int val) 3628 { 3629 if (preempt_count() == val) 3630 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 3631 } 3632 3633 void preempt_count_sub(int val) 3634 { 3635 #ifdef CONFIG_DEBUG_PREEMPT 3636 /* 3637 * Underflow? 3638 */ 3639 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 3640 return; 3641 /* 3642 * Is the spinlock portion underflowing? 
3643 */ 3644 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 3645 !(preempt_count() & PREEMPT_MASK))) 3646 return; 3647 #endif 3648 3649 preempt_latency_stop(val); 3650 __preempt_count_sub(val); 3651 } 3652 EXPORT_SYMBOL(preempt_count_sub); 3653 NOKPROBE_SYMBOL(preempt_count_sub); 3654 3655 #else 3656 static inline void preempt_latency_start(int val) { } 3657 static inline void preempt_latency_stop(int val) { } 3658 #endif 3659 3660 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 3661 { 3662 #ifdef CONFIG_DEBUG_PREEMPT 3663 return p->preempt_disable_ip; 3664 #else 3665 return 0; 3666 #endif 3667 } 3668 3669 /* 3670 * Print scheduling while atomic bug: 3671 */ 3672 static noinline void __schedule_bug(struct task_struct *prev) 3673 { 3674 /* Save this before calling printk(), since that will clobber it */ 3675 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 3676 3677 if (oops_in_progress) 3678 return; 3679 3680 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 3681 prev->comm, prev->pid, preempt_count()); 3682 3683 debug_show_held_locks(prev); 3684 print_modules(); 3685 if (irqs_disabled()) 3686 print_irqtrace_events(prev); 3687 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 3688 && in_atomic_preempt_off()) { 3689 pr_err("Preemption disabled at:"); 3690 print_ip_sym(preempt_disable_ip); 3691 pr_cont("\n"); 3692 } 3693 if (panic_on_warn) 3694 panic("scheduling while atomic\n"); 3695 3696 dump_stack(); 3697 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 3698 } 3699 3700 /* 3701 * Various schedule()-time debugging checks and statistics: 3702 */ 3703 static inline void schedule_debug(struct task_struct *prev) 3704 { 3705 #ifdef CONFIG_SCHED_STACK_END_CHECK 3706 if (task_stack_end_corrupted(prev)) 3707 panic("corrupted stack end detected inside scheduler\n"); 3708 #endif 3709 3710 if (unlikely(in_atomic_preempt_off())) { 3711 __schedule_bug(prev); 3712 preempt_count_set(PREEMPT_DISABLED); 3713 } 3714 rcu_sleep_check(); 3715 3716 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 3717 3718 schedstat_inc(this_rq()->sched_count); 3719 } 3720 3721 /* 3722 * Pick up the highest-prio task: 3723 */ 3724 static inline struct task_struct * 3725 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 3726 { 3727 const struct sched_class *class; 3728 struct task_struct *p; 3729 3730 /* 3731 * Optimization: we know that if all tasks are in the fair class we can 3732 * call that function directly, but only if the @prev task wasn't of a 3733 * higher scheduling class, because otherwise those loose the 3734 * opportunity to pull in more work from other CPUs. 3735 */ 3736 if (likely((prev->sched_class == &idle_sched_class || 3737 prev->sched_class == &fair_sched_class) && 3738 rq->nr_running == rq->cfs.h_nr_running)) { 3739 3740 p = fair_sched_class.pick_next_task(rq, prev, rf); 3741 if (unlikely(p == RETRY_TASK)) 3742 goto again; 3743 3744 /* Assumes fair_sched_class->next == idle_sched_class */ 3745 if (unlikely(!p)) 3746 p = idle_sched_class.pick_next_task(rq, prev, rf); 3747 3748 return p; 3749 } 3750 3751 again: 3752 for_each_class(class) { 3753 p = class->pick_next_task(rq, prev, rf); 3754 if (p) { 3755 if (unlikely(p == RETRY_TASK)) 3756 goto again; 3757 return p; 3758 } 3759 } 3760 3761 /* The idle class should always have a runnable task: */ 3762 BUG(); 3763 } 3764 3765 /* 3766 * __schedule() is the main scheduler function. 3767 * 3768 * The main means of driving the scheduler and thus entering this function are: 3769 * 3770 * 1. 
Explicit blocking: mutex, semaphore, waitqueue, etc. 3771 * 3772 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 3773 * paths. For example, see arch/x86/entry_64.S. 3774 * 3775 * To drive preemption between tasks, the scheduler sets the flag in timer 3776 * interrupt handler scheduler_tick(). 3777 * 3778 * 3. Wakeups don't really cause entry into schedule(). They add a 3779 * task to the run-queue and that's it. 3780 * 3781 * Now, if the new task added to the run-queue preempts the current 3782 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 3783 * called on the nearest possible occasion: 3784 * 3785 * - If the kernel is preemptible (CONFIG_PREEMPT=y): 3786 * 3787 * - in syscall or exception context, at the next outmost 3788 * preempt_enable(). (this might be as soon as the wake_up()'s 3789 * spin_unlock()!) 3790 * 3791 * - in IRQ context, return from interrupt-handler to 3792 * preemptible context 3793 * 3794 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) 3795 * then at the next: 3796 * 3797 * - cond_resched() call 3798 * - explicit schedule() call 3799 * - return from syscall or exception to user-space 3800 * - return from interrupt-handler to user-space 3801 * 3802 * WARNING: must be called with preemption disabled! 3803 */ 3804 static void __sched notrace __schedule(bool preempt) 3805 { 3806 struct task_struct *prev, *next; 3807 unsigned long *switch_count; 3808 struct rq_flags rf; 3809 struct rq *rq; 3810 int cpu; 3811 3812 cpu = smp_processor_id(); 3813 rq = cpu_rq(cpu); 3814 prev = rq->curr; 3815 3816 schedule_debug(prev); 3817 3818 if (sched_feat(HRTICK)) 3819 hrtick_clear(rq); 3820 3821 local_irq_disable(); 3822 rcu_note_context_switch(preempt); 3823 3824 /* 3825 * Make sure that signal_pending_state()->signal_pending() below 3826 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 3827 * done by the caller to avoid the race with signal_wake_up(). 3828 * 3829 * The membarrier system call requires a full memory barrier 3830 * after coming from user-space, before storing to rq->curr. 3831 */ 3832 rq_lock(rq, &rf); 3833 smp_mb__after_spinlock(); 3834 3835 /* Promote REQ to ACT */ 3836 rq->clock_update_flags <<= 1; 3837 update_rq_clock(rq); 3838 3839 switch_count = &prev->nivcsw; 3840 if (!preempt && prev->state) { 3841 if (signal_pending_state(prev->state, prev)) { 3842 prev->state = TASK_RUNNING; 3843 } else { 3844 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 3845 3846 if (prev->in_iowait) { 3847 atomic_inc(&rq->nr_iowait); 3848 delayacct_blkio_start(); 3849 } 3850 } 3851 switch_count = &prev->nvcsw; 3852 } 3853 3854 next = pick_next_task(rq, prev, &rf); 3855 clear_tsk_need_resched(prev); 3856 clear_preempt_need_resched(); 3857 3858 if (likely(prev != next)) { 3859 rq->nr_switches++; 3860 rq->curr = next; 3861 /* 3862 * The membarrier system call requires each architecture 3863 * to have a full memory barrier after updating 3864 * rq->curr, before returning to user-space. 3865 * 3866 * Here are the schemes providing that barrier on the 3867 * various architectures: 3868 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. 3869 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 
3870 * - finish_lock_switch() for weakly-ordered 3871 * architectures where spin_unlock is a full barrier, 3872 * - switch_to() for arm64 (weakly-ordered, spin_unlock 3873 * is a RELEASE barrier), 3874 */ 3875 ++*switch_count; 3876 3877 trace_sched_switch(preempt, prev, next); 3878 3879 /* Also unlocks the rq: */ 3880 rq = context_switch(rq, prev, next, &rf); 3881 } else { 3882 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 3883 rq_unlock_irq(rq, &rf); 3884 } 3885 3886 balance_callback(rq); 3887 } 3888 3889 void __noreturn do_task_dead(void) 3890 { 3891 /* Causes final put_task_struct in finish_task_switch(): */ 3892 set_special_state(TASK_DEAD); 3893 3894 /* Tell freezer to ignore us: */ 3895 current->flags |= PF_NOFREEZE; 3896 3897 __schedule(false); 3898 BUG(); 3899 3900 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 3901 for (;;) 3902 cpu_relax(); 3903 } 3904 3905 static inline void sched_submit_work(struct task_struct *tsk) 3906 { 3907 if (!tsk->state || tsk_is_pi_blocked(tsk)) 3908 return; 3909 3910 /* 3911 * If a worker went to sleep, notify and ask workqueue whether 3912 * it wants to wake up a task to maintain concurrency. 3913 * As this function is called inside the schedule() context, 3914 * we disable preemption to avoid it calling schedule() again 3915 * in the possible wakeup of a kworker. 3916 */ 3917 if (tsk->flags & PF_WQ_WORKER) { 3918 preempt_disable(); 3919 wq_worker_sleeping(tsk); 3920 preempt_enable_no_resched(); 3921 } 3922 3923 /* 3924 * If we are going to sleep and we have plugged IO queued, 3925 * make sure to submit it to avoid deadlocks. 3926 */ 3927 if (blk_needs_flush_plug(tsk)) 3928 blk_schedule_flush_plug(tsk); 3929 } 3930 3931 static void sched_update_worker(struct task_struct *tsk) 3932 { 3933 if (tsk->flags & PF_WQ_WORKER) 3934 wq_worker_running(tsk); 3935 } 3936 3937 asmlinkage __visible void __sched schedule(void) 3938 { 3939 struct task_struct *tsk = current; 3940 3941 sched_submit_work(tsk); 3942 do { 3943 preempt_disable(); 3944 __schedule(false); 3945 sched_preempt_enable_no_resched(); 3946 } while (need_resched()); 3947 sched_update_worker(tsk); 3948 } 3949 EXPORT_SYMBOL(schedule); 3950 3951 /* 3952 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted 3953 * state (have scheduled out non-voluntarily) by making sure that all 3954 * tasks have either left the run queue or have gone into user space. 3955 * As idle tasks do not do either, they must not ever be preempted 3956 * (schedule out non-voluntarily). 3957 * 3958 * schedule_idle() is similar to schedule_preempt_disable() except that it 3959 * never enables preemption because it does not call sched_submit_work(). 3960 */ 3961 void __sched schedule_idle(void) 3962 { 3963 /* 3964 * As this skips calling sched_submit_work(), which the idle task does 3965 * regardless because that function is a nop when the task is in a 3966 * TASK_RUNNING state, make sure this isn't used someplace that the 3967 * current task can be in any other state. Note, idle is always in the 3968 * TASK_RUNNING state. 3969 */ 3970 WARN_ON_ONCE(current->state); 3971 do { 3972 __schedule(false); 3973 } while (need_resched()); 3974 } 3975 3976 #ifdef CONFIG_CONTEXT_TRACKING 3977 asmlinkage __visible void __sched schedule_user(void) 3978 { 3979 /* 3980 * If we come here after a random call to set_need_resched(), 3981 * or we have been woken up remotely but the IPI has not yet arrived, 3982 * we haven't yet exited the RCU idle mode. 
Do it here manually until 3983 * we find a better solution. 3984 * 3985 * NB: There are buggy callers of this function. Ideally we 3986 * should warn if prev_state != CONTEXT_USER, but that will trigger 3987 * too frequently to make sense yet. 3988 */ 3989 enum ctx_state prev_state = exception_enter(); 3990 schedule(); 3991 exception_exit(prev_state); 3992 } 3993 #endif 3994 3995 /** 3996 * schedule_preempt_disabled - called with preemption disabled 3997 * 3998 * Returns with preemption disabled. Note: preempt_count must be 1 3999 */ 4000 void __sched schedule_preempt_disabled(void) 4001 { 4002 sched_preempt_enable_no_resched(); 4003 schedule(); 4004 preempt_disable(); 4005 } 4006 4007 static void __sched notrace preempt_schedule_common(void) 4008 { 4009 do { 4010 /* 4011 * Because the function tracer can trace preempt_count_sub() 4012 * and it also uses preempt_enable/disable_notrace(), if 4013 * NEED_RESCHED is set, the preempt_enable_notrace() called 4014 * by the function tracer will call this function again and 4015 * cause infinite recursion. 4016 * 4017 * Preemption must be disabled here before the function 4018 * tracer can trace. Break up preempt_disable() into two 4019 * calls. One to disable preemption without fear of being 4020 * traced. The other to still record the preemption latency, 4021 * which can also be traced by the function tracer. 4022 */ 4023 preempt_disable_notrace(); 4024 preempt_latency_start(1); 4025 __schedule(true); 4026 preempt_latency_stop(1); 4027 preempt_enable_no_resched_notrace(); 4028 4029 /* 4030 * Check again in case we missed a preemption opportunity 4031 * between schedule and now. 4032 */ 4033 } while (need_resched()); 4034 } 4035 4036 #ifdef CONFIG_PREEMPT 4037 /* 4038 * this is the entry point to schedule() from in-kernel preemption 4039 * off of preempt_enable. Kernel preemptions off return from interrupt 4040 * occur there and call schedule directly. 4041 */ 4042 asmlinkage __visible void __sched notrace preempt_schedule(void) 4043 { 4044 /* 4045 * If there is a non-zero preempt_count or interrupts are disabled, 4046 * we do not want to preempt the current task. Just return.. 4047 */ 4048 if (likely(!preemptible())) 4049 return; 4050 4051 preempt_schedule_common(); 4052 } 4053 NOKPROBE_SYMBOL(preempt_schedule); 4054 EXPORT_SYMBOL(preempt_schedule); 4055 4056 /** 4057 * preempt_schedule_notrace - preempt_schedule called by tracing 4058 * 4059 * The tracing infrastructure uses preempt_enable_notrace to prevent 4060 * recursion and tracing preempt enabling caused by the tracing 4061 * infrastructure itself. But as tracing can happen in areas coming 4062 * from userspace or just about to enter userspace, a preempt enable 4063 * can occur before user_exit() is called. This will cause the scheduler 4064 * to be called when the system is still in usermode. 4065 * 4066 * To prevent this, the preempt_enable_notrace will use this function 4067 * instead of preempt_schedule() to exit user context if needed before 4068 * calling the scheduler. 4069 */ 4070 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 4071 { 4072 enum ctx_state prev_ctx; 4073 4074 if (likely(!preemptible())) 4075 return; 4076 4077 do { 4078 /* 4079 * Because the function tracer can trace preempt_count_sub() 4080 * and it also uses preempt_enable/disable_notrace(), if 4081 * NEED_RESCHED is set, the preempt_enable_notrace() called 4082 * by the function tracer will call this function again and 4083 * cause infinite recursion. 
4084 * 4085 * Preemption must be disabled here before the function 4086 * tracer can trace. Break up preempt_disable() into two 4087 * calls. One to disable preemption without fear of being 4088 * traced. The other to still record the preemption latency, 4089 * which can also be traced by the function tracer. 4090 */ 4091 preempt_disable_notrace(); 4092 preempt_latency_start(1); 4093 /* 4094 * Needs preempt disabled in case user_exit() is traced 4095 * and the tracer calls preempt_enable_notrace() causing 4096 * an infinite recursion. 4097 */ 4098 prev_ctx = exception_enter(); 4099 __schedule(true); 4100 exception_exit(prev_ctx); 4101 4102 preempt_latency_stop(1); 4103 preempt_enable_no_resched_notrace(); 4104 } while (need_resched()); 4105 } 4106 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 4107 4108 #endif /* CONFIG_PREEMPT */ 4109 4110 /* 4111 * this is the entry point to schedule() from kernel preemption 4112 * off of irq context. 4113 * Note, that this is called and return with irqs disabled. This will 4114 * protect us against recursive calling from irq. 4115 */ 4116 asmlinkage __visible void __sched preempt_schedule_irq(void) 4117 { 4118 enum ctx_state prev_state; 4119 4120 /* Catch callers which need to be fixed */ 4121 BUG_ON(preempt_count() || !irqs_disabled()); 4122 4123 prev_state = exception_enter(); 4124 4125 do { 4126 preempt_disable(); 4127 local_irq_enable(); 4128 __schedule(true); 4129 local_irq_disable(); 4130 sched_preempt_enable_no_resched(); 4131 } while (need_resched()); 4132 4133 exception_exit(prev_state); 4134 } 4135 4136 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 4137 void *key) 4138 { 4139 return try_to_wake_up(curr->private, mode, wake_flags); 4140 } 4141 EXPORT_SYMBOL(default_wake_function); 4142 4143 #ifdef CONFIG_RT_MUTEXES 4144 4145 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) 4146 { 4147 if (pi_task) 4148 prio = min(prio, pi_task->prio); 4149 4150 return prio; 4151 } 4152 4153 static inline int rt_effective_prio(struct task_struct *p, int prio) 4154 { 4155 struct task_struct *pi_task = rt_mutex_get_top_task(p); 4156 4157 return __rt_effective_prio(pi_task, prio); 4158 } 4159 4160 /* 4161 * rt_mutex_setprio - set the current priority of a task 4162 * @p: task to boost 4163 * @pi_task: donor task 4164 * 4165 * This function changes the 'effective' priority of a task. It does 4166 * not touch ->normal_prio like __setscheduler(). 4167 * 4168 * Used by the rt_mutex code to implement priority inheritance 4169 * logic. Call site only calls if the priority of the task changed. 4170 */ 4171 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) 4172 { 4173 int prio, oldprio, queued, running, queue_flag = 4174 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 4175 const struct sched_class *prev_class; 4176 struct rq_flags rf; 4177 struct rq *rq; 4178 4179 /* XXX used to be waiter->prio, not waiter->task->prio */ 4180 prio = __rt_effective_prio(pi_task, p->normal_prio); 4181 4182 /* 4183 * If nothing changed; bail early. 4184 */ 4185 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) 4186 return; 4187 4188 rq = __task_rq_lock(p, &rf); 4189 update_rq_clock(rq); 4190 /* 4191 * Set under pi_lock && rq->lock, such that the value can be used under 4192 * either lock. 4193 * 4194 * Note that there is loads of tricky to make this pointer cache work 4195 * right. 
rt_mutex_slowunlock()+rt_mutex_postunlock() work together to 4196 * ensure a task is de-boosted (pi_task is set to NULL) before the 4197 * task is allowed to run again (and can exit). This ensures the pointer 4198 * points to a blocked task -- which guaratees the task is present. 4199 */ 4200 p->pi_top_task = pi_task; 4201 4202 /* 4203 * For FIFO/RR we only need to set prio, if that matches we're done. 4204 */ 4205 if (prio == p->prio && !dl_prio(prio)) 4206 goto out_unlock; 4207 4208 /* 4209 * Idle task boosting is a nono in general. There is one 4210 * exception, when PREEMPT_RT and NOHZ is active: 4211 * 4212 * The idle task calls get_next_timer_interrupt() and holds 4213 * the timer wheel base->lock on the CPU and another CPU wants 4214 * to access the timer (probably to cancel it). We can safely 4215 * ignore the boosting request, as the idle CPU runs this code 4216 * with interrupts disabled and will complete the lock 4217 * protected section without being interrupted. So there is no 4218 * real need to boost. 4219 */ 4220 if (unlikely(p == rq->idle)) { 4221 WARN_ON(p != rq->curr); 4222 WARN_ON(p->pi_blocked_on); 4223 goto out_unlock; 4224 } 4225 4226 trace_sched_pi_setprio(p, pi_task); 4227 oldprio = p->prio; 4228 4229 if (oldprio == prio) 4230 queue_flag &= ~DEQUEUE_MOVE; 4231 4232 prev_class = p->sched_class; 4233 queued = task_on_rq_queued(p); 4234 running = task_current(rq, p); 4235 if (queued) 4236 dequeue_task(rq, p, queue_flag); 4237 if (running) 4238 put_prev_task(rq, p); 4239 4240 /* 4241 * Boosting condition are: 4242 * 1. -rt task is running and holds mutex A 4243 * --> -dl task blocks on mutex A 4244 * 4245 * 2. -dl task is running and holds mutex A 4246 * --> -dl task blocks on mutex A and could preempt the 4247 * running task 4248 */ 4249 if (dl_prio(prio)) { 4250 if (!dl_prio(p->normal_prio) || 4251 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { 4252 p->dl.dl_boosted = 1; 4253 queue_flag |= ENQUEUE_REPLENISH; 4254 } else 4255 p->dl.dl_boosted = 0; 4256 p->sched_class = &dl_sched_class; 4257 } else if (rt_prio(prio)) { 4258 if (dl_prio(oldprio)) 4259 p->dl.dl_boosted = 0; 4260 if (oldprio < prio) 4261 queue_flag |= ENQUEUE_HEAD; 4262 p->sched_class = &rt_sched_class; 4263 } else { 4264 if (dl_prio(oldprio)) 4265 p->dl.dl_boosted = 0; 4266 if (rt_prio(oldprio)) 4267 p->rt.timeout = 0; 4268 p->sched_class = &fair_sched_class; 4269 } 4270 4271 p->prio = prio; 4272 4273 if (queued) 4274 enqueue_task(rq, p, queue_flag); 4275 if (running) 4276 set_curr_task(rq, p); 4277 4278 check_class_changed(rq, p, prev_class, oldprio); 4279 out_unlock: 4280 /* Avoid rq from going away on us: */ 4281 preempt_disable(); 4282 __task_rq_unlock(rq, &rf); 4283 4284 balance_callback(rq); 4285 preempt_enable(); 4286 } 4287 #else 4288 static inline int rt_effective_prio(struct task_struct *p, int prio) 4289 { 4290 return prio; 4291 } 4292 #endif 4293 4294 void set_user_nice(struct task_struct *p, long nice) 4295 { 4296 bool queued, running; 4297 int old_prio, delta; 4298 struct rq_flags rf; 4299 struct rq *rq; 4300 4301 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 4302 return; 4303 /* 4304 * We have to be careful, if called from sys_setpriority(), 4305 * the task might be in the middle of scheduling on another CPU. 
4306 */ 4307 rq = task_rq_lock(p, &rf); 4308 update_rq_clock(rq); 4309 4310 /* 4311 * The RT priorities are set via sched_setscheduler(), but we still 4312 * allow the 'normal' nice value to be set - but as expected 4313 * it wont have any effect on scheduling until the task is 4314 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 4315 */ 4316 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 4317 p->static_prio = NICE_TO_PRIO(nice); 4318 goto out_unlock; 4319 } 4320 queued = task_on_rq_queued(p); 4321 running = task_current(rq, p); 4322 if (queued) 4323 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 4324 if (running) 4325 put_prev_task(rq, p); 4326 4327 p->static_prio = NICE_TO_PRIO(nice); 4328 set_load_weight(p, true); 4329 old_prio = p->prio; 4330 p->prio = effective_prio(p); 4331 delta = p->prio - old_prio; 4332 4333 if (queued) { 4334 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 4335 /* 4336 * If the task increased its priority or is running and 4337 * lowered its priority, then reschedule its CPU: 4338 */ 4339 if (delta < 0 || (delta > 0 && task_running(rq, p))) 4340 resched_curr(rq); 4341 } 4342 if (running) 4343 set_curr_task(rq, p); 4344 out_unlock: 4345 task_rq_unlock(rq, p, &rf); 4346 } 4347 EXPORT_SYMBOL(set_user_nice); 4348 4349 /* 4350 * can_nice - check if a task can reduce its nice value 4351 * @p: task 4352 * @nice: nice value 4353 */ 4354 int can_nice(const struct task_struct *p, const int nice) 4355 { 4356 /* Convert nice value [19,-20] to rlimit style value [1,40]: */ 4357 int nice_rlim = nice_to_rlimit(nice); 4358 4359 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 4360 capable(CAP_SYS_NICE)); 4361 } 4362 4363 #ifdef __ARCH_WANT_SYS_NICE 4364 4365 /* 4366 * sys_nice - change the priority of the current process. 4367 * @increment: priority increment 4368 * 4369 * sys_setpriority is a more generic, but much slower function that 4370 * does similar things. 4371 */ 4372 SYSCALL_DEFINE1(nice, int, increment) 4373 { 4374 long nice, retval; 4375 4376 /* 4377 * Setpriority might change our priority at the same moment. 4378 * We don't have to worry. Conceptually one call occurs first 4379 * and we have a single winner. 4380 */ 4381 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 4382 nice = task_nice(current) + increment; 4383 4384 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 4385 if (increment < 0 && !can_nice(current, nice)) 4386 return -EPERM; 4387 4388 retval = security_task_setnice(current, nice); 4389 if (retval) 4390 return retval; 4391 4392 set_user_nice(current, nice); 4393 return 0; 4394 } 4395 4396 #endif 4397 4398 /** 4399 * task_prio - return the priority value of a given task. 4400 * @p: the task in question. 4401 * 4402 * Return: The priority value as seen by users in /proc. 4403 * RT tasks are offset by -200. Normal tasks are centered 4404 * around 0, value goes from -16 to +15. 4405 */ 4406 int task_prio(const struct task_struct *p) 4407 { 4408 return p->prio - MAX_RT_PRIO; 4409 } 4410 4411 /** 4412 * idle_cpu - is a given CPU idle currently? 4413 * @cpu: the processor in question. 4414 * 4415 * Return: 1 if the CPU is currently idle. 0 otherwise. 4416 */ 4417 int idle_cpu(int cpu) 4418 { 4419 struct rq *rq = cpu_rq(cpu); 4420 4421 if (rq->curr != rq->idle) 4422 return 0; 4423 4424 if (rq->nr_running) 4425 return 0; 4426 4427 #ifdef CONFIG_SMP 4428 if (!llist_empty(&rq->wake_list)) 4429 return 0; 4430 #endif 4431 4432 return 1; 4433 } 4434 4435 /** 4436 * available_idle_cpu - is a given CPU idle for enqueuing work. 
4437 * @cpu: the CPU in question. 4438 * 4439 * Return: 1 if the CPU is currently idle. 0 otherwise. 4440 */ 4441 int available_idle_cpu(int cpu) 4442 { 4443 if (!idle_cpu(cpu)) 4444 return 0; 4445 4446 if (vcpu_is_preempted(cpu)) 4447 return 0; 4448 4449 return 1; 4450 } 4451 4452 /** 4453 * idle_task - return the idle task for a given CPU. 4454 * @cpu: the processor in question. 4455 * 4456 * Return: The idle task for the CPU @cpu. 4457 */ 4458 struct task_struct *idle_task(int cpu) 4459 { 4460 return cpu_rq(cpu)->idle; 4461 } 4462 4463 /** 4464 * find_process_by_pid - find a process with a matching PID value. 4465 * @pid: the pid in question. 4466 * 4467 * The task of @pid, if found. %NULL otherwise. 4468 */ 4469 static struct task_struct *find_process_by_pid(pid_t pid) 4470 { 4471 return pid ? find_task_by_vpid(pid) : current; 4472 } 4473 4474 /* 4475 * sched_setparam() passes in -1 for its policy, to let the functions 4476 * it calls know not to change it. 4477 */ 4478 #define SETPARAM_POLICY -1 4479 4480 static void __setscheduler_params(struct task_struct *p, 4481 const struct sched_attr *attr) 4482 { 4483 int policy = attr->sched_policy; 4484 4485 if (policy == SETPARAM_POLICY) 4486 policy = p->policy; 4487 4488 p->policy = policy; 4489 4490 if (dl_policy(policy)) 4491 __setparam_dl(p, attr); 4492 else if (fair_policy(policy)) 4493 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 4494 4495 /* 4496 * __sched_setscheduler() ensures attr->sched_priority == 0 when 4497 * !rt_policy. Always setting this ensures that things like 4498 * getparam()/getattr() don't report silly values for !rt tasks. 4499 */ 4500 p->rt_priority = attr->sched_priority; 4501 p->normal_prio = normal_prio(p); 4502 set_load_weight(p, true); 4503 } 4504 4505 /* Actually do priority change: must hold pi & rq lock. */ 4506 static void __setscheduler(struct rq *rq, struct task_struct *p, 4507 const struct sched_attr *attr, bool keep_boost) 4508 { 4509 /* 4510 * If params can't change scheduling class changes aren't allowed 4511 * either. 4512 */ 4513 if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS) 4514 return; 4515 4516 __setscheduler_params(p, attr); 4517 4518 /* 4519 * Keep a potential priority boosting if called from 4520 * sched_setscheduler(). 4521 */ 4522 p->prio = normal_prio(p); 4523 if (keep_boost) 4524 p->prio = rt_effective_prio(p, p->prio); 4525 4526 if (dl_prio(p->prio)) 4527 p->sched_class = &dl_sched_class; 4528 else if (rt_prio(p->prio)) 4529 p->sched_class = &rt_sched_class; 4530 else 4531 p->sched_class = &fair_sched_class; 4532 } 4533 4534 /* 4535 * Check the target process has a UID that matches the current process's: 4536 */ 4537 static bool check_same_owner(struct task_struct *p) 4538 { 4539 const struct cred *cred = current_cred(), *pcred; 4540 bool match; 4541 4542 rcu_read_lock(); 4543 pcred = __task_cred(p); 4544 match = (uid_eq(cred->euid, pcred->euid) || 4545 uid_eq(cred->euid, pcred->uid)); 4546 rcu_read_unlock(); 4547 return match; 4548 } 4549 4550 static int __sched_setscheduler(struct task_struct *p, 4551 const struct sched_attr *attr, 4552 bool user, bool pi) 4553 { 4554 int newprio = dl_policy(attr->sched_policy) ? 
MAX_DL_PRIO - 1 : 4555 MAX_RT_PRIO - 1 - attr->sched_priority; 4556 int retval, oldprio, oldpolicy = -1, queued, running; 4557 int new_effective_prio, policy = attr->sched_policy; 4558 const struct sched_class *prev_class; 4559 struct rq_flags rf; 4560 int reset_on_fork; 4561 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 4562 struct rq *rq; 4563 4564 /* The pi code expects interrupts enabled */ 4565 BUG_ON(pi && in_interrupt()); 4566 recheck: 4567 /* Double check policy once rq lock held: */ 4568 if (policy < 0) { 4569 reset_on_fork = p->sched_reset_on_fork; 4570 policy = oldpolicy = p->policy; 4571 } else { 4572 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 4573 4574 if (!valid_policy(policy)) 4575 return -EINVAL; 4576 } 4577 4578 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) 4579 return -EINVAL; 4580 4581 /* 4582 * Valid priorities for SCHED_FIFO and SCHED_RR are 4583 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 4584 * SCHED_BATCH and SCHED_IDLE is 0. 4585 */ 4586 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || 4587 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) 4588 return -EINVAL; 4589 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 4590 (rt_policy(policy) != (attr->sched_priority != 0))) 4591 return -EINVAL; 4592 4593 /* 4594 * Allow unprivileged RT tasks to decrease priority: 4595 */ 4596 if (user && !capable(CAP_SYS_NICE)) { 4597 if (fair_policy(policy)) { 4598 if (attr->sched_nice < task_nice(p) && 4599 !can_nice(p, attr->sched_nice)) 4600 return -EPERM; 4601 } 4602 4603 if (rt_policy(policy)) { 4604 unsigned long rlim_rtprio = 4605 task_rlimit(p, RLIMIT_RTPRIO); 4606 4607 /* Can't set/change the rt policy: */ 4608 if (policy != p->policy && !rlim_rtprio) 4609 return -EPERM; 4610 4611 /* Can't increase priority: */ 4612 if (attr->sched_priority > p->rt_priority && 4613 attr->sched_priority > rlim_rtprio) 4614 return -EPERM; 4615 } 4616 4617 /* 4618 * Can't set/change SCHED_DEADLINE policy at all for now 4619 * (safest behavior); in the future we would like to allow 4620 * unprivileged DL tasks to increase their relative deadline 4621 * or reduce their runtime (both ways reducing utilization) 4622 */ 4623 if (dl_policy(policy)) 4624 return -EPERM; 4625 4626 /* 4627 * Treat SCHED_IDLE as nice 20. Only allow a switch to 4628 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 4629 */ 4630 if (task_has_idle_policy(p) && !idle_policy(policy)) { 4631 if (!can_nice(p, task_nice(p))) 4632 return -EPERM; 4633 } 4634 4635 /* Can't change other user's priorities: */ 4636 if (!check_same_owner(p)) 4637 return -EPERM; 4638 4639 /* Normal users shall not reset the sched_reset_on_fork flag: */ 4640 if (p->sched_reset_on_fork && !reset_on_fork) 4641 return -EPERM; 4642 } 4643 4644 if (user) { 4645 if (attr->sched_flags & SCHED_FLAG_SUGOV) 4646 return -EINVAL; 4647 4648 retval = security_task_setscheduler(p); 4649 if (retval) 4650 return retval; 4651 } 4652 4653 /* Update task specific "requested" clamps */ 4654 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { 4655 retval = uclamp_validate(p, attr); 4656 if (retval) 4657 return retval; 4658 } 4659 4660 /* 4661 * Make sure no PI-waiters arrive (or leave) while we are 4662 * changing the priority of the task: 4663 * 4664 * To be able to change p->policy safely, the appropriate 4665 * runqueue lock must be held. 
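 *
 * (task_rq_lock() below acquires both p->pi_lock and the runqueue lock,
 * which is what provides the exclusion described above; see
 * task_rq_lock() near the top of this file.)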
4666 */ 4667 rq = task_rq_lock(p, &rf); 4668 update_rq_clock(rq); 4669 4670 /* 4671 * Changing the policy of the stop threads is a very bad idea: 4672 */ 4673 if (p == rq->stop) { 4674 task_rq_unlock(rq, p, &rf); 4675 return -EINVAL; 4676 } 4677 4678 /* 4679 * If not changing anything there's no need to proceed further, 4680 * but store a possible modification of reset_on_fork. 4681 */ 4682 if (unlikely(policy == p->policy)) { 4683 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 4684 goto change; 4685 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 4686 goto change; 4687 if (dl_policy(policy) && dl_param_changed(p, attr)) 4688 goto change; 4689 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) 4690 goto change; 4691 4692 p->sched_reset_on_fork = reset_on_fork; 4693 task_rq_unlock(rq, p, &rf); 4694 return 0; 4695 } 4696 change: 4697 4698 if (user) { 4699 #ifdef CONFIG_RT_GROUP_SCHED 4700 /* 4701 * Do not allow realtime tasks into groups that have no runtime 4702 * assigned. 4703 */ 4704 if (rt_bandwidth_enabled() && rt_policy(policy) && 4705 task_group(p)->rt_bandwidth.rt_runtime == 0 && 4706 !task_group_is_autogroup(task_group(p))) { 4707 task_rq_unlock(rq, p, &rf); 4708 return -EPERM; 4709 } 4710 #endif 4711 #ifdef CONFIG_SMP 4712 if (dl_bandwidth_enabled() && dl_policy(policy) && 4713 !(attr->sched_flags & SCHED_FLAG_SUGOV)) { 4714 cpumask_t *span = rq->rd->span; 4715 4716 /* 4717 * Don't allow tasks with an affinity mask smaller than 4718 * the entire root_domain to become SCHED_DEADLINE. We 4719 * will also fail if there's no bandwidth available. 4720 */ 4721 if (!cpumask_subset(span, p->cpus_ptr) || 4722 rq->rd->dl_bw.bw == 0) { 4723 task_rq_unlock(rq, p, &rf); 4724 return -EPERM; 4725 } 4726 } 4727 #endif 4728 } 4729 4730 /* Re-check policy now with rq lock held: */ 4731 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 4732 policy = oldpolicy = -1; 4733 task_rq_unlock(rq, p, &rf); 4734 goto recheck; 4735 } 4736 4737 /* 4738 * If setscheduling to SCHED_DEADLINE (or changing the parameters 4739 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 4740 * is available. 4741 */ 4742 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { 4743 task_rq_unlock(rq, p, &rf); 4744 return -EBUSY; 4745 } 4746 4747 p->sched_reset_on_fork = reset_on_fork; 4748 oldprio = p->prio; 4749 4750 if (pi) { 4751 /* 4752 * Take priority boosted tasks into account. If the new 4753 * effective priority is unchanged, we just store the new 4754 * normal parameters and do not touch the scheduler class and 4755 * the runqueue. This will be done when the task deboosts 4756 * itself. 4757 */ 4758 new_effective_prio = rt_effective_prio(p, newprio); 4759 if (new_effective_prio == oldprio) 4760 queue_flags &= ~DEQUEUE_MOVE; 4761 } 4762 4763 queued = task_on_rq_queued(p); 4764 running = task_current(rq, p); 4765 if (queued) 4766 dequeue_task(rq, p, queue_flags); 4767 if (running) 4768 put_prev_task(rq, p); 4769 4770 prev_class = p->sched_class; 4771 4772 __setscheduler(rq, p, attr, pi); 4773 __setscheduler_uclamp(p, attr); 4774 4775 if (queued) { 4776 /* 4777 * We enqueue to tail when the priority of a task is 4778 * increased (user space view).
4779 */ 4780 if (oldprio < p->prio) 4781 queue_flags |= ENQUEUE_HEAD; 4782 4783 enqueue_task(rq, p, queue_flags); 4784 } 4785 if (running) 4786 set_curr_task(rq, p); 4787 4788 check_class_changed(rq, p, prev_class, oldprio); 4789 4790 /* Avoid rq from going away on us: */ 4791 preempt_disable(); 4792 task_rq_unlock(rq, p, &rf); 4793 4794 if (pi) 4795 rt_mutex_adjust_pi(p); 4796 4797 /* Run balance callbacks after we've adjusted the PI chain: */ 4798 balance_callback(rq); 4799 preempt_enable(); 4800 4801 return 0; 4802 } 4803 4804 static int _sched_setscheduler(struct task_struct *p, int policy, 4805 const struct sched_param *param, bool check) 4806 { 4807 struct sched_attr attr = { 4808 .sched_policy = policy, 4809 .sched_priority = param->sched_priority, 4810 .sched_nice = PRIO_TO_NICE(p->static_prio), 4811 }; 4812 4813 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 4814 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 4815 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 4816 policy &= ~SCHED_RESET_ON_FORK; 4817 attr.sched_policy = policy; 4818 } 4819 4820 return __sched_setscheduler(p, &attr, check, true); 4821 } 4822 /** 4823 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 4824 * @p: the task in question. 4825 * @policy: new policy. 4826 * @param: structure containing the new RT priority. 4827 * 4828 * Return: 0 on success. An error code otherwise. 4829 * 4830 * NOTE that the task may be already dead. 4831 */ 4832 int sched_setscheduler(struct task_struct *p, int policy, 4833 const struct sched_param *param) 4834 { 4835 return _sched_setscheduler(p, policy, param, true); 4836 } 4837 EXPORT_SYMBOL_GPL(sched_setscheduler); 4838 4839 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 4840 { 4841 return __sched_setscheduler(p, attr, true, true); 4842 } 4843 EXPORT_SYMBOL_GPL(sched_setattr); 4844 4845 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) 4846 { 4847 return __sched_setscheduler(p, attr, false, true); 4848 } 4849 4850 /** 4851 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 4852 * @p: the task in question. 4853 * @policy: new policy. 4854 * @param: structure containing the new RT priority. 4855 * 4856 * Just like sched_setscheduler, only don't bother checking if the 4857 * current context has permission. For example, this is needed in 4858 * stop_machine(): we create temporary high priority worker threads, 4859 * but our caller might not have that capability. 4860 * 4861 * Return: 0 on success. An error code otherwise. 4862 */ 4863 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 4864 const struct sched_param *param) 4865 { 4866 return _sched_setscheduler(p, policy, param, false); 4867 } 4868 EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); 4869 4870 static int 4871 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 4872 { 4873 struct sched_param lparam; 4874 struct task_struct *p; 4875 int retval; 4876 4877 if (!param || pid < 0) 4878 return -EINVAL; 4879 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 4880 return -EFAULT; 4881 4882 rcu_read_lock(); 4883 retval = -ESRCH; 4884 p = find_process_by_pid(pid); 4885 if (p != NULL) 4886 retval = sched_setscheduler(p, policy, &lparam); 4887 rcu_read_unlock(); 4888 4889 return retval; 4890 } 4891 4892 /* 4893 * Mimics kernel/events/core.c perf_copy_attr(). 
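 *
 * Illustrative example (the sizes are made up): if userspace hands us a
 * bigger sched_attr than we know about -- say uattr->size is 128 while
 * the kernel's sizeof(struct sched_attr) is 56 -- the copy only succeeds
 * when every byte past the kernel's size is zero; otherwise we write our
 * own size back into uattr->size and return -E2BIG.  A size of 0 is
 * treated as SCHED_ATTR_SIZE_VER0 for ABI compatibility.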
4894 */ 4895 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) 4896 { 4897 u32 size; 4898 int ret; 4899 4900 if (!access_ok(uattr, SCHED_ATTR_SIZE_VER0)) 4901 return -EFAULT; 4902 4903 /* Zero the full structure, so that a short copy will be nice: */ 4904 memset(attr, 0, sizeof(*attr)); 4905 4906 ret = get_user(size, &uattr->size); 4907 if (ret) 4908 return ret; 4909 4910 /* Bail out on silly large: */ 4911 if (size > PAGE_SIZE) 4912 goto err_size; 4913 4914 /* ABI compatibility quirk: */ 4915 if (!size) 4916 size = SCHED_ATTR_SIZE_VER0; 4917 4918 if (size < SCHED_ATTR_SIZE_VER0) 4919 goto err_size; 4920 4921 /* 4922 * If we're handed a bigger struct than we know of, 4923 * ensure all the unknown bits are 0 - i.e. new 4924 * user-space does not rely on any kernel feature 4925 * extensions we dont know about yet. 4926 */ 4927 if (size > sizeof(*attr)) { 4928 unsigned char __user *addr; 4929 unsigned char __user *end; 4930 unsigned char val; 4931 4932 addr = (void __user *)uattr + sizeof(*attr); 4933 end = (void __user *)uattr + size; 4934 4935 for (; addr < end; addr++) { 4936 ret = get_user(val, addr); 4937 if (ret) 4938 return ret; 4939 if (val) 4940 goto err_size; 4941 } 4942 size = sizeof(*attr); 4943 } 4944 4945 ret = copy_from_user(attr, uattr, size); 4946 if (ret) 4947 return -EFAULT; 4948 4949 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 4950 size < SCHED_ATTR_SIZE_VER1) 4951 return -EINVAL; 4952 4953 /* 4954 * XXX: Do we want to be lenient like existing syscalls; or do we want 4955 * to be strict and return an error on out-of-bounds values? 4956 */ 4957 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 4958 4959 return 0; 4960 4961 err_size: 4962 put_user(sizeof(*attr), &uattr->size); 4963 return -E2BIG; 4964 } 4965 4966 /** 4967 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 4968 * @pid: the pid in question. 4969 * @policy: new policy. 4970 * @param: structure containing the new RT priority. 4971 * 4972 * Return: 0 on success. An error code otherwise. 4973 */ 4974 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) 4975 { 4976 if (policy < 0) 4977 return -EINVAL; 4978 4979 return do_sched_setscheduler(pid, policy, param); 4980 } 4981 4982 /** 4983 * sys_sched_setparam - set/change the RT priority of a thread 4984 * @pid: the pid in question. 4985 * @param: structure containing the new RT priority. 4986 * 4987 * Return: 0 on success. An error code otherwise. 4988 */ 4989 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 4990 { 4991 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 4992 } 4993 4994 /** 4995 * sys_sched_setattr - same as above, but with extended sched_attr 4996 * @pid: the pid in question. 4997 * @uattr: structure containing the extended parameters. 4998 * @flags: for future extension. 
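 *
 * A minimal userspace sketch (illustrative only; the values are made up,
 * the struct sched_attr definition is assumed to match
 * include/uapi/linux/sched/types.h, glibc provides no wrapper so the raw
 * syscall is used, and SCHED_DEADLINE additionally needs CAP_SYS_NICE):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	// 10 ms
 *		.sched_deadline	=  30 * 1000 * 1000,	// 30 ms
 *		.sched_period	= 100 * 1000 * 1000,	// 100 ms
 *	};
 *	syscall(SYS_sched_setattr, 0, &attr, 0);	// pid 0 == calling thread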
4999 */ 5000 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 5001 unsigned int, flags) 5002 { 5003 struct sched_attr attr; 5004 struct task_struct *p; 5005 int retval; 5006 5007 if (!uattr || pid < 0 || flags) 5008 return -EINVAL; 5009 5010 retval = sched_copy_attr(uattr, &attr); 5011 if (retval) 5012 return retval; 5013 5014 if ((int)attr.sched_policy < 0) 5015 return -EINVAL; 5016 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) 5017 attr.sched_policy = SETPARAM_POLICY; 5018 5019 rcu_read_lock(); 5020 retval = -ESRCH; 5021 p = find_process_by_pid(pid); 5022 if (likely(p)) 5023 get_task_struct(p); 5024 rcu_read_unlock(); 5025 5026 if (likely(p)) { 5027 retval = sched_setattr(p, &attr); 5028 put_task_struct(p); 5029 } 5030 5031 return retval; 5032 } 5033 5034 /** 5035 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 5036 * @pid: the pid in question. 5037 * 5038 * Return: On success, the policy of the thread. Otherwise, a negative error 5039 * code. 5040 */ 5041 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 5042 { 5043 struct task_struct *p; 5044 int retval; 5045 5046 if (pid < 0) 5047 return -EINVAL; 5048 5049 retval = -ESRCH; 5050 rcu_read_lock(); 5051 p = find_process_by_pid(pid); 5052 if (p) { 5053 retval = security_task_getscheduler(p); 5054 if (!retval) 5055 retval = p->policy 5056 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 5057 } 5058 rcu_read_unlock(); 5059 return retval; 5060 } 5061 5062 /** 5063 * sys_sched_getparam - get the RT priority of a thread 5064 * @pid: the pid in question. 5065 * @param: structure containing the RT priority. 5066 * 5067 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 5068 * code. 5069 */ 5070 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 5071 { 5072 struct sched_param lp = { .sched_priority = 0 }; 5073 struct task_struct *p; 5074 int retval; 5075 5076 if (!param || pid < 0) 5077 return -EINVAL; 5078 5079 rcu_read_lock(); 5080 p = find_process_by_pid(pid); 5081 retval = -ESRCH; 5082 if (!p) 5083 goto out_unlock; 5084 5085 retval = security_task_getscheduler(p); 5086 if (retval) 5087 goto out_unlock; 5088 5089 if (task_has_rt_policy(p)) 5090 lp.sched_priority = p->rt_priority; 5091 rcu_read_unlock(); 5092 5093 /* 5094 * This one might sleep, we cannot do it with a spinlock held ... 5095 */ 5096 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 5097 5098 return retval; 5099 5100 out_unlock: 5101 rcu_read_unlock(); 5102 return retval; 5103 } 5104 5105 static int sched_read_attr(struct sched_attr __user *uattr, 5106 struct sched_attr *attr, 5107 unsigned int usize) 5108 { 5109 int ret; 5110 5111 if (!access_ok(uattr, usize)) 5112 return -EFAULT; 5113 5114 /* 5115 * If we're handed a smaller struct than we know of, 5116 * ensure all the unknown bits are 0 - i.e. old 5117 * user-space does not get uncomplete information. 5118 */ 5119 if (usize < sizeof(*attr)) { 5120 unsigned char *addr; 5121 unsigned char *end; 5122 5123 addr = (void *)attr + usize; 5124 end = (void *)attr + sizeof(*attr); 5125 5126 for (; addr < end; addr++) { 5127 if (*addr) 5128 return -EFBIG; 5129 } 5130 5131 attr->size = usize; 5132 } 5133 5134 ret = copy_to_user(uattr, attr, attr->size); 5135 if (ret) 5136 return -EFAULT; 5137 5138 return 0; 5139 } 5140 5141 /** 5142 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 5143 * @pid: the pid in question. 5144 * @uattr: structure containing the extended parameters. 
5145 * @size: sizeof(attr) for fwd/bwd comp. 5146 * @flags: for future extension. 5147 */ 5148 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 5149 unsigned int, size, unsigned int, flags) 5150 { 5151 struct sched_attr attr = { 5152 .size = sizeof(struct sched_attr), 5153 }; 5154 struct task_struct *p; 5155 int retval; 5156 5157 if (!uattr || pid < 0 || size > PAGE_SIZE || 5158 size < SCHED_ATTR_SIZE_VER0 || flags) 5159 return -EINVAL; 5160 5161 rcu_read_lock(); 5162 p = find_process_by_pid(pid); 5163 retval = -ESRCH; 5164 if (!p) 5165 goto out_unlock; 5166 5167 retval = security_task_getscheduler(p); 5168 if (retval) 5169 goto out_unlock; 5170 5171 attr.sched_policy = p->policy; 5172 if (p->sched_reset_on_fork) 5173 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 5174 if (task_has_dl_policy(p)) 5175 __getparam_dl(p, &attr); 5176 else if (task_has_rt_policy(p)) 5177 attr.sched_priority = p->rt_priority; 5178 else 5179 attr.sched_nice = task_nice(p); 5180 5181 #ifdef CONFIG_UCLAMP_TASK 5182 attr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 5183 attr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 5184 #endif 5185 5186 rcu_read_unlock(); 5187 5188 retval = sched_read_attr(uattr, &attr, size); 5189 return retval; 5190 5191 out_unlock: 5192 rcu_read_unlock(); 5193 return retval; 5194 } 5195 5196 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 5197 { 5198 cpumask_var_t cpus_allowed, new_mask; 5199 struct task_struct *p; 5200 int retval; 5201 5202 rcu_read_lock(); 5203 5204 p = find_process_by_pid(pid); 5205 if (!p) { 5206 rcu_read_unlock(); 5207 return -ESRCH; 5208 } 5209 5210 /* Prevent p going away */ 5211 get_task_struct(p); 5212 rcu_read_unlock(); 5213 5214 if (p->flags & PF_NO_SETAFFINITY) { 5215 retval = -EINVAL; 5216 goto out_put_task; 5217 } 5218 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 5219 retval = -ENOMEM; 5220 goto out_put_task; 5221 } 5222 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 5223 retval = -ENOMEM; 5224 goto out_free_cpus_allowed; 5225 } 5226 retval = -EPERM; 5227 if (!check_same_owner(p)) { 5228 rcu_read_lock(); 5229 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 5230 rcu_read_unlock(); 5231 goto out_free_new_mask; 5232 } 5233 rcu_read_unlock(); 5234 } 5235 5236 retval = security_task_setscheduler(p); 5237 if (retval) 5238 goto out_free_new_mask; 5239 5240 5241 cpuset_cpus_allowed(p, cpus_allowed); 5242 cpumask_and(new_mask, in_mask, cpus_allowed); 5243 5244 /* 5245 * Since bandwidth control happens on root_domain basis, 5246 * if admission test is enabled, we only admit -deadline 5247 * tasks allowed to run on all the CPUs in the task's 5248 * root_domain. 5249 */ 5250 #ifdef CONFIG_SMP 5251 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 5252 rcu_read_lock(); 5253 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { 5254 retval = -EBUSY; 5255 rcu_read_unlock(); 5256 goto out_free_new_mask; 5257 } 5258 rcu_read_unlock(); 5259 } 5260 #endif 5261 again: 5262 retval = __set_cpus_allowed_ptr(p, new_mask, true); 5263 5264 if (!retval) { 5265 cpuset_cpus_allowed(p, cpus_allowed); 5266 if (!cpumask_subset(new_mask, cpus_allowed)) { 5267 /* 5268 * We must have raced with a concurrent cpuset 5269 * update. 
Just reset the cpus_allowed to the 5270 * cpuset's cpus_allowed 5271 */ 5272 cpumask_copy(new_mask, cpus_allowed); 5273 goto again; 5274 } 5275 } 5276 out_free_new_mask: 5277 free_cpumask_var(new_mask); 5278 out_free_cpus_allowed: 5279 free_cpumask_var(cpus_allowed); 5280 out_put_task: 5281 put_task_struct(p); 5282 return retval; 5283 } 5284 5285 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 5286 struct cpumask *new_mask) 5287 { 5288 if (len < cpumask_size()) 5289 cpumask_clear(new_mask); 5290 else if (len > cpumask_size()) 5291 len = cpumask_size(); 5292 5293 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 5294 } 5295 5296 /** 5297 * sys_sched_setaffinity - set the CPU affinity of a process 5298 * @pid: pid of the process 5299 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 5300 * @user_mask_ptr: user-space pointer to the new CPU mask 5301 * 5302 * Return: 0 on success. An error code otherwise. 5303 */ 5304 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 5305 unsigned long __user *, user_mask_ptr) 5306 { 5307 cpumask_var_t new_mask; 5308 int retval; 5309 5310 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 5311 return -ENOMEM; 5312 5313 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 5314 if (retval == 0) 5315 retval = sched_setaffinity(pid, new_mask); 5316 free_cpumask_var(new_mask); 5317 return retval; 5318 } 5319 5320 long sched_getaffinity(pid_t pid, struct cpumask *mask) 5321 { 5322 struct task_struct *p; 5323 unsigned long flags; 5324 int retval; 5325 5326 rcu_read_lock(); 5327 5328 retval = -ESRCH; 5329 p = find_process_by_pid(pid); 5330 if (!p) 5331 goto out_unlock; 5332 5333 retval = security_task_getscheduler(p); 5334 if (retval) 5335 goto out_unlock; 5336 5337 raw_spin_lock_irqsave(&p->pi_lock, flags); 5338 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); 5339 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 5340 5341 out_unlock: 5342 rcu_read_unlock(); 5343 5344 return retval; 5345 } 5346 5347 /** 5348 * sys_sched_getaffinity - get the CPU affinity of a process 5349 * @pid: pid of the process 5350 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 5351 * @user_mask_ptr: user-space pointer to hold the current CPU mask 5352 * 5353 * Return: size of CPU mask copied to user_mask_ptr on success. An 5354 * error code otherwise. 5355 */ 5356 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 5357 unsigned long __user *, user_mask_ptr) 5358 { 5359 int ret; 5360 cpumask_var_t mask; 5361 5362 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 5363 return -EINVAL; 5364 if (len & (sizeof(unsigned long)-1)) 5365 return -EINVAL; 5366 5367 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 5368 return -ENOMEM; 5369 5370 ret = sched_getaffinity(pid, mask); 5371 if (ret == 0) { 5372 unsigned int retlen = min(len, cpumask_size()); 5373 5374 if (copy_to_user(user_mask_ptr, mask, retlen)) 5375 ret = -EFAULT; 5376 else 5377 ret = retlen; 5378 } 5379 free_cpumask_var(mask); 5380 5381 return ret; 5382 } 5383 5384 /** 5385 * sys_sched_yield - yield the current processor to other threads. 5386 * 5387 * This function yields the current CPU to other tasks. If there are no 5388 * other threads running on this CPU then this function will return. 5389 * 5390 * Return: 0. 
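 *
 * Userspace reaches this via the sched_yield(2) wrapper.  A (purely
 * illustrative) use is a SCHED_FIFO task letting equal-priority peers
 * run between work items -- the helper names below are hypothetical:
 *
 *	while (have_work()) {
 *		process_one_item();
 *		sched_yield();
 *	}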
5391 */ 5392 static void do_sched_yield(void) 5393 { 5394 struct rq_flags rf; 5395 struct rq *rq; 5396 5397 rq = this_rq_lock_irq(&rf); 5398 5399 schedstat_inc(rq->yld_count); 5400 current->sched_class->yield_task(rq); 5401 5402 /* 5403 * Since we are going to call schedule() anyway, there's 5404 * no need to preempt or enable interrupts: 5405 */ 5406 preempt_disable(); 5407 rq_unlock(rq, &rf); 5408 sched_preempt_enable_no_resched(); 5409 5410 schedule(); 5411 } 5412 5413 SYSCALL_DEFINE0(sched_yield) 5414 { 5415 do_sched_yield(); 5416 return 0; 5417 } 5418 5419 #ifndef CONFIG_PREEMPT 5420 int __sched _cond_resched(void) 5421 { 5422 if (should_resched(0)) { 5423 preempt_schedule_common(); 5424 return 1; 5425 } 5426 rcu_all_qs(); 5427 return 0; 5428 } 5429 EXPORT_SYMBOL(_cond_resched); 5430 #endif 5431 5432 /* 5433 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 5434 * call schedule, and on return reacquire the lock. 5435 * 5436 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level 5437 * operations here to prevent schedule() from being called twice (once via 5438 * spin_unlock(), once by hand). 5439 */ 5440 int __cond_resched_lock(spinlock_t *lock) 5441 { 5442 int resched = should_resched(PREEMPT_LOCK_OFFSET); 5443 int ret = 0; 5444 5445 lockdep_assert_held(lock); 5446 5447 if (spin_needbreak(lock) || resched) { 5448 spin_unlock(lock); 5449 if (resched) 5450 preempt_schedule_common(); 5451 else 5452 cpu_relax(); 5453 ret = 1; 5454 spin_lock(lock); 5455 } 5456 return ret; 5457 } 5458 EXPORT_SYMBOL(__cond_resched_lock); 5459 5460 /** 5461 * yield - yield the current processor to other threads. 5462 * 5463 * Do not ever use this function, there's a 99% chance you're doing it wrong. 5464 * 5465 * The scheduler is at all times free to pick the calling task as the most 5466 * eligible task to run, if removing the yield() call from your code breaks 5467 * it, its already broken. 5468 * 5469 * Typical broken usage is: 5470 * 5471 * while (!event) 5472 * yield(); 5473 * 5474 * where one assumes that yield() will let 'the other' process run that will 5475 * make event true. If the current task is a SCHED_FIFO task that will never 5476 * happen. Never use yield() as a progress guarantee!! 5477 * 5478 * If you want to use yield() to wait for something, use wait_event(). 5479 * If you want to use yield() to be 'nice' for others, use cond_resched(). 5480 * If you still want to use yield(), do not! 5481 */ 5482 void __sched yield(void) 5483 { 5484 set_current_state(TASK_RUNNING); 5485 do_sched_yield(); 5486 } 5487 EXPORT_SYMBOL(yield); 5488 5489 /** 5490 * yield_to - yield the current processor to another thread in 5491 * your thread group, or accelerate that thread toward the 5492 * processor it's on. 5493 * @p: target task 5494 * @preempt: whether task preemption is allowed or not 5495 * 5496 * It's the caller's job to ensure that the target task struct 5497 * can't go away on us before we can do any checks. 5498 * 5499 * Return: 5500 * true (>0) if we indeed boosted the target task. 5501 * false (0) if we failed to boost the target. 5502 * -ESRCH if there's no task to yield to. 
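 *
 * (A typical in-kernel user is paravirtualization code -- e.g. KVM's
 * vcpu-on-spin handling -- which directs this CPU's attention at the
 * vCPU thread believed to hold a contended lock.)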
5503 */ 5504 int __sched yield_to(struct task_struct *p, bool preempt) 5505 { 5506 struct task_struct *curr = current; 5507 struct rq *rq, *p_rq; 5508 unsigned long flags; 5509 int yielded = 0; 5510 5511 local_irq_save(flags); 5512 rq = this_rq(); 5513 5514 again: 5515 p_rq = task_rq(p); 5516 /* 5517 * If we're the only runnable task on the rq and target rq also 5518 * has only one task, there's absolutely no point in yielding. 5519 */ 5520 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 5521 yielded = -ESRCH; 5522 goto out_irq; 5523 } 5524 5525 double_rq_lock(rq, p_rq); 5526 if (task_rq(p) != p_rq) { 5527 double_rq_unlock(rq, p_rq); 5528 goto again; 5529 } 5530 5531 if (!curr->sched_class->yield_to_task) 5532 goto out_unlock; 5533 5534 if (curr->sched_class != p->sched_class) 5535 goto out_unlock; 5536 5537 if (task_running(p_rq, p) || p->state) 5538 goto out_unlock; 5539 5540 yielded = curr->sched_class->yield_to_task(rq, p, preempt); 5541 if (yielded) { 5542 schedstat_inc(rq->yld_count); 5543 /* 5544 * Make p's CPU reschedule; pick_next_entity takes care of 5545 * fairness. 5546 */ 5547 if (preempt && rq != p_rq) 5548 resched_curr(p_rq); 5549 } 5550 5551 out_unlock: 5552 double_rq_unlock(rq, p_rq); 5553 out_irq: 5554 local_irq_restore(flags); 5555 5556 if (yielded > 0) 5557 schedule(); 5558 5559 return yielded; 5560 } 5561 EXPORT_SYMBOL_GPL(yield_to); 5562 5563 int io_schedule_prepare(void) 5564 { 5565 int old_iowait = current->in_iowait; 5566 5567 current->in_iowait = 1; 5568 blk_schedule_flush_plug(current); 5569 5570 return old_iowait; 5571 } 5572 5573 void io_schedule_finish(int token) 5574 { 5575 current->in_iowait = token; 5576 } 5577 5578 /* 5579 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 5580 * that process accounting knows that this is a task in IO wait state. 5581 */ 5582 long __sched io_schedule_timeout(long timeout) 5583 { 5584 int token; 5585 long ret; 5586 5587 token = io_schedule_prepare(); 5588 ret = schedule_timeout(timeout); 5589 io_schedule_finish(token); 5590 5591 return ret; 5592 } 5593 EXPORT_SYMBOL(io_schedule_timeout); 5594 5595 void __sched io_schedule(void) 5596 { 5597 int token; 5598 5599 token = io_schedule_prepare(); 5600 schedule(); 5601 io_schedule_finish(token); 5602 } 5603 EXPORT_SYMBOL(io_schedule); 5604 5605 /** 5606 * sys_sched_get_priority_max - return maximum RT priority. 5607 * @policy: scheduling class. 5608 * 5609 * Return: On success, this syscall returns the maximum 5610 * rt_priority that can be used by a given scheduling class. 5611 * On failure, a negative error code is returned. 5612 */ 5613 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 5614 { 5615 int ret = -EINVAL; 5616 5617 switch (policy) { 5618 case SCHED_FIFO: 5619 case SCHED_RR: 5620 ret = MAX_USER_RT_PRIO-1; 5621 break; 5622 case SCHED_DEADLINE: 5623 case SCHED_NORMAL: 5624 case SCHED_BATCH: 5625 case SCHED_IDLE: 5626 ret = 0; 5627 break; 5628 } 5629 return ret; 5630 } 5631 5632 /** 5633 * sys_sched_get_priority_min - return minimum RT priority. 5634 * @policy: scheduling class. 5635 * 5636 * Return: On success, this syscall returns the minimum 5637 * rt_priority that can be used by a given scheduling class. 5638 * On failure, a negative error code is returned. 
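 *
 * Together with sched_get_priority_max() this lets userspace discover
 * the valid static priority range for a policy, e.g. (illustrative):
 *
 *	int lo = sched_get_priority_min(SCHED_FIFO);	// typically 1
 *	int hi = sched_get_priority_max(SCHED_FIFO);	// typically 99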
5639 */ 5640 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 5641 { 5642 int ret = -EINVAL; 5643 5644 switch (policy) { 5645 case SCHED_FIFO: 5646 case SCHED_RR: 5647 ret = 1; 5648 break; 5649 case SCHED_DEADLINE: 5650 case SCHED_NORMAL: 5651 case SCHED_BATCH: 5652 case SCHED_IDLE: 5653 ret = 0; 5654 } 5655 return ret; 5656 } 5657 5658 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 5659 { 5660 struct task_struct *p; 5661 unsigned int time_slice; 5662 struct rq_flags rf; 5663 struct rq *rq; 5664 int retval; 5665 5666 if (pid < 0) 5667 return -EINVAL; 5668 5669 retval = -ESRCH; 5670 rcu_read_lock(); 5671 p = find_process_by_pid(pid); 5672 if (!p) 5673 goto out_unlock; 5674 5675 retval = security_task_getscheduler(p); 5676 if (retval) 5677 goto out_unlock; 5678 5679 rq = task_rq_lock(p, &rf); 5680 time_slice = 0; 5681 if (p->sched_class->get_rr_interval) 5682 time_slice = p->sched_class->get_rr_interval(rq, p); 5683 task_rq_unlock(rq, p, &rf); 5684 5685 rcu_read_unlock(); 5686 jiffies_to_timespec64(time_slice, t); 5687 return 0; 5688 5689 out_unlock: 5690 rcu_read_unlock(); 5691 return retval; 5692 } 5693 5694 /** 5695 * sys_sched_rr_get_interval - return the default timeslice of a process. 5696 * @pid: pid of the process. 5697 * @interval: userspace pointer to the timeslice value. 5698 * 5699 * this syscall writes the default timeslice value of a given process 5700 * into the user-space timespec buffer. A value of '0' means infinity. 5701 * 5702 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 5703 * an error code. 5704 */ 5705 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 5706 struct __kernel_timespec __user *, interval) 5707 { 5708 struct timespec64 t; 5709 int retval = sched_rr_get_interval(pid, &t); 5710 5711 if (retval == 0) 5712 retval = put_timespec64(&t, interval); 5713 5714 return retval; 5715 } 5716 5717 #ifdef CONFIG_COMPAT_32BIT_TIME 5718 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, 5719 struct old_timespec32 __user *, interval) 5720 { 5721 struct timespec64 t; 5722 int retval = sched_rr_get_interval(pid, &t); 5723 5724 if (retval == 0) 5725 retval = put_old_timespec32(&t, interval); 5726 return retval; 5727 } 5728 #endif 5729 5730 void sched_show_task(struct task_struct *p) 5731 { 5732 unsigned long free = 0; 5733 int ppid; 5734 5735 if (!try_get_task_stack(p)) 5736 return; 5737 5738 printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p)); 5739 5740 if (p->state == TASK_RUNNING) 5741 printk(KERN_CONT " running task "); 5742 #ifdef CONFIG_DEBUG_STACK_USAGE 5743 free = stack_not_used(p); 5744 #endif 5745 ppid = 0; 5746 rcu_read_lock(); 5747 if (pid_alive(p)) 5748 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 5749 rcu_read_unlock(); 5750 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 5751 task_pid_nr(p), ppid, 5752 (unsigned long)task_thread_info(p)->flags); 5753 5754 print_worker_info(KERN_INFO, p); 5755 show_stack(p, NULL); 5756 put_task_stack(p); 5757 } 5758 EXPORT_SYMBOL_GPL(sched_show_task); 5759 5760 static inline bool 5761 state_filter_match(unsigned long state_filter, struct task_struct *p) 5762 { 5763 /* no filter, everything matches */ 5764 if (!state_filter) 5765 return true; 5766 5767 /* filter, but doesn't match */ 5768 if (!(p->state & state_filter)) 5769 return false; 5770 5771 /* 5772 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows 5773 * TASK_KILLABLE). 
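 *
 * For example, the sysrq 'w' (show blocked tasks) handler passes
 * TASK_UNINTERRUPTIBLE here via show_state_filter(), so plain D-state
 * tasks are dumped while kthreads idling in TASK_IDLE are skipped.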
5774 */ 5775 if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) 5776 return false; 5777 5778 return true; 5779 } 5780 5781 5782 void show_state_filter(unsigned long state_filter) 5783 { 5784 struct task_struct *g, *p; 5785 5786 #if BITS_PER_LONG == 32 5787 printk(KERN_INFO 5788 " task PC stack pid father\n"); 5789 #else 5790 printk(KERN_INFO 5791 " task PC stack pid father\n"); 5792 #endif 5793 rcu_read_lock(); 5794 for_each_process_thread(g, p) { 5795 /* 5796 * reset the NMI-timeout, listing all files on a slow 5797 * console might take a lot of time: 5798 * Also, reset softlockup watchdogs on all CPUs, because 5799 * another CPU might be blocked waiting for us to process 5800 * an IPI. 5801 */ 5802 touch_nmi_watchdog(); 5803 touch_all_softlockup_watchdogs(); 5804 if (state_filter_match(state_filter, p)) 5805 sched_show_task(p); 5806 } 5807 5808 #ifdef CONFIG_SCHED_DEBUG 5809 if (!state_filter) 5810 sysrq_sched_debug_show(); 5811 #endif 5812 rcu_read_unlock(); 5813 /* 5814 * Only show locks if all tasks are dumped: 5815 */ 5816 if (!state_filter) 5817 debug_show_all_locks(); 5818 } 5819 5820 /** 5821 * init_idle - set up an idle thread for a given CPU 5822 * @idle: task in question 5823 * @cpu: CPU the idle task belongs to 5824 * 5825 * NOTE: this function does not set the idle thread's NEED_RESCHED 5826 * flag, to make booting more robust. 5827 */ 5828 void init_idle(struct task_struct *idle, int cpu) 5829 { 5830 struct rq *rq = cpu_rq(cpu); 5831 unsigned long flags; 5832 5833 raw_spin_lock_irqsave(&idle->pi_lock, flags); 5834 raw_spin_lock(&rq->lock); 5835 5836 __sched_fork(0, idle); 5837 idle->state = TASK_RUNNING; 5838 idle->se.exec_start = sched_clock(); 5839 idle->flags |= PF_IDLE; 5840 5841 kasan_unpoison_task_stack(idle); 5842 5843 #ifdef CONFIG_SMP 5844 /* 5845 * Its possible that init_idle() gets called multiple times on a task, 5846 * in that case do_set_cpus_allowed() will not do the right thing. 5847 * 5848 * And since this is boot we can forgo the serialization. 5849 */ 5850 set_cpus_allowed_common(idle, cpumask_of(cpu)); 5851 #endif 5852 /* 5853 * We're having a chicken and egg problem, even though we are 5854 * holding rq->lock, the CPU isn't yet set to this CPU so the 5855 * lockdep check in task_group() will fail. 5856 * 5857 * Similar case to sched_fork(). / Alternatively we could 5858 * use task_rq_lock() here and obtain the other rq->lock. 5859 * 5860 * Silence PROVE_RCU 5861 */ 5862 rcu_read_lock(); 5863 __set_task_cpu(idle, cpu); 5864 rcu_read_unlock(); 5865 5866 rq->curr = rq->idle = idle; 5867 idle->on_rq = TASK_ON_RQ_QUEUED; 5868 #ifdef CONFIG_SMP 5869 idle->on_cpu = 1; 5870 #endif 5871 raw_spin_unlock(&rq->lock); 5872 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 5873 5874 /* Set the preempt count _outside_ the spinlocks! 
*/ 5875 init_idle_preempt_count(idle, cpu); 5876 5877 /* 5878 * The idle tasks have their own, simple scheduling class: 5879 */ 5880 idle->sched_class = &idle_sched_class; 5881 ftrace_graph_init_idle_task(idle, cpu); 5882 vtime_init_idle(idle, cpu); 5883 #ifdef CONFIG_SMP 5884 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 5885 #endif 5886 } 5887 5888 #ifdef CONFIG_SMP 5889 5890 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 5891 const struct cpumask *trial) 5892 { 5893 int ret = 1; 5894 5895 if (!cpumask_weight(cur)) 5896 return ret; 5897 5898 ret = dl_cpuset_cpumask_can_shrink(cur, trial); 5899 5900 return ret; 5901 } 5902 5903 int task_can_attach(struct task_struct *p, 5904 const struct cpumask *cs_cpus_allowed) 5905 { 5906 int ret = 0; 5907 5908 /* 5909 * Kthreads which disallow setaffinity shouldn't be moved 5910 * to a new cpuset; we don't want to change their CPU 5911 * affinity and isolating such threads by their set of 5912 * allowed nodes is unnecessary. Thus, cpusets are not 5913 * applicable for such threads. This prevents checking for 5914 * success of set_cpus_allowed_ptr() on all attached tasks 5915 * before cpus_mask may be changed. 5916 */ 5917 if (p->flags & PF_NO_SETAFFINITY) { 5918 ret = -EINVAL; 5919 goto out; 5920 } 5921 5922 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 5923 cs_cpus_allowed)) 5924 ret = dl_task_can_attach(p, cs_cpus_allowed); 5925 5926 out: 5927 return ret; 5928 } 5929 5930 bool sched_smp_initialized __read_mostly; 5931 5932 #ifdef CONFIG_NUMA_BALANCING 5933 /* Migrate current task p to target_cpu */ 5934 int migrate_task_to(struct task_struct *p, int target_cpu) 5935 { 5936 struct migration_arg arg = { p, target_cpu }; 5937 int curr_cpu = task_cpu(p); 5938 5939 if (curr_cpu == target_cpu) 5940 return 0; 5941 5942 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 5943 return -EINVAL; 5944 5945 /* TODO: This is not properly updating schedstats */ 5946 5947 trace_sched_move_numa(p, curr_cpu, target_cpu); 5948 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 5949 } 5950 5951 /* 5952 * Requeue a task on a given node and accurately track the number of NUMA 5953 * tasks on the runqueues 5954 */ 5955 void sched_setnuma(struct task_struct *p, int nid) 5956 { 5957 bool queued, running; 5958 struct rq_flags rf; 5959 struct rq *rq; 5960 5961 rq = task_rq_lock(p, &rf); 5962 queued = task_on_rq_queued(p); 5963 running = task_current(rq, p); 5964 5965 if (queued) 5966 dequeue_task(rq, p, DEQUEUE_SAVE); 5967 if (running) 5968 put_prev_task(rq, p); 5969 5970 p->numa_preferred_nid = nid; 5971 5972 if (queued) 5973 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 5974 if (running) 5975 set_curr_task(rq, p); 5976 task_rq_unlock(rq, p, &rf); 5977 } 5978 #endif /* CONFIG_NUMA_BALANCING */ 5979 5980 #ifdef CONFIG_HOTPLUG_CPU 5981 /* 5982 * Ensure that the idle task is using init_mm right before its CPU goes 5983 * offline. 5984 */ 5985 void idle_task_exit(void) 5986 { 5987 struct mm_struct *mm = current->active_mm; 5988 5989 BUG_ON(cpu_online(smp_processor_id())); 5990 5991 if (mm != &init_mm) { 5992 switch_mm(mm, &init_mm, current); 5993 current->active_mm = &init_mm; 5994 finish_arch_post_lock_switch(); 5995 } 5996 mmdrop(mm); 5997 } 5998 5999 /* 6000 * Since this CPU is going 'away' for a while, fold any nr_active delta 6001 * we might have. Assumes we're called after migrate_tasks() so that the 6002 * nr_active count is stable. 
We need to take the teardown thread which 6003 * is calling this into account, so we hand in adjust = 1 to the load 6004 * calculation. 6005 * 6006 * Also see the comment "Global load-average calculations". 6007 */ 6008 static void calc_load_migrate(struct rq *rq) 6009 { 6010 long delta = calc_load_fold_active(rq, 1); 6011 if (delta) 6012 atomic_long_add(delta, &calc_load_tasks); 6013 } 6014 6015 static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) 6016 { 6017 } 6018 6019 static const struct sched_class fake_sched_class = { 6020 .put_prev_task = put_prev_task_fake, 6021 }; 6022 6023 static struct task_struct fake_task = { 6024 /* 6025 * Avoid pull_{rt,dl}_task() 6026 */ 6027 .prio = MAX_PRIO + 1, 6028 .sched_class = &fake_sched_class, 6029 }; 6030 6031 /* 6032 * Migrate all tasks from the rq; sleeping tasks will be migrated by 6033 * try_to_wake_up()->select_task_rq(). 6034 * 6035 * Called with rq->lock held even though we're in stop_machine() and 6036 * there's no concurrency possible; we hold the required locks anyway 6037 * because of lock validation efforts. 6038 */ 6039 static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) 6040 { 6041 struct rq *rq = dead_rq; 6042 struct task_struct *next, *stop = rq->stop; 6043 struct rq_flags orf = *rf; 6044 int dest_cpu; 6045 6046 /* 6047 * Fudge the rq selection such that the below task selection loop 6048 * doesn't get stuck on the currently eligible stop task. 6049 * 6050 * We're currently inside stop_machine() and the rq is either stuck 6051 * in the stop_machine_cpu_stop() loop, or we're executing this code, 6052 * either way we should never end up calling schedule() until we're 6053 * done here. 6054 */ 6055 rq->stop = NULL; 6056 6057 /* 6058 * put_prev_task() and pick_next_task() sched 6059 * class methods both need to have an up-to-date 6060 * value of rq->clock[_task] 6061 */ 6062 update_rq_clock(rq); 6063 6064 for (;;) { 6065 /* 6066 * There's this thread running, bail when that's the only 6067 * remaining thread: 6068 */ 6069 if (rq->nr_running == 1) 6070 break; 6071 6072 /* 6073 * pick_next_task() assumes pinned rq->lock: 6074 */ 6075 next = pick_next_task(rq, &fake_task, rf); 6076 BUG_ON(!next); 6077 put_prev_task(rq, next); 6078 6079 /* 6080 * Rules for changing task_struct::cpus_mask are holding 6081 * both pi_lock and rq->lock, such that holding either 6082 * stabilizes the mask. 6083 * 6084 * Dropping rq->lock is not quite as disastrous as it usually is 6085 * because !cpu_active at this point, which means load-balance 6086 * will not interfere. Also, stop-machine. 6087 */ 6088 rq_unlock(rq, rf); 6089 raw_spin_lock(&next->pi_lock); 6090 rq_relock(rq, rf); 6091 6092 /* 6093 * Since we're inside stop-machine, _nothing_ should have 6094 * changed the task, WARN if weird stuff happened, because in 6095 * that case the above rq->lock drop is a fail too. 6096 */ 6097 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { 6098 raw_spin_unlock(&next->pi_lock); 6099 continue; 6100 } 6101 6102 /* Find suitable destination for @next, with force if needed.
*/ 6103 dest_cpu = select_fallback_rq(dead_rq->cpu, next); 6104 rq = __migrate_task(rq, rf, next, dest_cpu); 6105 if (rq != dead_rq) { 6106 rq_unlock(rq, rf); 6107 rq = dead_rq; 6108 *rf = orf; 6109 rq_relock(rq, rf); 6110 } 6111 raw_spin_unlock(&next->pi_lock); 6112 } 6113 6114 rq->stop = stop; 6115 } 6116 #endif /* CONFIG_HOTPLUG_CPU */ 6117 6118 void set_rq_online(struct rq *rq) 6119 { 6120 if (!rq->online) { 6121 const struct sched_class *class; 6122 6123 cpumask_set_cpu(rq->cpu, rq->rd->online); 6124 rq->online = 1; 6125 6126 for_each_class(class) { 6127 if (class->rq_online) 6128 class->rq_online(rq); 6129 } 6130 } 6131 } 6132 6133 void set_rq_offline(struct rq *rq) 6134 { 6135 if (rq->online) { 6136 const struct sched_class *class; 6137 6138 for_each_class(class) { 6139 if (class->rq_offline) 6140 class->rq_offline(rq); 6141 } 6142 6143 cpumask_clear_cpu(rq->cpu, rq->rd->online); 6144 rq->online = 0; 6145 } 6146 } 6147 6148 /* 6149 * used to mark begin/end of suspend/resume: 6150 */ 6151 static int num_cpus_frozen; 6152 6153 /* 6154 * Update cpusets according to cpu_active mask. If cpusets are 6155 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 6156 * around partition_sched_domains(). 6157 * 6158 * If we come here as part of a suspend/resume, don't touch cpusets because we 6159 * want to restore it back to its original state upon resume anyway. 6160 */ 6161 static void cpuset_cpu_active(void) 6162 { 6163 if (cpuhp_tasks_frozen) { 6164 /* 6165 * num_cpus_frozen tracks how many CPUs are involved in suspend 6166 * resume sequence. As long as this is not the last online 6167 * operation in the resume sequence, just build a single sched 6168 * domain, ignoring cpusets. 6169 */ 6170 partition_sched_domains(1, NULL, NULL); 6171 if (--num_cpus_frozen) 6172 return; 6173 /* 6174 * This is the last CPU online operation. So fall through and 6175 * restore the original sched domains by considering the 6176 * cpuset configurations. 6177 */ 6178 cpuset_force_rebuild(); 6179 } 6180 cpuset_update_active_cpus(); 6181 } 6182 6183 static int cpuset_cpu_inactive(unsigned int cpu) 6184 { 6185 if (!cpuhp_tasks_frozen) { 6186 if (dl_cpu_busy(cpu)) 6187 return -EBUSY; 6188 cpuset_update_active_cpus(); 6189 } else { 6190 num_cpus_frozen++; 6191 partition_sched_domains(1, NULL, NULL); 6192 } 6193 return 0; 6194 } 6195 6196 int sched_cpu_activate(unsigned int cpu) 6197 { 6198 struct rq *rq = cpu_rq(cpu); 6199 struct rq_flags rf; 6200 6201 #ifdef CONFIG_SCHED_SMT 6202 /* 6203 * When going up, increment the number of cores with SMT present. 6204 */ 6205 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 6206 static_branch_inc_cpuslocked(&sched_smt_present); 6207 #endif 6208 set_cpu_active(cpu, true); 6209 6210 if (sched_smp_initialized) { 6211 sched_domains_numa_masks_set(cpu); 6212 cpuset_cpu_active(); 6213 } 6214 6215 /* 6216 * Put the rq online, if not already. This happens: 6217 * 6218 * 1) In the early boot process, because we build the real domains 6219 * after all CPUs have been brought up. 6220 * 6221 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the 6222 * domains. 
6223 */ 6224 rq_lock_irqsave(rq, &rf); 6225 if (rq->rd) { 6226 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 6227 set_rq_online(rq); 6228 } 6229 rq_unlock_irqrestore(rq, &rf); 6230 6231 update_max_interval(); 6232 6233 return 0; 6234 } 6235 6236 int sched_cpu_deactivate(unsigned int cpu) 6237 { 6238 int ret; 6239 6240 set_cpu_active(cpu, false); 6241 /* 6242 * We've cleared cpu_active_mask; wait for all preempt-disabled and RCU 6243 * users of this state to go away such that all new such users will 6244 * observe it. 6245 * 6246 * Do the sync before parking the smpboot threads to take care of the RCU boost case. 6247 */ 6248 synchronize_rcu(); 6249 6250 #ifdef CONFIG_SCHED_SMT 6251 /* 6252 * When going down, decrement the number of cores with SMT present. 6253 */ 6254 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 6255 static_branch_dec_cpuslocked(&sched_smt_present); 6256 #endif 6257 6258 if (!sched_smp_initialized) 6259 return 0; 6260 6261 ret = cpuset_cpu_inactive(cpu); 6262 if (ret) { 6263 set_cpu_active(cpu, true); 6264 return ret; 6265 } 6266 sched_domains_numa_masks_clear(cpu); 6267 return 0; 6268 } 6269 6270 static void sched_rq_cpu_starting(unsigned int cpu) 6271 { 6272 struct rq *rq = cpu_rq(cpu); 6273 6274 rq->calc_load_update = calc_load_update; 6275 update_max_interval(); 6276 } 6277 6278 int sched_cpu_starting(unsigned int cpu) 6279 { 6280 sched_rq_cpu_starting(cpu); 6281 sched_tick_start(cpu); 6282 return 0; 6283 } 6284 6285 #ifdef CONFIG_HOTPLUG_CPU 6286 int sched_cpu_dying(unsigned int cpu) 6287 { 6288 struct rq *rq = cpu_rq(cpu); 6289 struct rq_flags rf; 6290 6291 /* Handle pending wakeups and then migrate everything off */ 6292 sched_ttwu_pending(); 6293 sched_tick_stop(cpu); 6294 6295 rq_lock_irqsave(rq, &rf); 6296 if (rq->rd) { 6297 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 6298 set_rq_offline(rq); 6299 } 6300 migrate_tasks(rq, &rf); 6301 BUG_ON(rq->nr_running != 1); 6302 rq_unlock_irqrestore(rq, &rf); 6303 6304 calc_load_migrate(rq); 6305 update_max_interval(); 6306 nohz_balance_exit_idle(rq); 6307 hrtick_clear(rq); 6308 return 0; 6309 } 6310 #endif 6311 6312 void __init sched_init_smp(void) 6313 { 6314 sched_init_numa(); 6315 6316 /* 6317 * There's no userspace yet to cause hotplug operations; hence all the 6318 * CPU masks are stable and all blatant races in the below code cannot 6319 * happen. 6320 */ 6321 mutex_lock(&sched_domains_mutex); 6322 sched_init_domains(cpu_active_mask); 6323 mutex_unlock(&sched_domains_mutex); 6324 6325 /* Move init over to a non-isolated CPU */ 6326 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) 6327 BUG(); 6328 sched_init_granularity(); 6329 6330 init_sched_rt_class(); 6331 init_sched_dl_class(); 6332 6333 sched_smp_initialized = true; 6334 } 6335 6336 static int __init migration_init(void) 6337 { 6338 sched_cpu_starting(smp_processor_id()); 6339 return 0; 6340 } 6341 early_initcall(migration_init); 6342 6343 #else 6344 void __init sched_init_smp(void) 6345 { 6346 sched_init_granularity(); 6347 } 6348 #endif /* CONFIG_SMP */ 6349 6350 int in_sched_functions(unsigned long addr) 6351 { 6352 return in_lock_functions(addr) || 6353 (addr >= (unsigned long)__sched_text_start 6354 && addr < (unsigned long)__sched_text_end); 6355 } 6356 6357 #ifdef CONFIG_CGROUP_SCHED 6358 /* 6359 * Default task group. 6360 * Every task in the system belongs to this group at bootup.
6361 */ 6362 struct task_group root_task_group; 6363 LIST_HEAD(task_groups); 6364 6365 /* Cacheline aligned slab cache for task_group */ 6366 static struct kmem_cache *task_group_cache __read_mostly; 6367 #endif 6368 6369 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 6370 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask); 6371 6372 void __init sched_init(void) 6373 { 6374 unsigned long alloc_size = 0, ptr; 6375 int i; 6376 6377 wait_bit_init(); 6378 6379 #ifdef CONFIG_FAIR_GROUP_SCHED 6380 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 6381 #endif 6382 #ifdef CONFIG_RT_GROUP_SCHED 6383 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 6384 #endif 6385 if (alloc_size) { 6386 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); 6387 6388 #ifdef CONFIG_FAIR_GROUP_SCHED 6389 root_task_group.se = (struct sched_entity **)ptr; 6390 ptr += nr_cpu_ids * sizeof(void **); 6391 6392 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 6393 ptr += nr_cpu_ids * sizeof(void **); 6394 6395 #endif /* CONFIG_FAIR_GROUP_SCHED */ 6396 #ifdef CONFIG_RT_GROUP_SCHED 6397 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 6398 ptr += nr_cpu_ids * sizeof(void **); 6399 6400 root_task_group.rt_rq = (struct rt_rq **)ptr; 6401 ptr += nr_cpu_ids * sizeof(void **); 6402 6403 #endif /* CONFIG_RT_GROUP_SCHED */ 6404 } 6405 #ifdef CONFIG_CPUMASK_OFFSTACK 6406 for_each_possible_cpu(i) { 6407 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 6408 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 6409 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node( 6410 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 6411 } 6412 #endif /* CONFIG_CPUMASK_OFFSTACK */ 6413 6414 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); 6415 init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime()); 6416 6417 #ifdef CONFIG_SMP 6418 init_defrootdomain(); 6419 #endif 6420 6421 #ifdef CONFIG_RT_GROUP_SCHED 6422 init_rt_bandwidth(&root_task_group.rt_bandwidth, 6423 global_rt_period(), global_rt_runtime()); 6424 #endif /* CONFIG_RT_GROUP_SCHED */ 6425 6426 #ifdef CONFIG_CGROUP_SCHED 6427 task_group_cache = KMEM_CACHE(task_group, 0); 6428 6429 list_add(&root_task_group.list, &task_groups); 6430 INIT_LIST_HEAD(&root_task_group.children); 6431 INIT_LIST_HEAD(&root_task_group.siblings); 6432 autogroup_init(&init_task); 6433 #endif /* CONFIG_CGROUP_SCHED */ 6434 6435 for_each_possible_cpu(i) { 6436 struct rq *rq; 6437 6438 rq = cpu_rq(i); 6439 raw_spin_lock_init(&rq->lock); 6440 rq->nr_running = 0; 6441 rq->calc_load_active = 0; 6442 rq->calc_load_update = jiffies + LOAD_FREQ; 6443 init_cfs_rq(&rq->cfs); 6444 init_rt_rq(&rq->rt); 6445 init_dl_rq(&rq->dl); 6446 #ifdef CONFIG_FAIR_GROUP_SCHED 6447 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 6448 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 6449 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 6450 /* 6451 * How much CPU bandwidth does root_task_group get? 6452 * 6453 * In the case of task-groups formed through the cgroup filesystem, it 6454 * gets 100% of the CPU resources in the system. This overall 6455 * system CPU resource is divided among the tasks of 6456 * root_task_group and its child task-groups in a fair manner, 6457 * based on each entity's (task or task-group's) weight 6458 * (se->load.weight).
6459 * 6460 * In other words, if root_task_group has 10 tasks of weight 6461 * 1024 and two child groups A0 and A1 (of weight 1024 each), 6462 * then A0's share of the CPU resource is: 6463 * 6464 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 6465 * 6466 * We achieve this by letting root_task_group's tasks sit 6467 * directly in rq->cfs (i.e. root_task_group->se[] = NULL). 6468 */ 6469 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 6470 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 6471 #endif /* CONFIG_FAIR_GROUP_SCHED */ 6472 6473 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 6474 #ifdef CONFIG_RT_GROUP_SCHED 6475 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 6476 #endif 6477 #ifdef CONFIG_SMP 6478 rq->sd = NULL; 6479 rq->rd = NULL; 6480 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 6481 rq->balance_callback = NULL; 6482 rq->active_balance = 0; 6483 rq->next_balance = jiffies; 6484 rq->push_cpu = 0; 6485 rq->cpu = i; 6486 rq->online = 0; 6487 rq->idle_stamp = 0; 6488 rq->avg_idle = 2*sysctl_sched_migration_cost; 6489 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 6490 6491 INIT_LIST_HEAD(&rq->cfs_tasks); 6492 6493 rq_attach_root(rq, &def_root_domain); 6494 #ifdef CONFIG_NO_HZ_COMMON 6495 rq->last_load_update_tick = jiffies; 6496 rq->last_blocked_load_update_tick = jiffies; 6497 atomic_set(&rq->nohz_flags, 0); 6498 #endif 6499 #endif /* CONFIG_SMP */ 6500 hrtick_rq_init(rq); 6501 atomic_set(&rq->nr_iowait, 0); 6502 } 6503 6504 set_load_weight(&init_task, false); 6505 6506 /* 6507 * The boot idle thread does lazy MMU switching as well: 6508 */ 6509 mmgrab(&init_mm); 6510 enter_lazy_tlb(&init_mm, current); 6511 6512 /* 6513 * Make us the idle thread. Technically, schedule() should not be 6514 * called from this thread; however, somewhere below it might be, 6515 * but because we are the idle thread, we just pick up running again 6516 * when this runqueue becomes "idle". 6517 */ 6518 init_idle(current, smp_processor_id()); 6519 6520 calc_load_update = jiffies + LOAD_FREQ; 6521 6522 #ifdef CONFIG_SMP 6523 idle_thread_set_boot_cpu(); 6524 #endif 6525 init_sched_fair_class(); 6526 6527 init_schedstats(); 6528 6529 psi_init(); 6530 6531 init_uclamp(); 6532 6533 scheduler_running = 1; 6534 } 6535 6536 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 6537 static inline int preempt_count_equals(int preempt_offset) 6538 { 6539 int nested = preempt_count() + rcu_preempt_depth(); 6540 6541 return (nested == preempt_offset); 6542 } 6543 6544 void __might_sleep(const char *file, int line, int preempt_offset) 6545 { 6546 /* 6547 * Blocking primitives will set (and therefore destroy) current->state; 6548 * since we will exit with TASK_RUNNING, make sure we enter with it, 6549 * otherwise we will destroy state.
6550 */ 6551 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 6552 "do not call blocking ops when !TASK_RUNNING; " 6553 "state=%lx set at [<%p>] %pS\n", 6554 current->state, 6555 (void *)current->task_state_change, 6556 (void *)current->task_state_change); 6557 6558 ___might_sleep(file, line, preempt_offset); 6559 } 6560 EXPORT_SYMBOL(__might_sleep); 6561 6562 void ___might_sleep(const char *file, int line, int preempt_offset) 6563 { 6564 /* Ratelimiting timestamp: */ 6565 static unsigned long prev_jiffy; 6566 6567 unsigned long preempt_disable_ip; 6568 6569 /* WARN_ON_ONCE() by default, no rate limit required: */ 6570 rcu_sleep_check(); 6571 6572 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 6573 !is_idle_task(current)) || 6574 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 6575 oops_in_progress) 6576 return; 6577 6578 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 6579 return; 6580 prev_jiffy = jiffies; 6581 6582 /* Save this before calling printk(), since that will clobber it: */ 6583 preempt_disable_ip = get_preempt_disable_ip(current); 6584 6585 printk(KERN_ERR 6586 "BUG: sleeping function called from invalid context at %s:%d\n", 6587 file, line); 6588 printk(KERN_ERR 6589 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 6590 in_atomic(), irqs_disabled(), 6591 current->pid, current->comm); 6592 6593 if (task_stack_end_corrupted(current)) 6594 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 6595 6596 debug_show_held_locks(current); 6597 if (irqs_disabled()) 6598 print_irqtrace_events(current); 6599 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 6600 && !preempt_count_equals(preempt_offset)) { 6601 pr_err("Preemption disabled at:"); 6602 print_ip_sym(preempt_disable_ip); 6603 pr_cont("\n"); 6604 } 6605 dump_stack(); 6606 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 6607 } 6608 EXPORT_SYMBOL(___might_sleep); 6609 6610 void __cant_sleep(const char *file, int line, int preempt_offset) 6611 { 6612 static unsigned long prev_jiffy; 6613 6614 if (irqs_disabled()) 6615 return; 6616 6617 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 6618 return; 6619 6620 if (preempt_count() > preempt_offset) 6621 return; 6622 6623 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 6624 return; 6625 prev_jiffy = jiffies; 6626 6627 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 6628 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 6629 in_atomic(), irqs_disabled(), 6630 current->pid, current->comm); 6631 6632 debug_show_held_locks(current); 6633 dump_stack(); 6634 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 6635 } 6636 EXPORT_SYMBOL_GPL(__cant_sleep); 6637 #endif 6638 6639 #ifdef CONFIG_MAGIC_SYSRQ 6640 void normalize_rt_tasks(void) 6641 { 6642 struct task_struct *g, *p; 6643 struct sched_attr attr = { 6644 .sched_policy = SCHED_NORMAL, 6645 }; 6646 6647 read_lock(&tasklist_lock); 6648 for_each_process_thread(g, p) { 6649 /* 6650 * Only normalize user tasks: 6651 */ 6652 if (p->flags & PF_KTHREAD) 6653 continue; 6654 6655 p->se.exec_start = 0; 6656 schedstat_set(p->se.statistics.wait_start, 0); 6657 schedstat_set(p->se.statistics.sleep_start, 0); 6658 schedstat_set(p->se.statistics.block_start, 0); 6659 6660 if (!dl_task(p) && !rt_task(p)) { 6661 /* 6662 * Renice negative nice level userspace 6663 * tasks back to 0: 6664 */ 6665 if (task_nice(p) < 0) 6666 set_user_nice(p, 0); 6667 continue; 6668 } 6669 6670 __sched_setscheduler(p, &attr, false, false); 6671 } 6672 
read_unlock(&tasklist_lock); 6673 } 6674 6675 #endif /* CONFIG_MAGIC_SYSRQ */ 6676 6677 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 6678 /* 6679 * These functions are only useful for the IA64 MCA handling, or kdb. 6680 * 6681 * They can only be called when the whole system has been 6682 * stopped - every CPU needs to be quiescent, and no scheduling 6683 * activity can take place. Using them for anything else would 6684 * be a serious bug, and as a result, they aren't even visible 6685 * under any other configuration. 6686 */ 6687 6688 /** 6689 * curr_task - return the current task for a given CPU. 6690 * @cpu: the processor in question. 6691 * 6692 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6693 * 6694 * Return: The current task for @cpu. 6695 */ 6696 struct task_struct *curr_task(int cpu) 6697 { 6698 return cpu_curr(cpu); 6699 } 6700 6701 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 6702 6703 #ifdef CONFIG_IA64 6704 /** 6705 * set_curr_task - set the current task for a given CPU. 6706 * @cpu: the processor in question. 6707 * @p: the task pointer to set. 6708 * 6709 * Description: This function must only be used when non-maskable interrupts 6710 * are serviced on a separate stack. It allows the architecture to switch the 6711 * notion of the current task on a CPU in a non-blocking manner. This function 6712 * must be called with all CPUs synchronized and interrupts disabled; the 6713 * caller must save the original value of the current task (see 6714 * curr_task() above) and restore that value before re-enabling interrupts and 6715 * restarting the system. 6716 * 6717 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6718 */ 6719 void ia64_set_curr_task(int cpu, struct task_struct *p) 6720 { 6721 cpu_curr(cpu) = p; 6722 } 6723 6724 #endif 6725 6726 #ifdef CONFIG_CGROUP_SCHED 6727 /* task_group_lock serializes the addition/removal of task groups */ 6728 static DEFINE_SPINLOCK(task_group_lock); 6729 6730 static void sched_free_group(struct task_group *tg) 6731 { 6732 free_fair_sched_group(tg); 6733 free_rt_sched_group(tg); 6734 autogroup_free(tg); 6735 kmem_cache_free(task_group_cache, tg); 6736 } 6737 6738 /* allocate runqueue etc for a new task group */ 6739 struct task_group *sched_create_group(struct task_group *parent) 6740 { 6741 struct task_group *tg; 6742 6743 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 6744 if (!tg) 6745 return ERR_PTR(-ENOMEM); 6746 6747 if (!alloc_fair_sched_group(tg, parent)) 6748 goto err; 6749 6750 if (!alloc_rt_sched_group(tg, parent)) 6751 goto err; 6752 6753 return tg; 6754 6755 err: 6756 sched_free_group(tg); 6757 return ERR_PTR(-ENOMEM); 6758 } 6759 6760 void sched_online_group(struct task_group *tg, struct task_group *parent) 6761 { 6762 unsigned long flags; 6763 6764 spin_lock_irqsave(&task_group_lock, flags); 6765 list_add_rcu(&tg->list, &task_groups); 6766 6767 /* Root should already exist: */ 6768 WARN_ON(!parent); 6769 6770 tg->parent = parent; 6771 INIT_LIST_HEAD(&tg->children); 6772 list_add_rcu(&tg->siblings, &parent->children); 6773 spin_unlock_irqrestore(&task_group_lock, flags); 6774 6775 online_fair_sched_group(tg); 6776 } 6777 6778 /* rcu callback to free various structures associated with a task group */ 6779 static void sched_free_group_rcu(struct rcu_head *rhp) 6780 { 6781 /* Now it should be safe to free those cfs_rqs: */ 6782 sched_free_group(container_of(rhp, struct task_group, rcu)); 6783 } 6784 6785 void sched_destroy_group(struct task_group *tg) 6786 { 6787 /* Wait for possible
concurrent references to cfs_rqs to complete: */ 6788 call_rcu(&tg->rcu, sched_free_group_rcu); 6789 } 6790 6791 void sched_offline_group(struct task_group *tg) 6792 { 6793 unsigned long flags; 6794 6795 /* End participation in shares distribution: */ 6796 unregister_fair_sched_group(tg); 6797 6798 spin_lock_irqsave(&task_group_lock, flags); 6799 list_del_rcu(&tg->list); 6800 list_del_rcu(&tg->siblings); 6801 spin_unlock_irqrestore(&task_group_lock, flags); 6802 } 6803 6804 static void sched_change_group(struct task_struct *tsk, int type) 6805 { 6806 struct task_group *tg; 6807 6808 /* 6809 * All callers are synchronized by task_rq_lock(); we do not use RCU, 6810 * which would be pointless here. Thus, we pass "true" to task_css_check() 6811 * to prevent lockdep warnings. 6812 */ 6813 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 6814 struct task_group, css); 6815 tg = autogroup_task_group(tsk, tg); 6816 tsk->sched_task_group = tg; 6817 6818 #ifdef CONFIG_FAIR_GROUP_SCHED 6819 if (tsk->sched_class->task_change_group) 6820 tsk->sched_class->task_change_group(tsk, type); 6821 else 6822 #endif 6823 set_task_rq(tsk, task_cpu(tsk)); 6824 } 6825 6826 /* 6827 * Change task's runqueue when it moves between groups. 6828 * 6829 * The caller of this function should have put the task in its new group by 6830 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect 6831 * its new group. 6832 */ 6833 void sched_move_task(struct task_struct *tsk) 6834 { 6835 int queued, running, queue_flags = 6836 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 6837 struct rq_flags rf; 6838 struct rq *rq; 6839 6840 rq = task_rq_lock(tsk, &rf); 6841 update_rq_clock(rq); 6842 6843 running = task_current(rq, tsk); 6844 queued = task_on_rq_queued(tsk); 6845 6846 if (queued) 6847 dequeue_task(rq, tsk, queue_flags); 6848 if (running) 6849 put_prev_task(rq, tsk); 6850 6851 sched_change_group(tsk, TASK_MOVE_GROUP); 6852 6853 if (queued) 6854 enqueue_task(rq, tsk, queue_flags); 6855 if (running) 6856 set_curr_task(rq, tsk); 6857 6858 task_rq_unlock(rq, tsk, &rf); 6859 } 6860 6861 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 6862 { 6863 return css ? container_of(css, struct task_group, css) : NULL; 6864 } 6865 6866 static struct cgroup_subsys_state * 6867 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 6868 { 6869 struct task_group *parent = css_tg(parent_css); 6870 struct task_group *tg; 6871 6872 if (!parent) { 6873 /* This is early initialization for the top cgroup */ 6874 return &root_task_group.css; 6875 } 6876 6877 tg = sched_create_group(parent); 6878 if (IS_ERR(tg)) 6879 return ERR_PTR(-ENOMEM); 6880 6881 return &tg->css; 6882 } 6883 6884 /* Expose task group only after completing cgroup initialization */ 6885 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 6886 { 6887 struct task_group *tg = css_tg(css); 6888 struct task_group *parent = css_tg(css->parent); 6889 6890 if (parent) 6891 sched_online_group(tg, parent); 6892 return 0; 6893 } 6894 6895 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 6896 { 6897 struct task_group *tg = css_tg(css); 6898 6899 sched_offline_group(tg); 6900 } 6901 6902 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 6903 { 6904 struct task_group *tg = css_tg(css); 6905 6906 /* 6907 * Relies on the RCU grace period between css_released() and this.
6908 */ 6909 sched_free_group(tg); 6910 } 6911 6912 /* 6913 * This is called before wake_up_new_task(); therefore we really only 6914 * have to set its group bits, all the other stuff does not apply. 6915 */ 6916 static void cpu_cgroup_fork(struct task_struct *task) 6917 { 6918 struct rq_flags rf; 6919 struct rq *rq; 6920 6921 rq = task_rq_lock(task, &rf); 6922 6923 update_rq_clock(rq); 6924 sched_change_group(task, TASK_SET_GROUP); 6925 6926 task_rq_unlock(rq, task, &rf); 6927 } 6928 6929 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 6930 { 6931 struct task_struct *task; 6932 struct cgroup_subsys_state *css; 6933 int ret = 0; 6934 6935 cgroup_taskset_for_each(task, css, tset) { 6936 #ifdef CONFIG_RT_GROUP_SCHED 6937 if (!sched_rt_can_attach(css_tg(css), task)) 6938 return -EINVAL; 6939 #else 6940 /* We don't support RT-tasks being in separate groups */ 6941 if (task->sched_class != &fair_sched_class) 6942 return -EINVAL; 6943 #endif 6944 /* 6945 * Serialize against wake_up_new_task() such that if it's 6946 * running, we're sure to observe its full state. 6947 */ 6948 raw_spin_lock_irq(&task->pi_lock); 6949 /* 6950 * Avoid calling sched_move_task() before wake_up_new_task() 6951 * has happened. This would lead to problems with PELT, due to 6952 * move wanting to detach+attach while we're not attached yet. 6953 */ 6954 if (task->state == TASK_NEW) 6955 ret = -EINVAL; 6956 raw_spin_unlock_irq(&task->pi_lock); 6957 6958 if (ret) 6959 break; 6960 } 6961 return ret; 6962 } 6963 6964 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 6965 { 6966 struct task_struct *task; 6967 struct cgroup_subsys_state *css; 6968 6969 cgroup_taskset_for_each(task, css, tset) 6970 sched_move_task(task); 6971 } 6972 6973 #ifdef CONFIG_FAIR_GROUP_SCHED 6974 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 6975 struct cftype *cftype, u64 shareval) 6976 { 6977 if (shareval > scale_load_down(ULONG_MAX)) 6978 shareval = MAX_SHARES; 6979 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 6980 } 6981 6982 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 6983 struct cftype *cft) 6984 { 6985 struct task_group *tg = css_tg(css); 6986 6987 return (u64) scale_load_down(tg->shares); 6988 } 6989 6990 #ifdef CONFIG_CFS_BANDWIDTH 6991 static DEFINE_MUTEX(cfs_constraints_mutex); 6992 6993 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 6994 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 6995 6996 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 6997 6998 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 6999 { 7000 int i, ret = 0, runtime_enabled, runtime_was_enabled; 7001 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7002 7003 if (tg == &root_task_group) 7004 return -EINVAL; 7005 7006 /* 7007 * Ensure we have some amount of bandwidth every period. This is 7008 * to prevent reaching a state of large arrears when throttled via 7009 * entity_tick() resulting in prolonged exit starvation. 7010 */ 7011 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 7012 return -EINVAL; 7013 7014 /* 7015 * Likewise, bound things on the other side by preventing insane quota 7016 * periods. This also allows us to normalize in computing quota 7017 * feasibility. 7018 */ 7019 if (period > max_cfs_quota_period) 7020 return -EINVAL; 7021 7022 /* 7023 * Prevent race between setting of cfs_rq->runtime_enabled and 7024 * unthrottle_offline_cfs_rqs().
7025 */ 7026 get_online_cpus(); 7027 mutex_lock(&cfs_constraints_mutex); 7028 ret = __cfs_schedulable(tg, period, quota); 7029 if (ret) 7030 goto out_unlock; 7031 7032 runtime_enabled = quota != RUNTIME_INF; 7033 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 7034 /* 7035 * If we need to toggle cfs_bandwidth_used, off->on must occur 7036 * before making related changes, and on->off must occur afterwards 7037 */ 7038 if (runtime_enabled && !runtime_was_enabled) 7039 cfs_bandwidth_usage_inc(); 7040 raw_spin_lock_irq(&cfs_b->lock); 7041 cfs_b->period = ns_to_ktime(period); 7042 cfs_b->quota = quota; 7043 7044 __refill_cfs_bandwidth_runtime(cfs_b); 7045 7046 /* Restart the period timer (if active) to handle new period expiry: */ 7047 if (runtime_enabled) 7048 start_cfs_bandwidth(cfs_b); 7049 7050 raw_spin_unlock_irq(&cfs_b->lock); 7051 7052 for_each_online_cpu(i) { 7053 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 7054 struct rq *rq = cfs_rq->rq; 7055 struct rq_flags rf; 7056 7057 rq_lock_irq(rq, &rf); 7058 cfs_rq->runtime_enabled = runtime_enabled; 7059 cfs_rq->runtime_remaining = 0; 7060 7061 if (cfs_rq->throttled) 7062 unthrottle_cfs_rq(cfs_rq); 7063 rq_unlock_irq(rq, &rf); 7064 } 7065 if (runtime_was_enabled && !runtime_enabled) 7066 cfs_bandwidth_usage_dec(); 7067 out_unlock: 7068 mutex_unlock(&cfs_constraints_mutex); 7069 put_online_cpus(); 7070 7071 return ret; 7072 } 7073 7074 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 7075 { 7076 u64 quota, period; 7077 7078 period = ktime_to_ns(tg->cfs_bandwidth.period); 7079 if (cfs_quota_us < 0) 7080 quota = RUNTIME_INF; 7081 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) 7082 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 7083 else 7084 return -EINVAL; 7085 7086 return tg_set_cfs_bandwidth(tg, period, quota); 7087 } 7088 7089 static long tg_get_cfs_quota(struct task_group *tg) 7090 { 7091 u64 quota_us; 7092 7093 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 7094 return -1; 7095 7096 quota_us = tg->cfs_bandwidth.quota; 7097 do_div(quota_us, NSEC_PER_USEC); 7098 7099 return quota_us; 7100 } 7101 7102 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 7103 { 7104 u64 quota, period; 7105 7106 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 7107 return -EINVAL; 7108 7109 period = (u64)cfs_period_us * NSEC_PER_USEC; 7110 quota = tg->cfs_bandwidth.quota; 7111 7112 return tg_set_cfs_bandwidth(tg, period, quota); 7113 } 7114 7115 static long tg_get_cfs_period(struct task_group *tg) 7116 { 7117 u64 cfs_period_us; 7118 7119 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 7120 do_div(cfs_period_us, NSEC_PER_USEC); 7121 7122 return cfs_period_us; 7123 } 7124 7125 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 7126 struct cftype *cft) 7127 { 7128 return tg_get_cfs_quota(css_tg(css)); 7129 } 7130 7131 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 7132 struct cftype *cftype, s64 cfs_quota_us) 7133 { 7134 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 7135 } 7136 7137 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 7138 struct cftype *cft) 7139 { 7140 return tg_get_cfs_period(css_tg(css)); 7141 } 7142 7143 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 7144 struct cftype *cftype, u64 cfs_period_us) 7145 { 7146 return tg_set_cfs_period(css_tg(css), cfs_period_us); 7147 } 7148 7149 struct cfs_schedulable_data { 7150 struct task_group *tg; 7151 u64 period, quota; 7152 }; 7153 7154 /* 7155 * normalize group quota/period to 
be quota/max_period 7156 * note: units are usecs 7157 */ 7158 static u64 normalize_cfs_quota(struct task_group *tg, 7159 struct cfs_schedulable_data *d) 7160 { 7161 u64 quota, period; 7162 7163 if (tg == d->tg) { 7164 period = d->period; 7165 quota = d->quota; 7166 } else { 7167 period = tg_get_cfs_period(tg); 7168 quota = tg_get_cfs_quota(tg); 7169 } 7170 7171 /* note: these should typically be equivalent */ 7172 if (quota == RUNTIME_INF || quota == -1) 7173 return RUNTIME_INF; 7174 7175 return to_ratio(period, quota); 7176 } 7177 7178 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 7179 { 7180 struct cfs_schedulable_data *d = data; 7181 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7182 s64 quota = 0, parent_quota = -1; 7183 7184 if (!tg->parent) { 7185 quota = RUNTIME_INF; 7186 } else { 7187 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 7188 7189 quota = normalize_cfs_quota(tg, d); 7190 parent_quota = parent_b->hierarchical_quota; 7191 7192 /* 7193 * Ensure max(child_quota) <= parent_quota. On cgroup2, 7194 * always take the min. On cgroup1, only inherit when no 7195 * limit is set: 7196 */ 7197 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 7198 quota = min(quota, parent_quota); 7199 } else { 7200 if (quota == RUNTIME_INF) 7201 quota = parent_quota; 7202 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 7203 return -EINVAL; 7204 } 7205 } 7206 cfs_b->hierarchical_quota = quota; 7207 7208 return 0; 7209 } 7210 7211 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 7212 { 7213 int ret; 7214 struct cfs_schedulable_data data = { 7215 .tg = tg, 7216 .period = period, 7217 .quota = quota, 7218 }; 7219 7220 if (quota != RUNTIME_INF) { 7221 do_div(data.period, NSEC_PER_USEC); 7222 do_div(data.quota, NSEC_PER_USEC); 7223 } 7224 7225 rcu_read_lock(); 7226 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 7227 rcu_read_unlock(); 7228 7229 return ret; 7230 } 7231 7232 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 7233 { 7234 struct task_group *tg = css_tg(seq_css(sf)); 7235 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7236 7237 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 7238 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 7239 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 7240 7241 if (schedstat_enabled() && tg != &root_task_group) { 7242 u64 ws = 0; 7243 int i; 7244 7245 for_each_possible_cpu(i) 7246 ws += schedstat_val(tg->se[i]->statistics.wait_sum); 7247 7248 seq_printf(sf, "wait_sum %llu\n", ws); 7249 } 7250 7251 return 0; 7252 } 7253 #endif /* CONFIG_CFS_BANDWIDTH */ 7254 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7255 7256 #ifdef CONFIG_RT_GROUP_SCHED 7257 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 7258 struct cftype *cft, s64 val) 7259 { 7260 return sched_group_set_rt_runtime(css_tg(css), val); 7261 } 7262 7263 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 7264 struct cftype *cft) 7265 { 7266 return sched_group_rt_runtime(css_tg(css)); 7267 } 7268 7269 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 7270 struct cftype *cftype, u64 rt_period_us) 7271 { 7272 return sched_group_set_rt_period(css_tg(css), rt_period_us); 7273 } 7274 7275 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 7276 struct cftype *cft) 7277 { 7278 return sched_group_rt_period(css_tg(css)); 7279 } 7280 #endif /* CONFIG_RT_GROUP_SCHED */ 7281 7282 static struct cftype cpu_legacy_files[] = { 7283 #ifdef 
CONFIG_FAIR_GROUP_SCHED 7284 { 7285 .name = "shares", 7286 .read_u64 = cpu_shares_read_u64, 7287 .write_u64 = cpu_shares_write_u64, 7288 }, 7289 #endif 7290 #ifdef CONFIG_CFS_BANDWIDTH 7291 { 7292 .name = "cfs_quota_us", 7293 .read_s64 = cpu_cfs_quota_read_s64, 7294 .write_s64 = cpu_cfs_quota_write_s64, 7295 }, 7296 { 7297 .name = "cfs_period_us", 7298 .read_u64 = cpu_cfs_period_read_u64, 7299 .write_u64 = cpu_cfs_period_write_u64, 7300 }, 7301 { 7302 .name = "stat", 7303 .seq_show = cpu_cfs_stat_show, 7304 }, 7305 #endif 7306 #ifdef CONFIG_RT_GROUP_SCHED 7307 { 7308 .name = "rt_runtime_us", 7309 .read_s64 = cpu_rt_runtime_read, 7310 .write_s64 = cpu_rt_runtime_write, 7311 }, 7312 { 7313 .name = "rt_period_us", 7314 .read_u64 = cpu_rt_period_read_uint, 7315 .write_u64 = cpu_rt_period_write_uint, 7316 }, 7317 #endif 7318 { } /* Terminate */ 7319 }; 7320 7321 static int cpu_extra_stat_show(struct seq_file *sf, 7322 struct cgroup_subsys_state *css) 7323 { 7324 #ifdef CONFIG_CFS_BANDWIDTH 7325 { 7326 struct task_group *tg = css_tg(css); 7327 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7328 u64 throttled_usec; 7329 7330 throttled_usec = cfs_b->throttled_time; 7331 do_div(throttled_usec, NSEC_PER_USEC); 7332 7333 seq_printf(sf, "nr_periods %d\n" 7334 "nr_throttled %d\n" 7335 "throttled_usec %llu\n", 7336 cfs_b->nr_periods, cfs_b->nr_throttled, 7337 throttled_usec); 7338 } 7339 #endif 7340 return 0; 7341 } 7342 7343 #ifdef CONFIG_FAIR_GROUP_SCHED 7344 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 7345 struct cftype *cft) 7346 { 7347 struct task_group *tg = css_tg(css); 7348 u64 weight = scale_load_down(tg->shares); 7349 7350 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); 7351 } 7352 7353 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 7354 struct cftype *cft, u64 weight) 7355 { 7356 /* 7357 * cgroup weight knobs should use the common MIN, DFL and MAX 7358 * values which are 1, 100 and 10000 respectively. While it loses 7359 * a bit of range on both ends, it maps pretty well onto the shares 7360 * value used by scheduler and the round-trip conversions preserve 7361 * the original value over the entire range. 
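* For example, as illustrative arithmetic based on the MIN, DFL and MAX constants named above: the default cgroup weight of 100 maps to DIV_ROUND_CLOSEST_ULL(100 * 1024, 100) == 1024 shares, the minimum weight of 1 maps to 10 and the maximum of 10000 maps to 102400, and cpu_weight_read_u64() maps those share values back to 100, 1 and 10000 respectively, so the round trip preserves the endpoints as well as the default.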
7362 */ 7363 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) 7364 return -ERANGE; 7365 7366 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); 7367 7368 return sched_group_set_shares(css_tg(css), scale_load(weight)); 7369 } 7370 7371 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 7372 struct cftype *cft) 7373 { 7374 unsigned long weight = scale_load_down(css_tg(css)->shares); 7375 int last_delta = INT_MAX; 7376 int prio, delta; 7377 7378 /* find the closest nice value to the current weight */ 7379 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 7380 delta = abs(sched_prio_to_weight[prio] - weight); 7381 if (delta >= last_delta) 7382 break; 7383 last_delta = delta; 7384 } 7385 7386 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 7387 } 7388 7389 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 7390 struct cftype *cft, s64 nice) 7391 { 7392 unsigned long weight; 7393 int idx; 7394 7395 if (nice < MIN_NICE || nice > MAX_NICE) 7396 return -ERANGE; 7397 7398 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 7399 idx = array_index_nospec(idx, 40); 7400 weight = sched_prio_to_weight[idx]; 7401 7402 return sched_group_set_shares(css_tg(css), scale_load(weight)); 7403 } 7404 #endif 7405 7406 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 7407 long period, long quota) 7408 { 7409 if (quota < 0) 7410 seq_puts(sf, "max"); 7411 else 7412 seq_printf(sf, "%ld", quota); 7413 7414 seq_printf(sf, " %ld\n", period); 7415 } 7416 7417 /* caller should put the current value in *@periodp before calling */ 7418 static int __maybe_unused cpu_period_quota_parse(char *buf, 7419 u64 *periodp, u64 *quotap) 7420 { 7421 char tok[21]; /* U64_MAX */ 7422 7423 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 7424 return -EINVAL; 7425 7426 *periodp *= NSEC_PER_USEC; 7427 7428 if (sscanf(tok, "%llu", quotap)) 7429 *quotap *= NSEC_PER_USEC; 7430 else if (!strcmp(tok, "max")) 7431 *quotap = RUNTIME_INF; 7432 else 7433 return -EINVAL; 7434 7435 return 0; 7436 } 7437 7438 #ifdef CONFIG_CFS_BANDWIDTH 7439 static int cpu_max_show(struct seq_file *sf, void *v) 7440 { 7441 struct task_group *tg = css_tg(seq_css(sf)); 7442 7443 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); 7444 return 0; 7445 } 7446 7447 static ssize_t cpu_max_write(struct kernfs_open_file *of, 7448 char *buf, size_t nbytes, loff_t off) 7449 { 7450 struct task_group *tg = css_tg(of_css(of)); 7451 u64 period = tg_get_cfs_period(tg); 7452 u64 quota; 7453 int ret; 7454 7455 ret = cpu_period_quota_parse(buf, &period, "a); 7456 if (!ret) 7457 ret = tg_set_cfs_bandwidth(tg, period, quota); 7458 return ret ?: nbytes; 7459 } 7460 #endif 7461 7462 static struct cftype cpu_files[] = { 7463 #ifdef CONFIG_FAIR_GROUP_SCHED 7464 { 7465 .name = "weight", 7466 .flags = CFTYPE_NOT_ON_ROOT, 7467 .read_u64 = cpu_weight_read_u64, 7468 .write_u64 = cpu_weight_write_u64, 7469 }, 7470 { 7471 .name = "weight.nice", 7472 .flags = CFTYPE_NOT_ON_ROOT, 7473 .read_s64 = cpu_weight_nice_read_s64, 7474 .write_s64 = cpu_weight_nice_write_s64, 7475 }, 7476 #endif 7477 #ifdef CONFIG_CFS_BANDWIDTH 7478 { 7479 .name = "max", 7480 .flags = CFTYPE_NOT_ON_ROOT, 7481 .seq_show = cpu_max_show, 7482 .write = cpu_max_write, 7483 }, 7484 #endif 7485 { } /* terminate */ 7486 }; 7487 7488 struct cgroup_subsys cpu_cgrp_subsys = { 7489 .css_alloc = cpu_cgroup_css_alloc, 7490 .css_online = cpu_cgroup_css_online, 7491 .css_released = cpu_cgroup_css_released, 7492 .css_free = 
cpu_cgroup_css_free, 7493 .css_extra_stat_show = cpu_extra_stat_show, 7494 .fork = cpu_cgroup_fork, 7495 .can_attach = cpu_cgroup_can_attach, 7496 .attach = cpu_cgroup_attach, 7497 .legacy_cftypes = cpu_legacy_files, 7498 .dfl_cftypes = cpu_files, 7499 .early_init = true, 7500 .threaded = true, 7501 }; 7502 7503 #endif /* CONFIG_CGROUP_SCHED */ 7504 7505 void dump_cpu_task(int cpu) 7506 { 7507 pr_info("Task dump for CPU %d:\n", cpu); 7508 sched_show_task(cpu_curr(cpu)); 7509 } 7510 7511 /* 7512 * Nice levels are multiplicative, with a gentle 10% change for every 7513 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 7514 * nice 1, it will get ~10% less CPU time than another CPU-bound task 7515 * that remained on nice 0. 7516 * 7517 * The "10% effect" is relative and cumulative: from _any_ nice level, 7518 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 7519 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 7520 * If a task goes up by ~10% and another task goes down by ~10% then 7521 * the relative distance between them is ~25%.) 7522 */ 7523 const int sched_prio_to_weight[40] = { 7524 /* -20 */ 88761, 71755, 56483, 46273, 36291, 7525 /* -15 */ 29154, 23254, 18705, 14949, 11916, 7526 /* -10 */ 9548, 7620, 6100, 4904, 3906, 7527 /* -5 */ 3121, 2501, 1991, 1586, 1277, 7528 /* 0 */ 1024, 820, 655, 526, 423, 7529 /* 5 */ 335, 272, 215, 172, 137, 7530 /* 10 */ 110, 87, 70, 56, 45, 7531 /* 15 */ 36, 29, 23, 18, 15, 7532 }; 7533 7534 /* 7535 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. 7536 * 7537 * In cases where the weight does not change often, we can use the 7538 * precalculated inverse to speed up arithmetics by turning divisions 7539 * into multiplications: 7540 */ 7541 const u32 sched_prio_to_wmult[40] = { 7542 /* -20 */ 48388, 59856, 76040, 92818, 118348, 7543 /* -15 */ 147320, 184698, 229616, 287308, 360437, 7544 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 7545 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 7546 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 7547 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 7548 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 7549 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 7550 }; 7551 7552 #undef CREATE_TRACE_POINTS 7553
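/*
 * Illustrative sketch, not part of the kernel: a small standalone userspace
 * program showing how the sched_prio_to_weight[] table above translates into
 * relative CPU time between two runnable tasks, and how sched_prio_to_wmult[]
 * relates to it as roughly 2^32 / weight. The example_weight[] table and the
 * figures in the comments are local copies/assumptions made for this sketch
 * only.
 */
#include <stdint.h>
#include <stdio.h>

/* Local copies of three sched_prio_to_weight[] entries, indexed by nice level. */
static const int example_weight[6] = {
	[0] = 1024,	/* nice  0 */
	[1] = 820,	/* nice +1 */
	[5] = 335,	/* nice +5 */
};

int main(void)
{
	int w0 = example_weight[0];	/* weight of a nice  0 task */
	int w5 = example_weight[5];	/* weight of a nice +5 task */

	/* Two runnable tasks on one CPU get CPU time in proportion to weight. */
	printf("nice 0 share: %.1f%%\n", 100.0 * w0 / (w0 + w5));	/* ~75.3% */
	printf("nice 5 share: %.1f%%\n", 100.0 * w5 / (w0 + w5));	/* ~24.7% */

	/* Each nice step scales the weight by roughly 1.25. */
	printf("weight ratio nice 0 -> nice 1: %.3f\n",
	       (double)w0 / example_weight[1]);				/* ~1.249 */

	/* wmult is roughly 2^32 / weight, turning divisions into multiplies. */
	printf("2^32 / 1024 = %u (cf. sched_prio_to_wmult[20] == 4194304)\n",
	       (uint32_t)((1ULL << 32) / (uint64_t)w0));

	return 0;
}

/* Build and run with e.g.: cc -O2 -o nice_weights nice_weights.c && ./nice_weights */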