1 /* 2 * kernel/sched/core.c 3 * 4 * Kernel scheduler and related syscalls 5 * 6 * Copyright (C) 1991-2002 Linus Torvalds 7 * 8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and 9 * make semaphores SMP safe 10 * 1998-11-19 Implemented schedule_timeout() and related stuff 11 * by Andrea Arcangeli 12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: 13 * hybrid priority-list and round-robin design with 14 * an array-switch method of distributing timeslices 15 * and per-CPU runqueues. Cleanups and useful suggestions 16 * by Davide Libenzi, preemptible kernel bits by Robert Love. 17 * 2003-09-03 Interactivity tuning by Con Kolivas. 18 * 2004-04-02 Scheduler domains code by Nick Piggin 19 * 2007-04-15 Work begun on replacing all interactivity tuning with a 20 * fair scheduling design by Con Kolivas. 21 * 2007-05-05 Load balancing (smp-nice) and other improvements 22 * by Peter Williams 23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith 24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri 25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, 26 * Thomas Gleixner, Mike Kravetz 27 */ 28 29 #include <linux/mm.h> 30 #include <linux/module.h> 31 #include <linux/nmi.h> 32 #include <linux/init.h> 33 #include <linux/uaccess.h> 34 #include <linux/highmem.h> 35 #include <asm/mmu_context.h> 36 #include <linux/interrupt.h> 37 #include <linux/capability.h> 38 #include <linux/completion.h> 39 #include <linux/kernel_stat.h> 40 #include <linux/debug_locks.h> 41 #include <linux/perf_event.h> 42 #include <linux/security.h> 43 #include <linux/notifier.h> 44 #include <linux/profile.h> 45 #include <linux/freezer.h> 46 #include <linux/vmalloc.h> 47 #include <linux/blkdev.h> 48 #include <linux/delay.h> 49 #include <linux/pid_namespace.h> 50 #include <linux/smp.h> 51 #include <linux/threads.h> 52 #include <linux/timer.h> 53 #include <linux/rcupdate.h> 54 #include <linux/cpu.h> 55 #include <linux/cpuset.h> 56 #include <linux/percpu.h> 57 #include <linux/proc_fs.h> 58 #include <linux/seq_file.h> 59 #include <linux/sysctl.h> 60 #include <linux/syscalls.h> 61 #include <linux/times.h> 62 #include <linux/tsacct_kern.h> 63 #include <linux/kprobes.h> 64 #include <linux/delayacct.h> 65 #include <linux/unistd.h> 66 #include <linux/pagemap.h> 67 #include <linux/hrtimer.h> 68 #include <linux/tick.h> 69 #include <linux/debugfs.h> 70 #include <linux/ctype.h> 71 #include <linux/ftrace.h> 72 #include <linux/slab.h> 73 #include <linux/init_task.h> 74 #include <linux/binfmts.h> 75 #include <linux/context_tracking.h> 76 #include <linux/compiler.h> 77 78 #include <asm/switch_to.h> 79 #include <asm/tlb.h> 80 #include <asm/irq_regs.h> 81 #include <asm/mutex.h> 82 #ifdef CONFIG_PARAVIRT 83 #include <asm/paravirt.h> 84 #endif 85 86 #include "sched.h" 87 #include "../workqueue_internal.h" 88 #include "../smpboot.h" 89 90 #define CREATE_TRACE_POINTS 91 #include <trace/events/sched.h> 92 93 void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period) 94 { 95 unsigned long delta; 96 ktime_t soft, hard, now; 97 98 for (;;) { 99 if (hrtimer_active(period_timer)) 100 break; 101 102 now = hrtimer_cb_get_time(period_timer); 103 hrtimer_forward(period_timer, now, period); 104 105 soft = hrtimer_get_softexpires(period_timer); 106 hard = hrtimer_get_expires(period_timer); 107 delta = ktime_to_ns(ktime_sub(hard, soft)); 108 __hrtimer_start_range_ns(period_timer, soft, delta, 109 HRTIMER_MODE_ABS_PINNED, 0); 110 } 111 } 112 113 
DEFINE_MUTEX(sched_domains_mutex); 114 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 115 116 static void update_rq_clock_task(struct rq *rq, s64 delta); 117 118 void update_rq_clock(struct rq *rq) 119 { 120 s64 delta; 121 122 lockdep_assert_held(&rq->lock); 123 124 if (rq->clock_skip_update & RQCF_ACT_SKIP) 125 return; 126 127 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; 128 if (delta < 0) 129 return; 130 rq->clock += delta; 131 update_rq_clock_task(rq, delta); 132 } 133 134 /* 135 * Debugging: various feature bits 136 */ 137 138 #define SCHED_FEAT(name, enabled) \ 139 (1UL << __SCHED_FEAT_##name) * enabled | 140 141 const_debug unsigned int sysctl_sched_features = 142 #include "features.h" 143 0; 144 145 #undef SCHED_FEAT 146 147 #ifdef CONFIG_SCHED_DEBUG 148 #define SCHED_FEAT(name, enabled) \ 149 #name , 150 151 static const char * const sched_feat_names[] = { 152 #include "features.h" 153 }; 154 155 #undef SCHED_FEAT 156 157 static int sched_feat_show(struct seq_file *m, void *v) 158 { 159 int i; 160 161 for (i = 0; i < __SCHED_FEAT_NR; i++) { 162 if (!(sysctl_sched_features & (1UL << i))) 163 seq_puts(m, "NO_"); 164 seq_printf(m, "%s ", sched_feat_names[i]); 165 } 166 seq_puts(m, "\n"); 167 168 return 0; 169 } 170 171 #ifdef HAVE_JUMP_LABEL 172 173 #define jump_label_key__true STATIC_KEY_INIT_TRUE 174 #define jump_label_key__false STATIC_KEY_INIT_FALSE 175 176 #define SCHED_FEAT(name, enabled) \ 177 jump_label_key__##enabled , 178 179 struct static_key sched_feat_keys[__SCHED_FEAT_NR] = { 180 #include "features.h" 181 }; 182 183 #undef SCHED_FEAT 184 185 static void sched_feat_disable(int i) 186 { 187 if (static_key_enabled(&sched_feat_keys[i])) 188 static_key_slow_dec(&sched_feat_keys[i]); 189 } 190 191 static void sched_feat_enable(int i) 192 { 193 if (!static_key_enabled(&sched_feat_keys[i])) 194 static_key_slow_inc(&sched_feat_keys[i]); 195 } 196 #else 197 static void sched_feat_disable(int i) { }; 198 static void sched_feat_enable(int i) { }; 199 #endif /* HAVE_JUMP_LABEL */ 200 201 static int sched_feat_set(char *cmp) 202 { 203 int i; 204 int neg = 0; 205 206 if (strncmp(cmp, "NO_", 3) == 0) { 207 neg = 1; 208 cmp += 3; 209 } 210 211 for (i = 0; i < __SCHED_FEAT_NR; i++) { 212 if (strcmp(cmp, sched_feat_names[i]) == 0) { 213 if (neg) { 214 sysctl_sched_features &= ~(1UL << i); 215 sched_feat_disable(i); 216 } else { 217 sysctl_sched_features |= (1UL << i); 218 sched_feat_enable(i); 219 } 220 break; 221 } 222 } 223 224 return i; 225 } 226 227 static ssize_t 228 sched_feat_write(struct file *filp, const char __user *ubuf, 229 size_t cnt, loff_t *ppos) 230 { 231 char buf[64]; 232 char *cmp; 233 int i; 234 struct inode *inode; 235 236 if (cnt > 63) 237 cnt = 63; 238 239 if (copy_from_user(&buf, ubuf, cnt)) 240 return -EFAULT; 241 242 buf[cnt] = 0; 243 cmp = strstrip(buf); 244 245 /* Ensure the static_key remains in a consistent state */ 246 inode = file_inode(filp); 247 mutex_lock(&inode->i_mutex); 248 i = sched_feat_set(cmp); 249 mutex_unlock(&inode->i_mutex); 250 if (i == __SCHED_FEAT_NR) 251 return -EINVAL; 252 253 *ppos += cnt; 254 255 return cnt; 256 } 257 258 static int sched_feat_open(struct inode *inode, struct file *filp) 259 { 260 return single_open(filp, sched_feat_show, NULL); 261 } 262 263 static const struct file_operations sched_feat_fops = { 264 .open = sched_feat_open, 265 .write = sched_feat_write, 266 .read = seq_read, 267 .llseek = seq_lseek, 268 .release = single_release, 269 }; 270 271 static __init int sched_init_debug(void) 272 { 273 
debugfs_create_file("sched_features", 0644, NULL, NULL, 274 &sched_feat_fops); 275 276 return 0; 277 } 278 late_initcall(sched_init_debug); 279 #endif /* CONFIG_SCHED_DEBUG */ 280 281 /* 282 * Number of tasks to iterate in a single balance run. 283 * Limited because this is done with IRQs disabled. 284 */ 285 const_debug unsigned int sysctl_sched_nr_migrate = 32; 286 287 /* 288 * period over which we average the RT time consumption, measured 289 * in ms. 290 * 291 * default: 1s 292 */ 293 const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC; 294 295 /* 296 * period over which we measure -rt task cpu usage in us. 297 * default: 1s 298 */ 299 unsigned int sysctl_sched_rt_period = 1000000; 300 301 __read_mostly int scheduler_running; 302 303 /* 304 * part of the period that we allow rt tasks to run in us. 305 * default: 0.95s 306 */ 307 int sysctl_sched_rt_runtime = 950000; 308 309 /* cpus with isolated domains */ 310 cpumask_var_t cpu_isolated_map; 311 312 /* 313 * this_rq_lock - lock this runqueue and disable interrupts. 314 */ 315 static struct rq *this_rq_lock(void) 316 __acquires(rq->lock) 317 { 318 struct rq *rq; 319 320 local_irq_disable(); 321 rq = this_rq(); 322 raw_spin_lock(&rq->lock); 323 324 return rq; 325 } 326 327 #ifdef CONFIG_SCHED_HRTICK 328 /* 329 * Use HR-timers to deliver accurate preemption points. 330 */ 331 332 static void hrtick_clear(struct rq *rq) 333 { 334 if (hrtimer_active(&rq->hrtick_timer)) 335 hrtimer_cancel(&rq->hrtick_timer); 336 } 337 338 /* 339 * High-resolution timer tick. 340 * Runs from hardirq context with interrupts disabled. 341 */ 342 static enum hrtimer_restart hrtick(struct hrtimer *timer) 343 { 344 struct rq *rq = container_of(timer, struct rq, hrtick_timer); 345 346 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 347 348 raw_spin_lock(&rq->lock); 349 update_rq_clock(rq); 350 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 351 raw_spin_unlock(&rq->lock); 352 353 return HRTIMER_NORESTART; 354 } 355 356 #ifdef CONFIG_SMP 357 358 static int __hrtick_restart(struct rq *rq) 359 { 360 struct hrtimer *timer = &rq->hrtick_timer; 361 ktime_t time = hrtimer_get_softexpires(timer); 362 363 return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0); 364 } 365 366 /* 367 * called from hardirq (IPI) context 368 */ 369 static void __hrtick_start(void *arg) 370 { 371 struct rq *rq = arg; 372 373 raw_spin_lock(&rq->lock); 374 __hrtick_restart(rq); 375 rq->hrtick_csd_pending = 0; 376 raw_spin_unlock(&rq->lock); 377 } 378 379 /* 380 * Called to set the hrtick timer state. 381 * 382 * called with rq->lock held and irqs disabled 383 */ 384 void hrtick_start(struct rq *rq, u64 delay) 385 { 386 struct hrtimer *timer = &rq->hrtick_timer; 387 ktime_t time; 388 s64 delta; 389 390 /* 391 * Don't schedule slices shorter than 10000ns, that just 392 * doesn't make sense and can cause timer DoS. 
393 */ 394 delta = max_t(s64, delay, 10000LL); 395 time = ktime_add_ns(timer->base->get_time(), delta); 396 397 hrtimer_set_expires(timer, time); 398 399 if (rq == this_rq()) { 400 __hrtick_restart(rq); 401 } else if (!rq->hrtick_csd_pending) { 402 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); 403 rq->hrtick_csd_pending = 1; 404 } 405 } 406 407 static int 408 hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) 409 { 410 int cpu = (int)(long)hcpu; 411 412 switch (action) { 413 case CPU_UP_CANCELED: 414 case CPU_UP_CANCELED_FROZEN: 415 case CPU_DOWN_PREPARE: 416 case CPU_DOWN_PREPARE_FROZEN: 417 case CPU_DEAD: 418 case CPU_DEAD_FROZEN: 419 hrtick_clear(cpu_rq(cpu)); 420 return NOTIFY_OK; 421 } 422 423 return NOTIFY_DONE; 424 } 425 426 static __init void init_hrtick(void) 427 { 428 hotcpu_notifier(hotplug_hrtick, 0); 429 } 430 #else 431 /* 432 * Called to set the hrtick timer state. 433 * 434 * called with rq->lock held and irqs disabled 435 */ 436 void hrtick_start(struct rq *rq, u64 delay) 437 { 438 /* 439 * Don't schedule slices shorter than 10000ns, that just 440 * doesn't make sense. Rely on vruntime for fairness. 441 */ 442 delay = max_t(u64, delay, 10000LL); 443 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, 444 HRTIMER_MODE_REL_PINNED, 0); 445 } 446 447 static inline void init_hrtick(void) 448 { 449 } 450 #endif /* CONFIG_SMP */ 451 452 static void init_rq_hrtick(struct rq *rq) 453 { 454 #ifdef CONFIG_SMP 455 rq->hrtick_csd_pending = 0; 456 457 rq->hrtick_csd.flags = 0; 458 rq->hrtick_csd.func = __hrtick_start; 459 rq->hrtick_csd.info = rq; 460 #endif 461 462 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 463 rq->hrtick_timer.function = hrtick; 464 } 465 #else /* CONFIG_SCHED_HRTICK */ 466 static inline void hrtick_clear(struct rq *rq) 467 { 468 } 469 470 static inline void init_rq_hrtick(struct rq *rq) 471 { 472 } 473 474 static inline void init_hrtick(void) 475 { 476 } 477 #endif /* CONFIG_SCHED_HRTICK */ 478 479 /* 480 * cmpxchg based fetch_or, macro so it works for different integer types 481 */ 482 #define fetch_or(ptr, val) \ 483 ({ typeof(*(ptr)) __old, __val = *(ptr); \ 484 for (;;) { \ 485 __old = cmpxchg((ptr), __val, __val | (val)); \ 486 if (__old == __val) \ 487 break; \ 488 __val = __old; \ 489 } \ 490 __old; \ 491 }) 492 493 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) 494 /* 495 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, 496 * this avoids any races wrt polling state changes and thereby avoids 497 * spurious IPIs. 498 */ 499 static bool set_nr_and_not_polling(struct task_struct *p) 500 { 501 struct thread_info *ti = task_thread_info(p); 502 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); 503 } 504 505 /* 506 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. 507 * 508 * If this returns true, then the idle task promises to call 509 * sched_ttwu_pending() and reschedule soon. 
510 */ 511 static bool set_nr_if_polling(struct task_struct *p) 512 { 513 struct thread_info *ti = task_thread_info(p); 514 typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags); 515 516 for (;;) { 517 if (!(val & _TIF_POLLING_NRFLAG)) 518 return false; 519 if (val & _TIF_NEED_RESCHED) 520 return true; 521 old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED); 522 if (old == val) 523 break; 524 val = old; 525 } 526 return true; 527 } 528 529 #else 530 static bool set_nr_and_not_polling(struct task_struct *p) 531 { 532 set_tsk_need_resched(p); 533 return true; 534 } 535 536 #ifdef CONFIG_SMP 537 static bool set_nr_if_polling(struct task_struct *p) 538 { 539 return false; 540 } 541 #endif 542 #endif 543 544 /* 545 * resched_curr - mark rq's current task 'to be rescheduled now'. 546 * 547 * On UP this means the setting of the need_resched flag, on SMP it 548 * might also involve a cross-CPU call to trigger the scheduler on 549 * the target CPU. 550 */ 551 void resched_curr(struct rq *rq) 552 { 553 struct task_struct *curr = rq->curr; 554 int cpu; 555 556 lockdep_assert_held(&rq->lock); 557 558 if (test_tsk_need_resched(curr)) 559 return; 560 561 cpu = cpu_of(rq); 562 563 if (cpu == smp_processor_id()) { 564 set_tsk_need_resched(curr); 565 set_preempt_need_resched(); 566 return; 567 } 568 569 if (set_nr_and_not_polling(curr)) 570 smp_send_reschedule(cpu); 571 else 572 trace_sched_wake_idle_without_ipi(cpu); 573 } 574 575 void resched_cpu(int cpu) 576 { 577 struct rq *rq = cpu_rq(cpu); 578 unsigned long flags; 579 580 if (!raw_spin_trylock_irqsave(&rq->lock, flags)) 581 return; 582 resched_curr(rq); 583 raw_spin_unlock_irqrestore(&rq->lock, flags); 584 } 585 586 #ifdef CONFIG_SMP 587 #ifdef CONFIG_NO_HZ_COMMON 588 /* 589 * In the semi idle case, use the nearest busy cpu for migrating timers 590 * from an idle cpu. This is good for power-savings. 591 * 592 * We don't do similar optimization for completely idle system, as 593 * selecting an idle cpu will add more delays to the timers than intended 594 * (as that cpu's timer base may not be uptodate wrt jiffies etc). 595 */ 596 int get_nohz_timer_target(int pinned) 597 { 598 int cpu = smp_processor_id(); 599 int i; 600 struct sched_domain *sd; 601 602 if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu)) 603 return cpu; 604 605 rcu_read_lock(); 606 for_each_domain(cpu, sd) { 607 for_each_cpu(i, sched_domain_span(sd)) { 608 if (!idle_cpu(i)) { 609 cpu = i; 610 goto unlock; 611 } 612 } 613 } 614 unlock: 615 rcu_read_unlock(); 616 return cpu; 617 } 618 /* 619 * When add_timer_on() enqueues a timer into the timer wheel of an 620 * idle CPU then this timer might expire before the next timer event 621 * which is scheduled to wake up that CPU. In case of a completely 622 * idle system the next event might even be infinite time into the 623 * future. wake_up_idle_cpu() ensures that the CPU is woken up and 624 * leaves the inner idle loop so the newly added timer is taken into 625 * account when the CPU goes back to idle and evaluates the timer 626 * wheel for the next timer event. 627 */ 628 static void wake_up_idle_cpu(int cpu) 629 { 630 struct rq *rq = cpu_rq(cpu); 631 632 if (cpu == smp_processor_id()) 633 return; 634 635 if (set_nr_and_not_polling(rq->idle)) 636 smp_send_reschedule(cpu); 637 else 638 trace_sched_wake_idle_without_ipi(cpu); 639 } 640 641 static bool wake_up_full_nohz_cpu(int cpu) 642 { 643 /* 644 * We just need the target to call irq_exit() and re-evaluate 645 * the next tick. The nohz full kick at least implies that. 
646 * If needed we can still optimize that later with an 647 * empty IRQ. 648 */ 649 if (tick_nohz_full_cpu(cpu)) { 650 if (cpu != smp_processor_id() || 651 tick_nohz_tick_stopped()) 652 tick_nohz_full_kick_cpu(cpu); 653 return true; 654 } 655 656 return false; 657 } 658 659 void wake_up_nohz_cpu(int cpu) 660 { 661 if (!wake_up_full_nohz_cpu(cpu)) 662 wake_up_idle_cpu(cpu); 663 } 664 665 static inline bool got_nohz_idle_kick(void) 666 { 667 int cpu = smp_processor_id(); 668 669 if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu))) 670 return false; 671 672 if (idle_cpu(cpu) && !need_resched()) 673 return true; 674 675 /* 676 * We can't run Idle Load Balance on this CPU for this time so we 677 * cancel it and clear NOHZ_BALANCE_KICK 678 */ 679 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); 680 return false; 681 } 682 683 #else /* CONFIG_NO_HZ_COMMON */ 684 685 static inline bool got_nohz_idle_kick(void) 686 { 687 return false; 688 } 689 690 #endif /* CONFIG_NO_HZ_COMMON */ 691 692 #ifdef CONFIG_NO_HZ_FULL 693 bool sched_can_stop_tick(void) 694 { 695 /* 696 * FIFO realtime policy runs the highest priority task. Other runnable 697 * tasks are of a lower priority. The scheduler tick does nothing. 698 */ 699 if (current->policy == SCHED_FIFO) 700 return true; 701 702 /* 703 * Round-robin realtime tasks time slice with other tasks at the same 704 * realtime priority. Is this task the only one at this priority? 705 */ 706 if (current->policy == SCHED_RR) { 707 struct sched_rt_entity *rt_se = ¤t->rt; 708 709 return rt_se->run_list.prev == rt_se->run_list.next; 710 } 711 712 /* 713 * More than one running task need preemption. 714 * nr_running update is assumed to be visible 715 * after IPI is sent from wakers. 716 */ 717 if (this_rq()->nr_running > 1) 718 return false; 719 720 return true; 721 } 722 #endif /* CONFIG_NO_HZ_FULL */ 723 724 void sched_avg_update(struct rq *rq) 725 { 726 s64 period = sched_avg_period(); 727 728 while ((s64)(rq_clock(rq) - rq->age_stamp) > period) { 729 /* 730 * Inline assembly required to prevent the compiler 731 * optimising this loop into a divmod call. 732 * See __iter_div_u64_rem() for another example of this. 733 */ 734 asm("" : "+rm" (rq->age_stamp)); 735 rq->age_stamp += period; 736 rq->rt_avg /= 2; 737 } 738 } 739 740 #endif /* CONFIG_SMP */ 741 742 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ 743 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) 744 /* 745 * Iterate task_group tree rooted at *from, calling @down when first entering a 746 * node and @up when leaving it for the final time. 747 * 748 * Caller must hold rcu_lock or sufficient equivalent. 
749 */ 750 int walk_tg_tree_from(struct task_group *from, 751 tg_visitor down, tg_visitor up, void *data) 752 { 753 struct task_group *parent, *child; 754 int ret; 755 756 parent = from; 757 758 down: 759 ret = (*down)(parent, data); 760 if (ret) 761 goto out; 762 list_for_each_entry_rcu(child, &parent->children, siblings) { 763 parent = child; 764 goto down; 765 766 up: 767 continue; 768 } 769 ret = (*up)(parent, data); 770 if (ret || parent == from) 771 goto out; 772 773 child = parent; 774 parent = parent->parent; 775 if (parent) 776 goto up; 777 out: 778 return ret; 779 } 780 781 int tg_nop(struct task_group *tg, void *data) 782 { 783 return 0; 784 } 785 #endif 786 787 static void set_load_weight(struct task_struct *p) 788 { 789 int prio = p->static_prio - MAX_RT_PRIO; 790 struct load_weight *load = &p->se.load; 791 792 /* 793 * SCHED_IDLE tasks get minimal weight: 794 */ 795 if (p->policy == SCHED_IDLE) { 796 load->weight = scale_load(WEIGHT_IDLEPRIO); 797 load->inv_weight = WMULT_IDLEPRIO; 798 return; 799 } 800 801 load->weight = scale_load(prio_to_weight[prio]); 802 load->inv_weight = prio_to_wmult[prio]; 803 } 804 805 static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 806 { 807 update_rq_clock(rq); 808 sched_info_queued(rq, p); 809 p->sched_class->enqueue_task(rq, p, flags); 810 } 811 812 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 813 { 814 update_rq_clock(rq); 815 sched_info_dequeued(rq, p); 816 p->sched_class->dequeue_task(rq, p, flags); 817 } 818 819 void activate_task(struct rq *rq, struct task_struct *p, int flags) 820 { 821 if (task_contributes_to_load(p)) 822 rq->nr_uninterruptible--; 823 824 enqueue_task(rq, p, flags); 825 } 826 827 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 828 { 829 if (task_contributes_to_load(p)) 830 rq->nr_uninterruptible++; 831 832 dequeue_task(rq, p, flags); 833 } 834 835 static void update_rq_clock_task(struct rq *rq, s64 delta) 836 { 837 /* 838 * In theory, the compile should just see 0 here, and optimize out the call 839 * to sched_rt_avg_update. But I don't trust it... 840 */ 841 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) 842 s64 steal = 0, irq_delta = 0; 843 #endif 844 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 845 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; 846 847 /* 848 * Since irq_time is only updated on {soft,}irq_exit, we might run into 849 * this case when a previous update_rq_clock() happened inside a 850 * {soft,}irq region. 851 * 852 * When this happens, we stop ->clock_task and only update the 853 * prev_irq_time stamp to account for the part that fit, so that a next 854 * update will consume the rest. This ensures ->clock_task is 855 * monotonic. 856 * 857 * It does however cause some slight miss-attribution of {soft,}irq 858 * time, a more accurate solution would be to update the irq_time using 859 * the current rq->clock timestamp, except that would require using 860 * atomic ops. 
861 */ 862 if (irq_delta > delta) 863 irq_delta = delta; 864 865 rq->prev_irq_time += irq_delta; 866 delta -= irq_delta; 867 #endif 868 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 869 if (static_key_false((¶virt_steal_rq_enabled))) { 870 steal = paravirt_steal_clock(cpu_of(rq)); 871 steal -= rq->prev_steal_time_rq; 872 873 if (unlikely(steal > delta)) 874 steal = delta; 875 876 rq->prev_steal_time_rq += steal; 877 delta -= steal; 878 } 879 #endif 880 881 rq->clock_task += delta; 882 883 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) 884 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) 885 sched_rt_avg_update(rq, irq_delta + steal); 886 #endif 887 } 888 889 void sched_set_stop_task(int cpu, struct task_struct *stop) 890 { 891 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 892 struct task_struct *old_stop = cpu_rq(cpu)->stop; 893 894 if (stop) { 895 /* 896 * Make it appear like a SCHED_FIFO task, its something 897 * userspace knows about and won't get confused about. 898 * 899 * Also, it will make PI more or less work without too 900 * much confusion -- but then, stop work should not 901 * rely on PI working anyway. 902 */ 903 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 904 905 stop->sched_class = &stop_sched_class; 906 } 907 908 cpu_rq(cpu)->stop = stop; 909 910 if (old_stop) { 911 /* 912 * Reset it back to a normal scheduling class so that 913 * it can die in pieces. 914 */ 915 old_stop->sched_class = &rt_sched_class; 916 } 917 } 918 919 /* 920 * __normal_prio - return the priority that is based on the static prio 921 */ 922 static inline int __normal_prio(struct task_struct *p) 923 { 924 return p->static_prio; 925 } 926 927 /* 928 * Calculate the expected normal priority: i.e. priority 929 * without taking RT-inheritance into account. Might be 930 * boosted by interactivity modifiers. Changes upon fork, 931 * setprio syscalls, and whenever the interactivity 932 * estimator recalculates. 933 */ 934 static inline int normal_prio(struct task_struct *p) 935 { 936 int prio; 937 938 if (task_has_dl_policy(p)) 939 prio = MAX_DL_PRIO-1; 940 else if (task_has_rt_policy(p)) 941 prio = MAX_RT_PRIO-1 - p->rt_priority; 942 else 943 prio = __normal_prio(p); 944 return prio; 945 } 946 947 /* 948 * Calculate the current priority, i.e. the priority 949 * taken into account by the scheduler. This value might 950 * be boosted by RT tasks, or might be boosted by 951 * interactivity modifiers. Will be RT if the task got 952 * RT-boosted. If not then it returns p->normal_prio. 953 */ 954 static int effective_prio(struct task_struct *p) 955 { 956 p->normal_prio = normal_prio(p); 957 /* 958 * If we are RT tasks or we were boosted to RT priority, 959 * keep the priority unchanged. Otherwise, update priority 960 * to the normal priority: 961 */ 962 if (!rt_prio(p->prio)) 963 return p->normal_prio; 964 return p->prio; 965 } 966 967 /** 968 * task_curr - is this task currently executing on a CPU? 969 * @p: the task in question. 970 * 971 * Return: 1 if the task is currently executing. 0 otherwise. 972 */ 973 inline int task_curr(const struct task_struct *p) 974 { 975 return cpu_curr(task_cpu(p)) == p; 976 } 977 978 /* 979 * Can drop rq->lock because from sched_class::switched_from() methods drop it. 
980 */ 981 static inline void check_class_changed(struct rq *rq, struct task_struct *p, 982 const struct sched_class *prev_class, 983 int oldprio) 984 { 985 if (prev_class != p->sched_class) { 986 if (prev_class->switched_from) 987 prev_class->switched_from(rq, p); 988 /* Possble rq->lock 'hole'. */ 989 p->sched_class->switched_to(rq, p); 990 } else if (oldprio != p->prio || dl_task(p)) 991 p->sched_class->prio_changed(rq, p, oldprio); 992 } 993 994 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) 995 { 996 const struct sched_class *class; 997 998 if (p->sched_class == rq->curr->sched_class) { 999 rq->curr->sched_class->check_preempt_curr(rq, p, flags); 1000 } else { 1001 for_each_class(class) { 1002 if (class == rq->curr->sched_class) 1003 break; 1004 if (class == p->sched_class) { 1005 resched_curr(rq); 1006 break; 1007 } 1008 } 1009 } 1010 1011 /* 1012 * A queue event has occurred, and we're going to schedule. In 1013 * this case, we can save a useless back to back clock update. 1014 */ 1015 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) 1016 rq_clock_skip_update(rq, true); 1017 } 1018 1019 static ATOMIC_NOTIFIER_HEAD(task_migration_notifier); 1020 1021 void register_task_migration_notifier(struct notifier_block *n) 1022 { 1023 atomic_notifier_chain_register(&task_migration_notifier, n); 1024 } 1025 1026 #ifdef CONFIG_SMP 1027 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 1028 { 1029 #ifdef CONFIG_SCHED_DEBUG 1030 /* 1031 * We should never call set_task_cpu() on a blocked task, 1032 * ttwu() will sort out the placement. 1033 */ 1034 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && 1035 !p->on_rq); 1036 1037 #ifdef CONFIG_LOCKDEP 1038 /* 1039 * The caller should hold either p->pi_lock or rq->lock, when changing 1040 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 1041 * 1042 * sched_move_task() holds both and thus holding either pins the cgroup, 1043 * see task_group(). 1044 * 1045 * Furthermore, all task_rq users should acquire both locks, see 1046 * task_rq_lock(). 1047 */ 1048 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 1049 lockdep_is_held(&task_rq(p)->lock))); 1050 #endif 1051 #endif 1052 1053 trace_sched_migrate_task(p, new_cpu); 1054 1055 if (task_cpu(p) != new_cpu) { 1056 struct task_migration_notifier tmn; 1057 1058 if (p->sched_class->migrate_task_rq) 1059 p->sched_class->migrate_task_rq(p, new_cpu); 1060 p->se.nr_migrations++; 1061 perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); 1062 1063 tmn.task = p; 1064 tmn.from_cpu = task_cpu(p); 1065 tmn.to_cpu = new_cpu; 1066 1067 atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn); 1068 } 1069 1070 __set_task_cpu(p, new_cpu); 1071 } 1072 1073 static void __migrate_swap_task(struct task_struct *p, int cpu) 1074 { 1075 if (task_on_rq_queued(p)) { 1076 struct rq *src_rq, *dst_rq; 1077 1078 src_rq = task_rq(p); 1079 dst_rq = cpu_rq(cpu); 1080 1081 deactivate_task(src_rq, p, 0); 1082 set_task_cpu(p, cpu); 1083 activate_task(dst_rq, p, 0); 1084 check_preempt_curr(dst_rq, p, 0); 1085 } else { 1086 /* 1087 * Task isn't running anymore; make it appear like we migrated 1088 * it before it went to sleep. This means on wakeup we make the 1089 * previous cpu our targer instead of where it really is. 
1090 */ 1091 p->wake_cpu = cpu; 1092 } 1093 } 1094 1095 struct migration_swap_arg { 1096 struct task_struct *src_task, *dst_task; 1097 int src_cpu, dst_cpu; 1098 }; 1099 1100 static int migrate_swap_stop(void *data) 1101 { 1102 struct migration_swap_arg *arg = data; 1103 struct rq *src_rq, *dst_rq; 1104 int ret = -EAGAIN; 1105 1106 src_rq = cpu_rq(arg->src_cpu); 1107 dst_rq = cpu_rq(arg->dst_cpu); 1108 1109 double_raw_lock(&arg->src_task->pi_lock, 1110 &arg->dst_task->pi_lock); 1111 double_rq_lock(src_rq, dst_rq); 1112 if (task_cpu(arg->dst_task) != arg->dst_cpu) 1113 goto unlock; 1114 1115 if (task_cpu(arg->src_task) != arg->src_cpu) 1116 goto unlock; 1117 1118 if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task))) 1119 goto unlock; 1120 1121 if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task))) 1122 goto unlock; 1123 1124 __migrate_swap_task(arg->src_task, arg->dst_cpu); 1125 __migrate_swap_task(arg->dst_task, arg->src_cpu); 1126 1127 ret = 0; 1128 1129 unlock: 1130 double_rq_unlock(src_rq, dst_rq); 1131 raw_spin_unlock(&arg->dst_task->pi_lock); 1132 raw_spin_unlock(&arg->src_task->pi_lock); 1133 1134 return ret; 1135 } 1136 1137 /* 1138 * Cross migrate two tasks 1139 */ 1140 int migrate_swap(struct task_struct *cur, struct task_struct *p) 1141 { 1142 struct migration_swap_arg arg; 1143 int ret = -EINVAL; 1144 1145 arg = (struct migration_swap_arg){ 1146 .src_task = cur, 1147 .src_cpu = task_cpu(cur), 1148 .dst_task = p, 1149 .dst_cpu = task_cpu(p), 1150 }; 1151 1152 if (arg.src_cpu == arg.dst_cpu) 1153 goto out; 1154 1155 /* 1156 * These three tests are all lockless; this is OK since all of them 1157 * will be re-checked with proper locks held further down the line. 1158 */ 1159 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 1160 goto out; 1161 1162 if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task))) 1163 goto out; 1164 1165 if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task))) 1166 goto out; 1167 1168 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 1169 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 1170 1171 out: 1172 return ret; 1173 } 1174 1175 struct migration_arg { 1176 struct task_struct *task; 1177 int dest_cpu; 1178 }; 1179 1180 static int migration_cpu_stop(void *data); 1181 1182 /* 1183 * wait_task_inactive - wait for a thread to unschedule. 1184 * 1185 * If @match_state is nonzero, it's the @p->state value just checked and 1186 * not expected to change. If it changes, i.e. @p might have woken up, 1187 * then return zero. When we succeed in waiting for @p to be off its CPU, 1188 * we return a positive number (its total switch count). If a second call 1189 * a short while later returns the same number, the caller can be sure that 1190 * @p has remained unscheduled the whole time. 1191 * 1192 * The caller must ensure that the task *will* unschedule sometime soon, 1193 * else this function might spin for a *long* time. This function can't 1194 * be called with interrupts off, or it may introduce deadlock with 1195 * smp_call_function() if an IPI is sent by the same process we are 1196 * waiting to become inactive. 1197 */ 1198 unsigned long wait_task_inactive(struct task_struct *p, long match_state) 1199 { 1200 unsigned long flags; 1201 int running, queued; 1202 unsigned long ncsw; 1203 struct rq *rq; 1204 1205 for (;;) { 1206 /* 1207 * We do the initial early heuristics without holding 1208 * any task-queue locks at all. 
We'll only try to get 1209 * the runqueue lock when things look like they will 1210 * work out! 1211 */ 1212 rq = task_rq(p); 1213 1214 /* 1215 * If the task is actively running on another CPU 1216 * still, just relax and busy-wait without holding 1217 * any locks. 1218 * 1219 * NOTE! Since we don't hold any locks, it's not 1220 * even sure that "rq" stays as the right runqueue! 1221 * But we don't care, since "task_running()" will 1222 * return false if the runqueue has changed and p 1223 * is actually now running somewhere else! 1224 */ 1225 while (task_running(rq, p)) { 1226 if (match_state && unlikely(p->state != match_state)) 1227 return 0; 1228 cpu_relax(); 1229 } 1230 1231 /* 1232 * Ok, time to look more closely! We need the rq 1233 * lock now, to be *sure*. If we're wrong, we'll 1234 * just go back and repeat. 1235 */ 1236 rq = task_rq_lock(p, &flags); 1237 trace_sched_wait_task(p); 1238 running = task_running(rq, p); 1239 queued = task_on_rq_queued(p); 1240 ncsw = 0; 1241 if (!match_state || p->state == match_state) 1242 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 1243 task_rq_unlock(rq, p, &flags); 1244 1245 /* 1246 * If it changed from the expected state, bail out now. 1247 */ 1248 if (unlikely(!ncsw)) 1249 break; 1250 1251 /* 1252 * Was it really running after all now that we 1253 * checked with the proper locks actually held? 1254 * 1255 * Oops. Go back and try again.. 1256 */ 1257 if (unlikely(running)) { 1258 cpu_relax(); 1259 continue; 1260 } 1261 1262 /* 1263 * It's not enough that it's not actively running, 1264 * it must be off the runqueue _entirely_, and not 1265 * preempted! 1266 * 1267 * So if it was still runnable (but just not actively 1268 * running right now), it's preempted, and we should 1269 * yield - it could be a while. 1270 */ 1271 if (unlikely(queued)) { 1272 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ); 1273 1274 set_current_state(TASK_UNINTERRUPTIBLE); 1275 schedule_hrtimeout(&to, HRTIMER_MODE_REL); 1276 continue; 1277 } 1278 1279 /* 1280 * Ahh, all good. It wasn't running, and it wasn't 1281 * runnable, which means that it will never become 1282 * running in the future either. We're all done! 1283 */ 1284 break; 1285 } 1286 1287 return ncsw; 1288 } 1289 1290 /*** 1291 * kick_process - kick a running thread to enter/exit the kernel 1292 * @p: the to-be-kicked thread 1293 * 1294 * Cause a process which is running on another CPU to enter 1295 * kernel-mode, without any delay. (to get signals handled.) 1296 * 1297 * NOTE: this function doesn't have to take the runqueue lock, 1298 * because all it wants to ensure is that the remote task enters 1299 * the kernel. If the IPI races and the task has been migrated 1300 * to another CPU then no harm is done and the purpose has been 1301 * achieved as well. 1302 */ 1303 void kick_process(struct task_struct *p) 1304 { 1305 int cpu; 1306 1307 preempt_disable(); 1308 cpu = task_cpu(p); 1309 if ((cpu != smp_processor_id()) && task_curr(p)) 1310 smp_send_reschedule(cpu); 1311 preempt_enable(); 1312 } 1313 EXPORT_SYMBOL_GPL(kick_process); 1314 #endif /* CONFIG_SMP */ 1315 1316 #ifdef CONFIG_SMP 1317 /* 1318 * ->cpus_allowed is protected by both rq->lock and p->pi_lock 1319 */ 1320 static int select_fallback_rq(int cpu, struct task_struct *p) 1321 { 1322 int nid = cpu_to_node(cpu); 1323 const struct cpumask *nodemask = NULL; 1324 enum { cpuset, possible, fail } state = cpuset; 1325 int dest_cpu; 1326 1327 /* 1328 * If the node that the cpu is on has been offlined, cpu_to_node() 1329 * will return -1. 
There is no cpu on the node, and we should 1330 * select the cpu on the other node. 1331 */ 1332 if (nid != -1) { 1333 nodemask = cpumask_of_node(nid); 1334 1335 /* Look for allowed, online CPU in same node. */ 1336 for_each_cpu(dest_cpu, nodemask) { 1337 if (!cpu_online(dest_cpu)) 1338 continue; 1339 if (!cpu_active(dest_cpu)) 1340 continue; 1341 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) 1342 return dest_cpu; 1343 } 1344 } 1345 1346 for (;;) { 1347 /* Any allowed, online CPU? */ 1348 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { 1349 if (!cpu_online(dest_cpu)) 1350 continue; 1351 if (!cpu_active(dest_cpu)) 1352 continue; 1353 goto out; 1354 } 1355 1356 switch (state) { 1357 case cpuset: 1358 /* No more Mr. Nice Guy. */ 1359 cpuset_cpus_allowed_fallback(p); 1360 state = possible; 1361 break; 1362 1363 case possible: 1364 do_set_cpus_allowed(p, cpu_possible_mask); 1365 state = fail; 1366 break; 1367 1368 case fail: 1369 BUG(); 1370 break; 1371 } 1372 } 1373 1374 out: 1375 if (state != cpuset) { 1376 /* 1377 * Don't tell them about moving exiting tasks or 1378 * kernel threads (both mm NULL), since they never 1379 * leave kernel. 1380 */ 1381 if (p->mm && printk_ratelimit()) { 1382 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 1383 task_pid_nr(p), p->comm, cpu); 1384 } 1385 } 1386 1387 return dest_cpu; 1388 } 1389 1390 /* 1391 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. 1392 */ 1393 static inline 1394 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) 1395 { 1396 if (p->nr_cpus_allowed > 1) 1397 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); 1398 1399 /* 1400 * In order not to call set_task_cpu() on a blocking task we need 1401 * to rely on ttwu() to place the task on a valid ->cpus_allowed 1402 * cpu. 1403 * 1404 * Since this is common to all placement strategies, this lives here. 
1405 * 1406 * [ this allows ->select_task() to simply return task_cpu(p) and 1407 * not worry about this generic constraint ] 1408 */ 1409 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) || 1410 !cpu_online(cpu))) 1411 cpu = select_fallback_rq(task_cpu(p), p); 1412 1413 return cpu; 1414 } 1415 1416 static void update_avg(u64 *avg, u64 sample) 1417 { 1418 s64 diff = sample - *avg; 1419 *avg += diff >> 3; 1420 } 1421 #endif 1422 1423 static void 1424 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 1425 { 1426 #ifdef CONFIG_SCHEDSTATS 1427 struct rq *rq = this_rq(); 1428 1429 #ifdef CONFIG_SMP 1430 int this_cpu = smp_processor_id(); 1431 1432 if (cpu == this_cpu) { 1433 schedstat_inc(rq, ttwu_local); 1434 schedstat_inc(p, se.statistics.nr_wakeups_local); 1435 } else { 1436 struct sched_domain *sd; 1437 1438 schedstat_inc(p, se.statistics.nr_wakeups_remote); 1439 rcu_read_lock(); 1440 for_each_domain(this_cpu, sd) { 1441 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 1442 schedstat_inc(sd, ttwu_wake_remote); 1443 break; 1444 } 1445 } 1446 rcu_read_unlock(); 1447 } 1448 1449 if (wake_flags & WF_MIGRATED) 1450 schedstat_inc(p, se.statistics.nr_wakeups_migrate); 1451 1452 #endif /* CONFIG_SMP */ 1453 1454 schedstat_inc(rq, ttwu_count); 1455 schedstat_inc(p, se.statistics.nr_wakeups); 1456 1457 if (wake_flags & WF_SYNC) 1458 schedstat_inc(p, se.statistics.nr_wakeups_sync); 1459 1460 #endif /* CONFIG_SCHEDSTATS */ 1461 } 1462 1463 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) 1464 { 1465 activate_task(rq, p, en_flags); 1466 p->on_rq = TASK_ON_RQ_QUEUED; 1467 1468 /* if a worker is waking up, notify workqueue */ 1469 if (p->flags & PF_WQ_WORKER) 1470 wq_worker_waking_up(p, cpu_of(rq)); 1471 } 1472 1473 /* 1474 * Mark the task runnable and perform wakeup-preemption. 1475 */ 1476 static void 1477 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) 1478 { 1479 check_preempt_curr(rq, p, wake_flags); 1480 trace_sched_wakeup(p, true); 1481 1482 p->state = TASK_RUNNING; 1483 #ifdef CONFIG_SMP 1484 if (p->sched_class->task_woken) 1485 p->sched_class->task_woken(rq, p); 1486 1487 if (rq->idle_stamp) { 1488 u64 delta = rq_clock(rq) - rq->idle_stamp; 1489 u64 max = 2*rq->max_idle_balance_cost; 1490 1491 update_avg(&rq->avg_idle, delta); 1492 1493 if (rq->avg_idle > max) 1494 rq->avg_idle = max; 1495 1496 rq->idle_stamp = 0; 1497 } 1498 #endif 1499 } 1500 1501 static void 1502 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) 1503 { 1504 #ifdef CONFIG_SMP 1505 if (p->sched_contributes_to_load) 1506 rq->nr_uninterruptible--; 1507 #endif 1508 1509 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); 1510 ttwu_do_wakeup(rq, p, wake_flags); 1511 } 1512 1513 /* 1514 * Called in case the task @p isn't fully descheduled from its runqueue, 1515 * in this case we must do a remote wakeup. Its a 'light' wakeup though, 1516 * since all we need to do is flip p->state to TASK_RUNNING, since 1517 * the task is still ->on_rq. 
1518 */ 1519 static int ttwu_remote(struct task_struct *p, int wake_flags) 1520 { 1521 struct rq *rq; 1522 int ret = 0; 1523 1524 rq = __task_rq_lock(p); 1525 if (task_on_rq_queued(p)) { 1526 /* check_preempt_curr() may use rq clock */ 1527 update_rq_clock(rq); 1528 ttwu_do_wakeup(rq, p, wake_flags); 1529 ret = 1; 1530 } 1531 __task_rq_unlock(rq); 1532 1533 return ret; 1534 } 1535 1536 #ifdef CONFIG_SMP 1537 void sched_ttwu_pending(void) 1538 { 1539 struct rq *rq = this_rq(); 1540 struct llist_node *llist = llist_del_all(&rq->wake_list); 1541 struct task_struct *p; 1542 unsigned long flags; 1543 1544 if (!llist) 1545 return; 1546 1547 raw_spin_lock_irqsave(&rq->lock, flags); 1548 1549 while (llist) { 1550 p = llist_entry(llist, struct task_struct, wake_entry); 1551 llist = llist_next(llist); 1552 ttwu_do_activate(rq, p, 0); 1553 } 1554 1555 raw_spin_unlock_irqrestore(&rq->lock, flags); 1556 } 1557 1558 void scheduler_ipi(void) 1559 { 1560 /* 1561 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting 1562 * TIF_NEED_RESCHED remotely (for the first time) will also send 1563 * this IPI. 1564 */ 1565 preempt_fold_need_resched(); 1566 1567 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) 1568 return; 1569 1570 /* 1571 * Not all reschedule IPI handlers call irq_enter/irq_exit, since 1572 * traditionally all their work was done from the interrupt return 1573 * path. Now that we actually do some work, we need to make sure 1574 * we do call them. 1575 * 1576 * Some archs already do call them, luckily irq_enter/exit nest 1577 * properly. 1578 * 1579 * Arguably we should visit all archs and update all handlers, 1580 * however a fair share of IPIs are still resched only so this would 1581 * somewhat pessimize the simple resched case. 1582 */ 1583 irq_enter(); 1584 sched_ttwu_pending(); 1585 1586 /* 1587 * Check if someone kicked us for doing the nohz idle load balance. 
1588 */ 1589 if (unlikely(got_nohz_idle_kick())) { 1590 this_rq()->idle_balance = 1; 1591 raise_softirq_irqoff(SCHED_SOFTIRQ); 1592 } 1593 irq_exit(); 1594 } 1595 1596 static void ttwu_queue_remote(struct task_struct *p, int cpu) 1597 { 1598 struct rq *rq = cpu_rq(cpu); 1599 1600 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) { 1601 if (!set_nr_if_polling(rq->idle)) 1602 smp_send_reschedule(cpu); 1603 else 1604 trace_sched_wake_idle_without_ipi(cpu); 1605 } 1606 } 1607 1608 void wake_up_if_idle(int cpu) 1609 { 1610 struct rq *rq = cpu_rq(cpu); 1611 unsigned long flags; 1612 1613 rcu_read_lock(); 1614 1615 if (!is_idle_task(rcu_dereference(rq->curr))) 1616 goto out; 1617 1618 if (set_nr_if_polling(rq->idle)) { 1619 trace_sched_wake_idle_without_ipi(cpu); 1620 } else { 1621 raw_spin_lock_irqsave(&rq->lock, flags); 1622 if (is_idle_task(rq->curr)) 1623 smp_send_reschedule(cpu); 1624 /* Else cpu is not in idle, do nothing here */ 1625 raw_spin_unlock_irqrestore(&rq->lock, flags); 1626 } 1627 1628 out: 1629 rcu_read_unlock(); 1630 } 1631 1632 bool cpus_share_cache(int this_cpu, int that_cpu) 1633 { 1634 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 1635 } 1636 #endif /* CONFIG_SMP */ 1637 1638 static void ttwu_queue(struct task_struct *p, int cpu) 1639 { 1640 struct rq *rq = cpu_rq(cpu); 1641 1642 #if defined(CONFIG_SMP) 1643 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { 1644 sched_clock_cpu(cpu); /* sync clocks x-cpu */ 1645 ttwu_queue_remote(p, cpu); 1646 return; 1647 } 1648 #endif 1649 1650 raw_spin_lock(&rq->lock); 1651 ttwu_do_activate(rq, p, 0); 1652 raw_spin_unlock(&rq->lock); 1653 } 1654 1655 /** 1656 * try_to_wake_up - wake up a thread 1657 * @p: the thread to be awakened 1658 * @state: the mask of task states that can be woken 1659 * @wake_flags: wake modifier flags (WF_*) 1660 * 1661 * Put it on the run-queue if it's not already there. The "current" 1662 * thread is always on the run-queue (except when the actual 1663 * re-schedule is in progress), and as such you're allowed to do 1664 * the simpler "current->state = TASK_RUNNING" to mark yourself 1665 * runnable without the overhead of this. 1666 * 1667 * Return: %true if @p was woken up, %false if it was already running. 1668 * or @state didn't match @p's state. 1669 */ 1670 static int 1671 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 1672 { 1673 unsigned long flags; 1674 int cpu, success = 0; 1675 1676 /* 1677 * If we are going to wake up a thread waiting for CONDITION we 1678 * need to ensure that CONDITION=1 done by the caller can not be 1679 * reordered with p->state check below. This pairs with mb() in 1680 * set_current_state() the waiting thread does. 1681 */ 1682 smp_mb__before_spinlock(); 1683 raw_spin_lock_irqsave(&p->pi_lock, flags); 1684 if (!(p->state & state)) 1685 goto out; 1686 1687 success = 1; /* we're going to change ->state */ 1688 cpu = task_cpu(p); 1689 1690 if (p->on_rq && ttwu_remote(p, wake_flags)) 1691 goto stat; 1692 1693 #ifdef CONFIG_SMP 1694 /* 1695 * If the owning (remote) cpu is still in the middle of schedule() with 1696 * this task as prev, wait until its done referencing the task. 1697 */ 1698 while (p->on_cpu) 1699 cpu_relax(); 1700 /* 1701 * Pairs with the smp_wmb() in finish_lock_switch(). 
1702 */ 1703 smp_rmb(); 1704 1705 p->sched_contributes_to_load = !!task_contributes_to_load(p); 1706 p->state = TASK_WAKING; 1707 1708 if (p->sched_class->task_waking) 1709 p->sched_class->task_waking(p); 1710 1711 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); 1712 if (task_cpu(p) != cpu) { 1713 wake_flags |= WF_MIGRATED; 1714 set_task_cpu(p, cpu); 1715 } 1716 #endif /* CONFIG_SMP */ 1717 1718 ttwu_queue(p, cpu); 1719 stat: 1720 ttwu_stat(p, cpu, wake_flags); 1721 out: 1722 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 1723 1724 return success; 1725 } 1726 1727 /** 1728 * try_to_wake_up_local - try to wake up a local task with rq lock held 1729 * @p: the thread to be awakened 1730 * 1731 * Put @p on the run-queue if it's not already there. The caller must 1732 * ensure that this_rq() is locked, @p is bound to this_rq() and not 1733 * the current task. 1734 */ 1735 static void try_to_wake_up_local(struct task_struct *p) 1736 { 1737 struct rq *rq = task_rq(p); 1738 1739 if (WARN_ON_ONCE(rq != this_rq()) || 1740 WARN_ON_ONCE(p == current)) 1741 return; 1742 1743 lockdep_assert_held(&rq->lock); 1744 1745 if (!raw_spin_trylock(&p->pi_lock)) { 1746 raw_spin_unlock(&rq->lock); 1747 raw_spin_lock(&p->pi_lock); 1748 raw_spin_lock(&rq->lock); 1749 } 1750 1751 if (!(p->state & TASK_NORMAL)) 1752 goto out; 1753 1754 if (!task_on_rq_queued(p)) 1755 ttwu_activate(rq, p, ENQUEUE_WAKEUP); 1756 1757 ttwu_do_wakeup(rq, p, 0); 1758 ttwu_stat(p, smp_processor_id(), 0); 1759 out: 1760 raw_spin_unlock(&p->pi_lock); 1761 } 1762 1763 /** 1764 * wake_up_process - Wake up a specific process 1765 * @p: The process to be woken up. 1766 * 1767 * Attempt to wake up the nominated process and move it to the set of runnable 1768 * processes. 1769 * 1770 * Return: 1 if the process was woken up, 0 if it was already running. 1771 * 1772 * It may be assumed that this function implies a write memory barrier before 1773 * changing the task state if and only if any tasks are woken up. 1774 */ 1775 int wake_up_process(struct task_struct *p) 1776 { 1777 WARN_ON(task_is_stopped_or_traced(p)); 1778 return try_to_wake_up(p, TASK_NORMAL, 0); 1779 } 1780 EXPORT_SYMBOL(wake_up_process); 1781 1782 int wake_up_state(struct task_struct *p, unsigned int state) 1783 { 1784 return try_to_wake_up(p, state, 0); 1785 } 1786 1787 /* 1788 * This function clears the sched_dl_entity static params. 1789 */ 1790 void __dl_clear_params(struct task_struct *p) 1791 { 1792 struct sched_dl_entity *dl_se = &p->dl; 1793 1794 dl_se->dl_runtime = 0; 1795 dl_se->dl_deadline = 0; 1796 dl_se->dl_period = 0; 1797 dl_se->flags = 0; 1798 dl_se->dl_bw = 0; 1799 1800 dl_se->dl_throttled = 0; 1801 dl_se->dl_new = 1; 1802 dl_se->dl_yielded = 0; 1803 } 1804 1805 /* 1806 * Perform scheduler related setup for a newly forked process p. 1807 * p is forked by current. 
1808 * 1809 * __sched_fork() is basic setup used by init_idle() too: 1810 */ 1811 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 1812 { 1813 p->on_rq = 0; 1814 1815 p->se.on_rq = 0; 1816 p->se.exec_start = 0; 1817 p->se.sum_exec_runtime = 0; 1818 p->se.prev_sum_exec_runtime = 0; 1819 p->se.nr_migrations = 0; 1820 p->se.vruntime = 0; 1821 #ifdef CONFIG_SMP 1822 p->se.avg.decay_count = 0; 1823 #endif 1824 INIT_LIST_HEAD(&p->se.group_node); 1825 1826 #ifdef CONFIG_SCHEDSTATS 1827 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 1828 #endif 1829 1830 RB_CLEAR_NODE(&p->dl.rb_node); 1831 init_dl_task_timer(&p->dl); 1832 __dl_clear_params(p); 1833 1834 INIT_LIST_HEAD(&p->rt.run_list); 1835 1836 #ifdef CONFIG_PREEMPT_NOTIFIERS 1837 INIT_HLIST_HEAD(&p->preempt_notifiers); 1838 #endif 1839 1840 #ifdef CONFIG_NUMA_BALANCING 1841 if (p->mm && atomic_read(&p->mm->mm_users) == 1) { 1842 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 1843 p->mm->numa_scan_seq = 0; 1844 } 1845 1846 if (clone_flags & CLONE_VM) 1847 p->numa_preferred_nid = current->numa_preferred_nid; 1848 else 1849 p->numa_preferred_nid = -1; 1850 1851 p->node_stamp = 0ULL; 1852 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; 1853 p->numa_scan_period = sysctl_numa_balancing_scan_delay; 1854 p->numa_work.next = &p->numa_work; 1855 p->numa_faults = NULL; 1856 p->last_task_numa_placement = 0; 1857 p->last_sum_exec_runtime = 0; 1858 1859 p->numa_group = NULL; 1860 #endif /* CONFIG_NUMA_BALANCING */ 1861 } 1862 1863 #ifdef CONFIG_NUMA_BALANCING 1864 #ifdef CONFIG_SCHED_DEBUG 1865 void set_numabalancing_state(bool enabled) 1866 { 1867 if (enabled) 1868 sched_feat_set("NUMA"); 1869 else 1870 sched_feat_set("NO_NUMA"); 1871 } 1872 #else 1873 __read_mostly bool numabalancing_enabled; 1874 1875 void set_numabalancing_state(bool enabled) 1876 { 1877 numabalancing_enabled = enabled; 1878 } 1879 #endif /* CONFIG_SCHED_DEBUG */ 1880 1881 #ifdef CONFIG_PROC_SYSCTL 1882 int sysctl_numa_balancing(struct ctl_table *table, int write, 1883 void __user *buffer, size_t *lenp, loff_t *ppos) 1884 { 1885 struct ctl_table t; 1886 int err; 1887 int state = numabalancing_enabled; 1888 1889 if (write && !capable(CAP_SYS_ADMIN)) 1890 return -EPERM; 1891 1892 t = *table; 1893 t.data = &state; 1894 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 1895 if (err < 0) 1896 return err; 1897 if (write) 1898 set_numabalancing_state(state); 1899 return err; 1900 } 1901 #endif 1902 #endif 1903 1904 /* 1905 * fork()/clone()-time setup: 1906 */ 1907 int sched_fork(unsigned long clone_flags, struct task_struct *p) 1908 { 1909 unsigned long flags; 1910 int cpu = get_cpu(); 1911 1912 __sched_fork(clone_flags, p); 1913 /* 1914 * We mark the process as running here. This guarantees that 1915 * nobody will actually run it, and a signal or other external 1916 * event cannot wake it up and insert it on the runqueue either. 1917 */ 1918 p->state = TASK_RUNNING; 1919 1920 /* 1921 * Make sure we do not leak PI boosting priority to the child. 1922 */ 1923 p->prio = current->normal_prio; 1924 1925 /* 1926 * Revert to default priority/policy on fork if requested. 
1927 */ 1928 if (unlikely(p->sched_reset_on_fork)) { 1929 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 1930 p->policy = SCHED_NORMAL; 1931 p->static_prio = NICE_TO_PRIO(0); 1932 p->rt_priority = 0; 1933 } else if (PRIO_TO_NICE(p->static_prio) < 0) 1934 p->static_prio = NICE_TO_PRIO(0); 1935 1936 p->prio = p->normal_prio = __normal_prio(p); 1937 set_load_weight(p); 1938 1939 /* 1940 * We don't need the reset flag anymore after the fork. It has 1941 * fulfilled its duty: 1942 */ 1943 p->sched_reset_on_fork = 0; 1944 } 1945 1946 if (dl_prio(p->prio)) { 1947 put_cpu(); 1948 return -EAGAIN; 1949 } else if (rt_prio(p->prio)) { 1950 p->sched_class = &rt_sched_class; 1951 } else { 1952 p->sched_class = &fair_sched_class; 1953 } 1954 1955 if (p->sched_class->task_fork) 1956 p->sched_class->task_fork(p); 1957 1958 /* 1959 * The child is not yet in the pid-hash so no cgroup attach races, 1960 * and the cgroup is pinned to this child due to cgroup_fork() 1961 * is ran before sched_fork(). 1962 * 1963 * Silence PROVE_RCU. 1964 */ 1965 raw_spin_lock_irqsave(&p->pi_lock, flags); 1966 set_task_cpu(p, cpu); 1967 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 1968 1969 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1970 if (likely(sched_info_on())) 1971 memset(&p->sched_info, 0, sizeof(p->sched_info)); 1972 #endif 1973 #if defined(CONFIG_SMP) 1974 p->on_cpu = 0; 1975 #endif 1976 init_task_preempt_count(p); 1977 #ifdef CONFIG_SMP 1978 plist_node_init(&p->pushable_tasks, MAX_PRIO); 1979 RB_CLEAR_NODE(&p->pushable_dl_tasks); 1980 #endif 1981 1982 put_cpu(); 1983 return 0; 1984 } 1985 1986 unsigned long to_ratio(u64 period, u64 runtime) 1987 { 1988 if (runtime == RUNTIME_INF) 1989 return 1ULL << 20; 1990 1991 /* 1992 * Doing this here saves a lot of checks in all 1993 * the calling paths, and returning zero seems 1994 * safe for them anyway. 1995 */ 1996 if (period == 0) 1997 return 0; 1998 1999 return div64_u64(runtime << 20, period); 2000 } 2001 2002 #ifdef CONFIG_SMP 2003 inline struct dl_bw *dl_bw_of(int i) 2004 { 2005 rcu_lockdep_assert(rcu_read_lock_sched_held(), 2006 "sched RCU must be held"); 2007 return &cpu_rq(i)->rd->dl_bw; 2008 } 2009 2010 static inline int dl_bw_cpus(int i) 2011 { 2012 struct root_domain *rd = cpu_rq(i)->rd; 2013 int cpus = 0; 2014 2015 rcu_lockdep_assert(rcu_read_lock_sched_held(), 2016 "sched RCU must be held"); 2017 for_each_cpu_and(i, rd->span, cpu_active_mask) 2018 cpus++; 2019 2020 return cpus; 2021 } 2022 #else 2023 inline struct dl_bw *dl_bw_of(int i) 2024 { 2025 return &cpu_rq(i)->dl.dl_bw; 2026 } 2027 2028 static inline int dl_bw_cpus(int i) 2029 { 2030 return 1; 2031 } 2032 #endif 2033 2034 /* 2035 * We must be sure that accepting a new task (or allowing changing the 2036 * parameters of an existing one) is consistent with the bandwidth 2037 * constraints. If yes, this function also accordingly updates the currently 2038 * allocated bandwidth to reflect the new situation. 2039 * 2040 * This function is called while holding p's rq->lock. 2041 * 2042 * XXX we should delay bw change until the task's 0-lag point, see 2043 * __setparam_dl(). 2044 */ 2045 static int dl_overflow(struct task_struct *p, int policy, 2046 const struct sched_attr *attr) 2047 { 2048 2049 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 2050 u64 period = attr->sched_period ?: attr->sched_deadline; 2051 u64 runtime = attr->sched_runtime; 2052 u64 new_bw = dl_policy(policy) ? 
to_ratio(period, runtime) : 0; 2053 int cpus, err = -1; 2054 2055 if (new_bw == p->dl.dl_bw) 2056 return 0; 2057 2058 /* 2059 * Either if a task, enters, leave, or stays -deadline but changes 2060 * its parameters, we may need to update accordingly the total 2061 * allocated bandwidth of the container. 2062 */ 2063 raw_spin_lock(&dl_b->lock); 2064 cpus = dl_bw_cpus(task_cpu(p)); 2065 if (dl_policy(policy) && !task_has_dl_policy(p) && 2066 !__dl_overflow(dl_b, cpus, 0, new_bw)) { 2067 __dl_add(dl_b, new_bw); 2068 err = 0; 2069 } else if (dl_policy(policy) && task_has_dl_policy(p) && 2070 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) { 2071 __dl_clear(dl_b, p->dl.dl_bw); 2072 __dl_add(dl_b, new_bw); 2073 err = 0; 2074 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { 2075 __dl_clear(dl_b, p->dl.dl_bw); 2076 err = 0; 2077 } 2078 raw_spin_unlock(&dl_b->lock); 2079 2080 return err; 2081 } 2082 2083 extern void init_dl_bw(struct dl_bw *dl_b); 2084 2085 /* 2086 * wake_up_new_task - wake up a newly created task for the first time. 2087 * 2088 * This function will do some initial scheduler statistics housekeeping 2089 * that must be done for every newly created context, then puts the task 2090 * on the runqueue and wakes it. 2091 */ 2092 void wake_up_new_task(struct task_struct *p) 2093 { 2094 unsigned long flags; 2095 struct rq *rq; 2096 2097 raw_spin_lock_irqsave(&p->pi_lock, flags); 2098 #ifdef CONFIG_SMP 2099 /* 2100 * Fork balancing, do it here and not earlier because: 2101 * - cpus_allowed can change in the fork path 2102 * - any previously selected cpu might disappear through hotplug 2103 */ 2104 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 2105 #endif 2106 2107 /* Initialize new task's runnable average */ 2108 init_task_runnable_average(p); 2109 rq = __task_rq_lock(p); 2110 activate_task(rq, p, 0); 2111 p->on_rq = TASK_ON_RQ_QUEUED; 2112 trace_sched_wakeup_new(p, true); 2113 check_preempt_curr(rq, p, WF_FORK); 2114 #ifdef CONFIG_SMP 2115 if (p->sched_class->task_woken) 2116 p->sched_class->task_woken(rq, p); 2117 #endif 2118 task_rq_unlock(rq, p, &flags); 2119 } 2120 2121 #ifdef CONFIG_PREEMPT_NOTIFIERS 2122 2123 /** 2124 * preempt_notifier_register - tell me when current is being preempted & rescheduled 2125 * @notifier: notifier struct to register 2126 */ 2127 void preempt_notifier_register(struct preempt_notifier *notifier) 2128 { 2129 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 2130 } 2131 EXPORT_SYMBOL_GPL(preempt_notifier_register); 2132 2133 /** 2134 * preempt_notifier_unregister - no longer interested in preemption notifications 2135 * @notifier: notifier struct to unregister 2136 * 2137 * This is safe to call from within a preemption notifier. 
2138 */ 2139 void preempt_notifier_unregister(struct preempt_notifier *notifier) 2140 { 2141 hlist_del(&notifier->link); 2142 } 2143 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 2144 2145 static void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2146 { 2147 struct preempt_notifier *notifier; 2148 2149 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2150 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 2151 } 2152 2153 static void 2154 fire_sched_out_preempt_notifiers(struct task_struct *curr, 2155 struct task_struct *next) 2156 { 2157 struct preempt_notifier *notifier; 2158 2159 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2160 notifier->ops->sched_out(notifier, next); 2161 } 2162 2163 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 2164 2165 static void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2166 { 2167 } 2168 2169 static void 2170 fire_sched_out_preempt_notifiers(struct task_struct *curr, 2171 struct task_struct *next) 2172 { 2173 } 2174 2175 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 2176 2177 /** 2178 * prepare_task_switch - prepare to switch tasks 2179 * @rq: the runqueue preparing to switch 2180 * @prev: the current task that is being switched out 2181 * @next: the task we are going to switch to. 2182 * 2183 * This is called with the rq lock held and interrupts off. It must 2184 * be paired with a subsequent finish_task_switch after the context 2185 * switch. 2186 * 2187 * prepare_task_switch sets up locking and calls architecture specific 2188 * hooks. 2189 */ 2190 static inline void 2191 prepare_task_switch(struct rq *rq, struct task_struct *prev, 2192 struct task_struct *next) 2193 { 2194 trace_sched_switch(prev, next); 2195 sched_info_switch(rq, prev, next); 2196 perf_event_task_sched_out(prev, next); 2197 fire_sched_out_preempt_notifiers(prev, next); 2198 prepare_lock_switch(rq, next); 2199 prepare_arch_switch(next); 2200 } 2201 2202 /** 2203 * finish_task_switch - clean up after a task-switch 2204 * @prev: the thread we just switched away from. 2205 * 2206 * finish_task_switch must be called after the context switch, paired 2207 * with a prepare_task_switch call before the context switch. 2208 * finish_task_switch will reconcile locking set up by prepare_task_switch, 2209 * and do any other architecture-specific cleanup actions. 2210 * 2211 * Note that we may have delayed dropping an mm in context_switch(). If 2212 * so, we finish that here outside of the runqueue lock. (Doing it 2213 * with the lock held can cause deadlocks; see schedule() for 2214 * details.) 2215 * 2216 * The context switch has flipped the stack from under us and restored the 2217 * local variables which were saved when this task called schedule() in the 2218 * past. prev == current is still correct but we need to recalculate this_rq 2219 * because prev may have moved to another CPU. 2220 */ 2221 static struct rq *finish_task_switch(struct task_struct *prev) 2222 __releases(rq->lock) 2223 { 2224 struct rq *rq = this_rq(); 2225 struct mm_struct *mm = rq->prev_mm; 2226 long prev_state; 2227 2228 rq->prev_mm = NULL; 2229 2230 /* 2231 * A task struct has one reference for the use as "current". 2232 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 2233 * schedule one last time. The schedule call will never return, and 2234 * the scheduled task must drop that reference.
2235 * The test for TASK_DEAD must occur while the runqueue locks are 2236 * still held, otherwise prev could be scheduled on another cpu, die 2237 * there before we look at prev->state, and then the reference would 2238 * be dropped twice. 2239 * Manfred Spraul <manfred@colorfullife.com> 2240 */ 2241 prev_state = prev->state; 2242 vtime_task_switch(prev); 2243 finish_arch_switch(prev); 2244 perf_event_task_sched_in(prev, current); 2245 finish_lock_switch(rq, prev); 2246 finish_arch_post_lock_switch(); 2247 2248 fire_sched_in_preempt_notifiers(current); 2249 if (mm) 2250 mmdrop(mm); 2251 if (unlikely(prev_state == TASK_DEAD)) { 2252 if (prev->sched_class->task_dead) 2253 prev->sched_class->task_dead(prev); 2254 2255 /* 2256 * Remove function-return probe instances associated with this 2257 * task and put them back on the free list. 2258 */ 2259 kprobe_flush_task(prev); 2260 put_task_struct(prev); 2261 } 2262 2263 tick_nohz_task_switch(current); 2264 return rq; 2265 } 2266 2267 #ifdef CONFIG_SMP 2268 2269 /* rq->lock is NOT held, but preemption is disabled */ 2270 static inline void post_schedule(struct rq *rq) 2271 { 2272 if (rq->post_schedule) { 2273 unsigned long flags; 2274 2275 raw_spin_lock_irqsave(&rq->lock, flags); 2276 if (rq->curr->sched_class->post_schedule) 2277 rq->curr->sched_class->post_schedule(rq); 2278 raw_spin_unlock_irqrestore(&rq->lock, flags); 2279 2280 rq->post_schedule = 0; 2281 } 2282 } 2283 2284 #else 2285 2286 static inline void post_schedule(struct rq *rq) 2287 { 2288 } 2289 2290 #endif 2291 2292 /** 2293 * schedule_tail - first thing a freshly forked thread must call. 2294 * @prev: the thread we just switched away from. 2295 */ 2296 asmlinkage __visible void schedule_tail(struct task_struct *prev) 2297 __releases(rq->lock) 2298 { 2299 struct rq *rq; 2300 2301 /* finish_task_switch() drops rq->lock and enables preemption */ 2302 preempt_disable(); 2303 rq = finish_task_switch(prev); 2304 post_schedule(rq); 2305 preempt_enable(); 2306 2307 if (current->set_child_tid) 2308 put_user(task_pid_vnr(current), current->set_child_tid); 2309 } 2310 2311 /* 2312 * context_switch - switch to the new MM and the new thread's register state. 2313 */ 2314 static inline struct rq * 2315 context_switch(struct rq *rq, struct task_struct *prev, 2316 struct task_struct *next) 2317 { 2318 struct mm_struct *mm, *oldmm; 2319 2320 prepare_task_switch(rq, prev, next); 2321 2322 mm = next->mm; 2323 oldmm = prev->active_mm; 2324 /* 2325 * For paravirt, this is coupled with an exit in switch_to to 2326 * combine the page table reload and the switch backend into 2327 * one hypercall. 2328 */ 2329 arch_start_context_switch(prev); 2330 2331 if (!mm) { 2332 next->active_mm = oldmm; 2333 atomic_inc(&oldmm->mm_count); 2334 enter_lazy_tlb(oldmm, next); 2335 } else 2336 switch_mm(oldmm, mm, next); 2337 2338 if (!prev->mm) { 2339 prev->active_mm = NULL; 2340 rq->prev_mm = oldmm; 2341 } 2342 /* 2343 * The runqueue lock will be released by the next 2344 * task (which is an invalid locking op but in the case 2345 * of the scheduler it's an obvious special-case), so we 2346 * do an early lockdep release here: 2347 */ 2348 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 2349 2350 context_tracking_task_switch(prev, next); 2351 /* Here we just switch the register state and the stack.
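 * prev appears twice in switch_to() below because the macro also
 * writes, into its last argument, the task that was running right
 * before this task got scheduled back in; the copy of prev saved on
 * our stack before the switch would be stale by then, and it is that
 * returned task which finish_task_switch() has to clean up after.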
*/ 2352 switch_to(prev, next, prev); 2353 barrier(); 2354 2355 return finish_task_switch(prev); 2356 } 2357 2358 /* 2359 * nr_running and nr_context_switches: 2360 * 2361 * externally visible scheduler statistics: current number of runnable 2362 * threads, total number of context switches performed since bootup. 2363 */ 2364 unsigned long nr_running(void) 2365 { 2366 unsigned long i, sum = 0; 2367 2368 for_each_online_cpu(i) 2369 sum += cpu_rq(i)->nr_running; 2370 2371 return sum; 2372 } 2373 2374 /* 2375 * Check if only the current task is running on the cpu. 2376 */ 2377 bool single_task_running(void) 2378 { 2379 if (cpu_rq(smp_processor_id())->nr_running == 1) 2380 return true; 2381 else 2382 return false; 2383 } 2384 EXPORT_SYMBOL(single_task_running); 2385 2386 unsigned long long nr_context_switches(void) 2387 { 2388 int i; 2389 unsigned long long sum = 0; 2390 2391 for_each_possible_cpu(i) 2392 sum += cpu_rq(i)->nr_switches; 2393 2394 return sum; 2395 } 2396 2397 unsigned long nr_iowait(void) 2398 { 2399 unsigned long i, sum = 0; 2400 2401 for_each_possible_cpu(i) 2402 sum += atomic_read(&cpu_rq(i)->nr_iowait); 2403 2404 return sum; 2405 } 2406 2407 unsigned long nr_iowait_cpu(int cpu) 2408 { 2409 struct rq *this = cpu_rq(cpu); 2410 return atomic_read(&this->nr_iowait); 2411 } 2412 2413 void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) 2414 { 2415 struct rq *this = this_rq(); 2416 *nr_waiters = atomic_read(&this->nr_iowait); 2417 *load = this->cpu_load[0]; 2418 } 2419 2420 #ifdef CONFIG_SMP 2421 2422 /* 2423 * sched_exec - execve() is a valuable balancing opportunity, because at 2424 * this point the task has the smallest effective memory and cache footprint. 2425 */ 2426 void sched_exec(void) 2427 { 2428 struct task_struct *p = current; 2429 unsigned long flags; 2430 int dest_cpu; 2431 2432 raw_spin_lock_irqsave(&p->pi_lock, flags); 2433 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); 2434 if (dest_cpu == smp_processor_id()) 2435 goto unlock; 2436 2437 if (likely(cpu_active(dest_cpu))) { 2438 struct migration_arg arg = { p, dest_cpu }; 2439 2440 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2441 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 2442 return; 2443 } 2444 unlock: 2445 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2446 } 2447 2448 #endif 2449 2450 DEFINE_PER_CPU(struct kernel_stat, kstat); 2451 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 2452 2453 EXPORT_PER_CPU_SYMBOL(kstat); 2454 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 2455 2456 /* 2457 * Return accounted runtime for the task. 2458 * In case the task is currently running, return the runtime plus current's 2459 * pending runtime that have not been accounted yet. 2460 */ 2461 unsigned long long task_sched_runtime(struct task_struct *p) 2462 { 2463 unsigned long flags; 2464 struct rq *rq; 2465 u64 ns; 2466 2467 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 2468 /* 2469 * 64-bit doesn't need locks to atomically read a 64bit value. 2470 * So we have a optimization chance when the task's delta_exec is 0. 2471 * Reading ->on_cpu is racy, but this is ok. 2472 * 2473 * If we race with it leaving cpu, we'll take a lock. So we're correct. 2474 * If we race with it entering cpu, unaccounted time is 0. This is 2475 * indistinguishable from the read occurring a few cycles earlier. 2476 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 2477 * been accounted, so we're correct here as well. 
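 *
 * (This lockless fast path is only compiled for CONFIG_64BIT && CONFIG_SMP:
 * a 64-bit load is not atomic on 32-bit, and ->on_cpu is only maintained
 * when CONFIG_SMP is set.)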
2478 */ 2479 if (!p->on_cpu || !task_on_rq_queued(p)) 2480 return p->se.sum_exec_runtime; 2481 #endif 2482 2483 rq = task_rq_lock(p, &flags); 2484 /* 2485 * Must be ->curr _and_ ->on_rq. If dequeued, we would 2486 * project cycles that may never be accounted to this 2487 * thread, breaking clock_gettime(). 2488 */ 2489 if (task_current(rq, p) && task_on_rq_queued(p)) { 2490 update_rq_clock(rq); 2491 p->sched_class->update_curr(rq); 2492 } 2493 ns = p->se.sum_exec_runtime; 2494 task_rq_unlock(rq, p, &flags); 2495 2496 return ns; 2497 } 2498 2499 /* 2500 * This function gets called by the timer code, with HZ frequency. 2501 * We call it with interrupts disabled. 2502 */ 2503 void scheduler_tick(void) 2504 { 2505 int cpu = smp_processor_id(); 2506 struct rq *rq = cpu_rq(cpu); 2507 struct task_struct *curr = rq->curr; 2508 2509 sched_clock_tick(); 2510 2511 raw_spin_lock(&rq->lock); 2512 update_rq_clock(rq); 2513 curr->sched_class->task_tick(rq, curr, 0); 2514 update_cpu_load_active(rq); 2515 raw_spin_unlock(&rq->lock); 2516 2517 perf_event_task_tick(); 2518 2519 #ifdef CONFIG_SMP 2520 rq->idle_balance = idle_cpu(cpu); 2521 trigger_load_balance(rq); 2522 #endif 2523 rq_last_tick_reset(rq); 2524 } 2525 2526 #ifdef CONFIG_NO_HZ_FULL 2527 /** 2528 * scheduler_tick_max_deferment 2529 * 2530 * Keep at least one tick per second when a single 2531 * active task is running because the scheduler doesn't 2532 * yet completely support full dynticks environment. 2533 * 2534 * This makes sure that uptime, CFS vruntime, load 2535 * balancing, etc... continue to move forward, even 2536 * with a very low granularity. 2537 * 2538 * Return: Maximum deferment in nanoseconds. 2539 */ 2540 u64 scheduler_tick_max_deferment(void) 2541 { 2542 struct rq *rq = this_rq(); 2543 unsigned long next, now = ACCESS_ONCE(jiffies); 2544 2545 next = rq->last_sched_tick + HZ; 2546 2547 if (time_before_eq(next, now)) 2548 return 0; 2549 2550 return jiffies_to_nsecs(next - now); 2551 } 2552 #endif 2553 2554 notrace unsigned long get_parent_ip(unsigned long addr) 2555 { 2556 if (in_lock_functions(addr)) { 2557 addr = CALLER_ADDR2; 2558 if (in_lock_functions(addr)) 2559 addr = CALLER_ADDR3; 2560 } 2561 return addr; 2562 } 2563 2564 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ 2565 defined(CONFIG_PREEMPT_TRACER)) 2566 2567 void preempt_count_add(int val) 2568 { 2569 #ifdef CONFIG_DEBUG_PREEMPT 2570 /* 2571 * Underflow? 2572 */ 2573 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 2574 return; 2575 #endif 2576 __preempt_count_add(val); 2577 #ifdef CONFIG_DEBUG_PREEMPT 2578 /* 2579 * Spinlock count overflowing soon? 2580 */ 2581 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 2582 PREEMPT_MASK - 10); 2583 #endif 2584 if (preempt_count() == val) { 2585 unsigned long ip = get_parent_ip(CALLER_ADDR1); 2586 #ifdef CONFIG_DEBUG_PREEMPT 2587 current->preempt_disable_ip = ip; 2588 #endif 2589 trace_preempt_off(CALLER_ADDR0, ip); 2590 } 2591 } 2592 EXPORT_SYMBOL(preempt_count_add); 2593 NOKPROBE_SYMBOL(preempt_count_add); 2594 2595 void preempt_count_sub(int val) 2596 { 2597 #ifdef CONFIG_DEBUG_PREEMPT 2598 /* 2599 * Underflow? 2600 */ 2601 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 2602 return; 2603 /* 2604 * Is the spinlock portion underflowing? 
2605 */ 2606 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 2607 !(preempt_count() & PREEMPT_MASK))) 2608 return; 2609 #endif 2610 2611 if (preempt_count() == val) 2612 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); 2613 __preempt_count_sub(val); 2614 } 2615 EXPORT_SYMBOL(preempt_count_sub); 2616 NOKPROBE_SYMBOL(preempt_count_sub); 2617 2618 #endif 2619 2620 /* 2621 * Print scheduling while atomic bug: 2622 */ 2623 static noinline void __schedule_bug(struct task_struct *prev) 2624 { 2625 if (oops_in_progress) 2626 return; 2627 2628 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 2629 prev->comm, prev->pid, preempt_count()); 2630 2631 debug_show_held_locks(prev); 2632 print_modules(); 2633 if (irqs_disabled()) 2634 print_irqtrace_events(prev); 2635 #ifdef CONFIG_DEBUG_PREEMPT 2636 if (in_atomic_preempt_off()) { 2637 pr_err("Preemption disabled at:"); 2638 print_ip_sym(current->preempt_disable_ip); 2639 pr_cont("\n"); 2640 } 2641 #endif 2642 dump_stack(); 2643 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 2644 } 2645 2646 /* 2647 * Various schedule()-time debugging checks and statistics: 2648 */ 2649 static inline void schedule_debug(struct task_struct *prev) 2650 { 2651 #ifdef CONFIG_SCHED_STACK_END_CHECK 2652 BUG_ON(unlikely(task_stack_end_corrupted(prev))); 2653 #endif 2654 /* 2655 * Test if we are atomic. Since do_exit() needs to call into 2656 * schedule() atomically, we ignore that path. Otherwise whine 2657 * if we are scheduling when we should not. 2658 */ 2659 if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD)) 2660 __schedule_bug(prev); 2661 rcu_sleep_check(); 2662 2663 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 2664 2665 schedstat_inc(this_rq(), sched_count); 2666 } 2667 2668 /* 2669 * Pick up the highest-prio task: 2670 */ 2671 static inline struct task_struct * 2672 pick_next_task(struct rq *rq, struct task_struct *prev) 2673 { 2674 const struct sched_class *class = &fair_sched_class; 2675 struct task_struct *p; 2676 2677 /* 2678 * Optimization: we know that if all tasks are in 2679 * the fair class we can call that function directly: 2680 */ 2681 if (likely(prev->sched_class == class && 2682 rq->nr_running == rq->cfs.h_nr_running)) { 2683 p = fair_sched_class.pick_next_task(rq, prev); 2684 if (unlikely(p == RETRY_TASK)) 2685 goto again; 2686 2687 /* assumes fair_sched_class->next == idle_sched_class */ 2688 if (unlikely(!p)) 2689 p = idle_sched_class.pick_next_task(rq, prev); 2690 2691 return p; 2692 } 2693 2694 again: 2695 for_each_class(class) { 2696 p = class->pick_next_task(rq, prev); 2697 if (p) { 2698 if (unlikely(p == RETRY_TASK)) 2699 goto again; 2700 return p; 2701 } 2702 } 2703 2704 BUG(); /* the idle class will always have a runnable task */ 2705 } 2706 2707 /* 2708 * __schedule() is the main scheduler function. 2709 * 2710 * The main means of driving the scheduler and thus entering this function are: 2711 * 2712 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 2713 * 2714 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 2715 * paths. For example, see arch/x86/entry_64.S. 2716 * 2717 * To drive preemption between tasks, the scheduler sets the flag in timer 2718 * interrupt handler scheduler_tick(). 2719 * 2720 * 3. Wakeups don't really cause entry into schedule(). They add a 2721 * task to the run-queue and that's it. 
2722 * 2723 * Now, if the new task added to the run-queue preempts the current 2724 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 2725 * called on the nearest possible occasion: 2726 * 2727 * - If the kernel is preemptible (CONFIG_PREEMPT=y): 2728 * 2729 * - in syscall or exception context, at the next outmost 2730 * preempt_enable(). (this might be as soon as the wake_up()'s 2731 * spin_unlock()!) 2732 * 2733 * - in IRQ context, return from interrupt-handler to 2734 * preemptible context 2735 * 2736 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) 2737 * then at the next: 2738 * 2739 * - cond_resched() call 2740 * - explicit schedule() call 2741 * - return from syscall or exception to user-space 2742 * - return from interrupt-handler to user-space 2743 * 2744 * WARNING: all callers must re-check need_resched() afterward and reschedule 2745 * accordingly in case an event triggered the need for rescheduling (such as 2746 * an interrupt waking up a task) while preemption was disabled in __schedule(). 2747 */ 2748 static void __sched __schedule(void) 2749 { 2750 struct task_struct *prev, *next; 2751 unsigned long *switch_count; 2752 struct rq *rq; 2753 int cpu; 2754 2755 preempt_disable(); 2756 cpu = smp_processor_id(); 2757 rq = cpu_rq(cpu); 2758 rcu_note_context_switch(); 2759 prev = rq->curr; 2760 2761 schedule_debug(prev); 2762 2763 if (sched_feat(HRTICK)) 2764 hrtick_clear(rq); 2765 2766 /* 2767 * Make sure that signal_pending_state()->signal_pending() below 2768 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 2769 * done by the caller to avoid the race with signal_wake_up(). 2770 */ 2771 smp_mb__before_spinlock(); 2772 raw_spin_lock_irq(&rq->lock); 2773 2774 rq->clock_skip_update <<= 1; /* promote REQ to ACT */ 2775 2776 switch_count = &prev->nivcsw; 2777 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { 2778 if (unlikely(signal_pending_state(prev->state, prev))) { 2779 prev->state = TASK_RUNNING; 2780 } else { 2781 deactivate_task(rq, prev, DEQUEUE_SLEEP); 2782 prev->on_rq = 0; 2783 2784 /* 2785 * If a worker went to sleep, notify and ask workqueue 2786 * whether it wants to wake up a task to maintain 2787 * concurrency. 2788 */ 2789 if (prev->flags & PF_WQ_WORKER) { 2790 struct task_struct *to_wakeup; 2791 2792 to_wakeup = wq_worker_sleeping(prev, cpu); 2793 if (to_wakeup) 2794 try_to_wake_up_local(to_wakeup); 2795 } 2796 } 2797 switch_count = &prev->nvcsw; 2798 } 2799 2800 if (task_on_rq_queued(prev)) 2801 update_rq_clock(rq); 2802 2803 next = pick_next_task(rq, prev); 2804 clear_tsk_need_resched(prev); 2805 clear_preempt_need_resched(); 2806 rq->clock_skip_update = 0; 2807 2808 if (likely(prev != next)) { 2809 rq->nr_switches++; 2810 rq->curr = next; 2811 ++*switch_count; 2812 2813 rq = context_switch(rq, prev, next); /* unlocks the rq */ 2814 cpu = cpu_of(rq); 2815 } else 2816 raw_spin_unlock_irq(&rq->lock); 2817 2818 post_schedule(rq); 2819 2820 sched_preempt_enable_no_resched(); 2821 } 2822 2823 static inline void sched_submit_work(struct task_struct *tsk) 2824 { 2825 if (!tsk->state || tsk_is_pi_blocked(tsk)) 2826 return; 2827 /* 2828 * If we are going to sleep and we have plugged IO queued, 2829 * make sure to submit it to avoid deadlocks. 
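	 * The deadlock being avoided is a task going to sleep while waiting
	 * for I/O that is still sitting, unsubmitted, in its own block plug.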
2830 */ 2831 if (blk_needs_flush_plug(tsk)) 2832 blk_schedule_flush_plug(tsk); 2833 } 2834 2835 asmlinkage __visible void __sched schedule(void) 2836 { 2837 struct task_struct *tsk = current; 2838 2839 sched_submit_work(tsk); 2840 do { 2841 __schedule(); 2842 } while (need_resched()); 2843 } 2844 EXPORT_SYMBOL(schedule); 2845 2846 #ifdef CONFIG_CONTEXT_TRACKING 2847 asmlinkage __visible void __sched schedule_user(void) 2848 { 2849 /* 2850 * If we come here after a random call to set_need_resched(), 2851 * or we have been woken up remotely but the IPI has not yet arrived, 2852 * we haven't yet exited the RCU idle mode. Do it here manually until 2853 * we find a better solution. 2854 * 2855 * NB: There are buggy callers of this function. Ideally we 2856 * should warn if prev_state != CONTEXT_USER, but that will trigger 2857 * too frequently to make sense yet. 2858 */ 2859 enum ctx_state prev_state = exception_enter(); 2860 schedule(); 2861 exception_exit(prev_state); 2862 } 2863 #endif 2864 2865 /** 2866 * schedule_preempt_disabled - called with preemption disabled 2867 * 2868 * Returns with preemption disabled. Note: preempt_count must be 1 2869 */ 2870 void __sched schedule_preempt_disabled(void) 2871 { 2872 sched_preempt_enable_no_resched(); 2873 schedule(); 2874 preempt_disable(); 2875 } 2876 2877 static void __sched notrace preempt_schedule_common(void) 2878 { 2879 do { 2880 __preempt_count_add(PREEMPT_ACTIVE); 2881 __schedule(); 2882 __preempt_count_sub(PREEMPT_ACTIVE); 2883 2884 /* 2885 * Check again in case we missed a preemption opportunity 2886 * between schedule and now. 2887 */ 2888 barrier(); 2889 } while (need_resched()); 2890 } 2891 2892 #ifdef CONFIG_PREEMPT 2893 /* 2894 * this is the entry point to schedule() from in-kernel preemption 2895 * off of preempt_enable. Kernel preemptions off return from interrupt 2896 * occur there and call schedule directly. 2897 */ 2898 asmlinkage __visible void __sched notrace preempt_schedule(void) 2899 { 2900 /* 2901 * If there is a non-zero preempt_count or interrupts are disabled, 2902 * we do not want to preempt the current task. Just return.. 2903 */ 2904 if (likely(!preemptible())) 2905 return; 2906 2907 preempt_schedule_common(); 2908 } 2909 NOKPROBE_SYMBOL(preempt_schedule); 2910 EXPORT_SYMBOL(preempt_schedule); 2911 2912 #ifdef CONFIG_CONTEXT_TRACKING 2913 /** 2914 * preempt_schedule_context - preempt_schedule called by tracing 2915 * 2916 * The tracing infrastructure uses preempt_enable_notrace to prevent 2917 * recursion and tracing preempt enabling caused by the tracing 2918 * infrastructure itself. But as tracing can happen in areas coming 2919 * from userspace or just about to enter userspace, a preempt enable 2920 * can occur before user_exit() is called. This will cause the scheduler 2921 * to be called when the system is still in usermode. 2922 * 2923 * To prevent this, the preempt_enable_notrace will use this function 2924 * instead of preempt_schedule() to exit user context if needed before 2925 * calling the scheduler. 2926 */ 2927 asmlinkage __visible void __sched notrace preempt_schedule_context(void) 2928 { 2929 enum ctx_state prev_ctx; 2930 2931 if (likely(!preemptible())) 2932 return; 2933 2934 do { 2935 __preempt_count_add(PREEMPT_ACTIVE); 2936 /* 2937 * Needs preempt disabled in case user_exit() is traced 2938 * and the tracer calls preempt_enable_notrace() causing 2939 * an infinite recursion. 
2940 */ 2941 prev_ctx = exception_enter(); 2942 __schedule(); 2943 exception_exit(prev_ctx); 2944 2945 __preempt_count_sub(PREEMPT_ACTIVE); 2946 barrier(); 2947 } while (need_resched()); 2948 } 2949 EXPORT_SYMBOL_GPL(preempt_schedule_context); 2950 #endif /* CONFIG_CONTEXT_TRACKING */ 2951 2952 #endif /* CONFIG_PREEMPT */ 2953 2954 /* 2955 * this is the entry point to schedule() from kernel preemption 2956 * off of irq context. 2957 * Note, that this is called and return with irqs disabled. This will 2958 * protect us against recursive calling from irq. 2959 */ 2960 asmlinkage __visible void __sched preempt_schedule_irq(void) 2961 { 2962 enum ctx_state prev_state; 2963 2964 /* Catch callers which need to be fixed */ 2965 BUG_ON(preempt_count() || !irqs_disabled()); 2966 2967 prev_state = exception_enter(); 2968 2969 do { 2970 __preempt_count_add(PREEMPT_ACTIVE); 2971 local_irq_enable(); 2972 __schedule(); 2973 local_irq_disable(); 2974 __preempt_count_sub(PREEMPT_ACTIVE); 2975 2976 /* 2977 * Check again in case we missed a preemption opportunity 2978 * between schedule and now. 2979 */ 2980 barrier(); 2981 } while (need_resched()); 2982 2983 exception_exit(prev_state); 2984 } 2985 2986 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, 2987 void *key) 2988 { 2989 return try_to_wake_up(curr->private, mode, wake_flags); 2990 } 2991 EXPORT_SYMBOL(default_wake_function); 2992 2993 #ifdef CONFIG_RT_MUTEXES 2994 2995 /* 2996 * rt_mutex_setprio - set the current priority of a task 2997 * @p: task 2998 * @prio: prio value (kernel-internal form) 2999 * 3000 * This function changes the 'effective' priority of a task. It does 3001 * not touch ->normal_prio like __setscheduler(). 3002 * 3003 * Used by the rt_mutex code to implement priority inheritance 3004 * logic. Call site only calls if the priority of the task changed. 3005 */ 3006 void rt_mutex_setprio(struct task_struct *p, int prio) 3007 { 3008 int oldprio, queued, running, enqueue_flag = 0; 3009 struct rq *rq; 3010 const struct sched_class *prev_class; 3011 3012 BUG_ON(prio > MAX_PRIO); 3013 3014 rq = __task_rq_lock(p); 3015 3016 /* 3017 * Idle task boosting is a nono in general. There is one 3018 * exception, when PREEMPT_RT and NOHZ is active: 3019 * 3020 * The idle task calls get_next_timer_interrupt() and holds 3021 * the timer wheel base->lock on the CPU and another CPU wants 3022 * to access the timer (probably to cancel it). We can safely 3023 * ignore the boosting request, as the idle CPU runs this code 3024 * with interrupts disabled and will complete the lock 3025 * protected section without being interrupted. So there is no 3026 * real need to boost. 3027 */ 3028 if (unlikely(p == rq->idle)) { 3029 WARN_ON(p != rq->curr); 3030 WARN_ON(p->pi_blocked_on); 3031 goto out_unlock; 3032 } 3033 3034 trace_sched_pi_setprio(p, prio); 3035 oldprio = p->prio; 3036 prev_class = p->sched_class; 3037 queued = task_on_rq_queued(p); 3038 running = task_current(rq, p); 3039 if (queued) 3040 dequeue_task(rq, p, 0); 3041 if (running) 3042 put_prev_task(rq, p); 3043 3044 /* 3045 * Boosting condition are: 3046 * 1. -rt task is running and holds mutex A 3047 * --> -dl task blocks on mutex A 3048 * 3049 * 2. 
-dl task is running and holds mutex A 3050 * --> -dl task blocks on mutex A and could preempt the 3051 * running task 3052 */ 3053 if (dl_prio(prio)) { 3054 struct task_struct *pi_task = rt_mutex_get_top_task(p); 3055 if (!dl_prio(p->normal_prio) || 3056 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { 3057 p->dl.dl_boosted = 1; 3058 p->dl.dl_throttled = 0; 3059 enqueue_flag = ENQUEUE_REPLENISH; 3060 } else 3061 p->dl.dl_boosted = 0; 3062 p->sched_class = &dl_sched_class; 3063 } else if (rt_prio(prio)) { 3064 if (dl_prio(oldprio)) 3065 p->dl.dl_boosted = 0; 3066 if (oldprio < prio) 3067 enqueue_flag = ENQUEUE_HEAD; 3068 p->sched_class = &rt_sched_class; 3069 } else { 3070 if (dl_prio(oldprio)) 3071 p->dl.dl_boosted = 0; 3072 if (rt_prio(oldprio)) 3073 p->rt.timeout = 0; 3074 p->sched_class = &fair_sched_class; 3075 } 3076 3077 p->prio = prio; 3078 3079 if (running) 3080 p->sched_class->set_curr_task(rq); 3081 if (queued) 3082 enqueue_task(rq, p, enqueue_flag); 3083 3084 check_class_changed(rq, p, prev_class, oldprio); 3085 out_unlock: 3086 __task_rq_unlock(rq); 3087 } 3088 #endif 3089 3090 void set_user_nice(struct task_struct *p, long nice) 3091 { 3092 int old_prio, delta, queued; 3093 unsigned long flags; 3094 struct rq *rq; 3095 3096 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 3097 return; 3098 /* 3099 * We have to be careful, if called from sys_setpriority(), 3100 * the task might be in the middle of scheduling on another CPU. 3101 */ 3102 rq = task_rq_lock(p, &flags); 3103 /* 3104 * The RT priorities are set via sched_setscheduler(), but we still 3105 * allow the 'normal' nice value to be set - but as expected 3106 * it wont have any effect on scheduling until the task is 3107 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 3108 */ 3109 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 3110 p->static_prio = NICE_TO_PRIO(nice); 3111 goto out_unlock; 3112 } 3113 queued = task_on_rq_queued(p); 3114 if (queued) 3115 dequeue_task(rq, p, 0); 3116 3117 p->static_prio = NICE_TO_PRIO(nice); 3118 set_load_weight(p); 3119 old_prio = p->prio; 3120 p->prio = effective_prio(p); 3121 delta = p->prio - old_prio; 3122 3123 if (queued) { 3124 enqueue_task(rq, p, 0); 3125 /* 3126 * If the task increased its priority or is running and 3127 * lowered its priority, then reschedule its CPU: 3128 */ 3129 if (delta < 0 || (delta > 0 && task_running(rq, p))) 3130 resched_curr(rq); 3131 } 3132 out_unlock: 3133 task_rq_unlock(rq, p, &flags); 3134 } 3135 EXPORT_SYMBOL(set_user_nice); 3136 3137 /* 3138 * can_nice - check if a task can reduce its nice value 3139 * @p: task 3140 * @nice: nice value 3141 */ 3142 int can_nice(const struct task_struct *p, const int nice) 3143 { 3144 /* convert nice value [19,-20] to rlimit style value [1,40] */ 3145 int nice_rlim = nice_to_rlimit(nice); 3146 3147 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 3148 capable(CAP_SYS_NICE)); 3149 } 3150 3151 #ifdef __ARCH_WANT_SYS_NICE 3152 3153 /* 3154 * sys_nice - change the priority of the current process. 3155 * @increment: priority increment 3156 * 3157 * sys_setpriority is a more generic, but much slower function that 3158 * does similar things. 3159 */ 3160 SYSCALL_DEFINE1(nice, int, increment) 3161 { 3162 long nice, retval; 3163 3164 /* 3165 * Setpriority might change our priority at the same moment. 3166 * We don't have to worry. Conceptually one call occurs first 3167 * and we have a single winner. 
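	 *
	 * As a concrete example of the clamping below: with task_nice(current)
	 * == 0, an increment of 100 is first clamped to NICE_WIDTH (40), the
	 * resulting nice value of 40 is then clamped to MAX_NICE (19), and
	 * since the increment is positive (priority only drops) no RLIMIT_NICE
	 * check is needed.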
3168 */ 3169 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 3170 nice = task_nice(current) + increment; 3171 3172 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 3173 if (increment < 0 && !can_nice(current, nice)) 3174 return -EPERM; 3175 3176 retval = security_task_setnice(current, nice); 3177 if (retval) 3178 return retval; 3179 3180 set_user_nice(current, nice); 3181 return 0; 3182 } 3183 3184 #endif 3185 3186 /** 3187 * task_prio - return the priority value of a given task. 3188 * @p: the task in question. 3189 * 3190 * Return: The priority value as seen by users in /proc. 3191 * RT tasks are offset by -200. Normal tasks are centered 3192 * around 0, value goes from -16 to +15. 3193 */ 3194 int task_prio(const struct task_struct *p) 3195 { 3196 return p->prio - MAX_RT_PRIO; 3197 } 3198 3199 /** 3200 * idle_cpu - is a given cpu idle currently? 3201 * @cpu: the processor in question. 3202 * 3203 * Return: 1 if the CPU is currently idle. 0 otherwise. 3204 */ 3205 int idle_cpu(int cpu) 3206 { 3207 struct rq *rq = cpu_rq(cpu); 3208 3209 if (rq->curr != rq->idle) 3210 return 0; 3211 3212 if (rq->nr_running) 3213 return 0; 3214 3215 #ifdef CONFIG_SMP 3216 if (!llist_empty(&rq->wake_list)) 3217 return 0; 3218 #endif 3219 3220 return 1; 3221 } 3222 3223 /** 3224 * idle_task - return the idle task for a given cpu. 3225 * @cpu: the processor in question. 3226 * 3227 * Return: The idle task for the cpu @cpu. 3228 */ 3229 struct task_struct *idle_task(int cpu) 3230 { 3231 return cpu_rq(cpu)->idle; 3232 } 3233 3234 /** 3235 * find_process_by_pid - find a process with a matching PID value. 3236 * @pid: the pid in question. 3237 * 3238 * The task of @pid, if found. %NULL otherwise. 3239 */ 3240 static struct task_struct *find_process_by_pid(pid_t pid) 3241 { 3242 return pid ? find_task_by_vpid(pid) : current; 3243 } 3244 3245 /* 3246 * This function initializes the sched_dl_entity of a newly becoming 3247 * SCHED_DEADLINE task. 3248 * 3249 * Only the static values are considered here, the actual runtime and the 3250 * absolute deadline will be properly calculated when the task is enqueued 3251 * for the first time with its new policy. 3252 */ 3253 static void 3254 __setparam_dl(struct task_struct *p, const struct sched_attr *attr) 3255 { 3256 struct sched_dl_entity *dl_se = &p->dl; 3257 3258 dl_se->dl_runtime = attr->sched_runtime; 3259 dl_se->dl_deadline = attr->sched_deadline; 3260 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 3261 dl_se->flags = attr->sched_flags; 3262 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 3263 3264 /* 3265 * Changing the parameters of a task is 'tricky' and we're not doing 3266 * the correct thing -- also see task_dead_dl() and switched_from_dl(). 3267 * 3268 * What we SHOULD do is delay the bandwidth release until the 0-lag 3269 * point. This would include retaining the task_struct until that time 3270 * and change dl_overflow() to not immediately decrement the current 3271 * amount. 3272 * 3273 * Instead we retain the current runtime/deadline and let the new 3274 * parameters take effect after the current reservation period lapses. 3275 * This is safe (albeit pessimistic) because the 0-lag point is always 3276 * before the current scheduling deadline. 3277 * 3278 * We can still have temporary overloads because we do not delay the 3279 * change in bandwidth until that time; so admission control is 3280 * not on the safe side. It does however guarantee tasks will never 3281 * consume more than promised. 
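 *
 * (For a feel of the scale involved: sched_runtime = 10ms with
 * sched_period = 100ms gives dl_bw = to_ratio(100ms, 10ms) =
 * (10000000 << 20) / 100000000, i.e. roughly a tenth of a CPU in the
 * <<20 fixed-point format the admission test uses.)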
3282 */ 3283 } 3284 3285 /* 3286 * sched_setparam() passes in -1 for its policy, to let the functions 3287 * it calls know not to change it. 3288 */ 3289 #define SETPARAM_POLICY -1 3290 3291 static void __setscheduler_params(struct task_struct *p, 3292 const struct sched_attr *attr) 3293 { 3294 int policy = attr->sched_policy; 3295 3296 if (policy == SETPARAM_POLICY) 3297 policy = p->policy; 3298 3299 p->policy = policy; 3300 3301 if (dl_policy(policy)) 3302 __setparam_dl(p, attr); 3303 else if (fair_policy(policy)) 3304 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 3305 3306 /* 3307 * __sched_setscheduler() ensures attr->sched_priority == 0 when 3308 * !rt_policy. Always setting this ensures that things like 3309 * getparam()/getattr() don't report silly values for !rt tasks. 3310 */ 3311 p->rt_priority = attr->sched_priority; 3312 p->normal_prio = normal_prio(p); 3313 set_load_weight(p); 3314 } 3315 3316 /* Actually do priority change: must hold pi & rq lock. */ 3317 static void __setscheduler(struct rq *rq, struct task_struct *p, 3318 const struct sched_attr *attr) 3319 { 3320 __setscheduler_params(p, attr); 3321 3322 /* 3323 * If we get here, there was no pi waiters boosting the 3324 * task. It is safe to use the normal prio. 3325 */ 3326 p->prio = normal_prio(p); 3327 3328 if (dl_prio(p->prio)) 3329 p->sched_class = &dl_sched_class; 3330 else if (rt_prio(p->prio)) 3331 p->sched_class = &rt_sched_class; 3332 else 3333 p->sched_class = &fair_sched_class; 3334 } 3335 3336 static void 3337 __getparam_dl(struct task_struct *p, struct sched_attr *attr) 3338 { 3339 struct sched_dl_entity *dl_se = &p->dl; 3340 3341 attr->sched_priority = p->rt_priority; 3342 attr->sched_runtime = dl_se->dl_runtime; 3343 attr->sched_deadline = dl_se->dl_deadline; 3344 attr->sched_period = dl_se->dl_period; 3345 attr->sched_flags = dl_se->flags; 3346 } 3347 3348 /* 3349 * This function validates the new parameters of a -deadline task. 3350 * We ask for the deadline not being zero, and greater or equal 3351 * than the runtime, as well as the period of being zero or 3352 * greater than deadline. Furthermore, we have to be sure that 3353 * user parameters are above the internal resolution of 1us (we 3354 * check sched_runtime only since it is always the smaller one) and 3355 * below 2^63 ns (we have to check both sched_deadline and 3356 * sched_period, as the latter can be zero). 3357 */ 3358 static bool 3359 __checkparam_dl(const struct sched_attr *attr) 3360 { 3361 /* deadline != 0 */ 3362 if (attr->sched_deadline == 0) 3363 return false; 3364 3365 /* 3366 * Since we truncate DL_SCALE bits, make sure we're at least 3367 * that big. 3368 */ 3369 if (attr->sched_runtime < (1ULL << DL_SCALE)) 3370 return false; 3371 3372 /* 3373 * Since we use the MSB for wrap-around and sign issues, make 3374 * sure it's not set (mind that period can be equal to zero). 
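	 *
	 * (Taken together, the checks in this function amount to requiring
	 * (1 << DL_SCALE) ns <= runtime <= deadline <= period < 2^63 ns,
	 * with period == 0 treated as period == deadline.)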
3375 */ 3376 if (attr->sched_deadline & (1ULL << 63) || 3377 attr->sched_period & (1ULL << 63)) 3378 return false; 3379 3380 /* runtime <= deadline <= period (if period != 0) */ 3381 if ((attr->sched_period != 0 && 3382 attr->sched_period < attr->sched_deadline) || 3383 attr->sched_deadline < attr->sched_runtime) 3384 return false; 3385 3386 return true; 3387 } 3388 3389 /* 3390 * check the target process has a UID that matches the current process's 3391 */ 3392 static bool check_same_owner(struct task_struct *p) 3393 { 3394 const struct cred *cred = current_cred(), *pcred; 3395 bool match; 3396 3397 rcu_read_lock(); 3398 pcred = __task_cred(p); 3399 match = (uid_eq(cred->euid, pcred->euid) || 3400 uid_eq(cred->euid, pcred->uid)); 3401 rcu_read_unlock(); 3402 return match; 3403 } 3404 3405 static bool dl_param_changed(struct task_struct *p, 3406 const struct sched_attr *attr) 3407 { 3408 struct sched_dl_entity *dl_se = &p->dl; 3409 3410 if (dl_se->dl_runtime != attr->sched_runtime || 3411 dl_se->dl_deadline != attr->sched_deadline || 3412 dl_se->dl_period != attr->sched_period || 3413 dl_se->flags != attr->sched_flags) 3414 return true; 3415 3416 return false; 3417 } 3418 3419 static int __sched_setscheduler(struct task_struct *p, 3420 const struct sched_attr *attr, 3421 bool user) 3422 { 3423 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : 3424 MAX_RT_PRIO - 1 - attr->sched_priority; 3425 int retval, oldprio, oldpolicy = -1, queued, running; 3426 int policy = attr->sched_policy; 3427 unsigned long flags; 3428 const struct sched_class *prev_class; 3429 struct rq *rq; 3430 int reset_on_fork; 3431 3432 /* may grab non-irq protected spin_locks */ 3433 BUG_ON(in_interrupt()); 3434 recheck: 3435 /* double check policy once rq lock held */ 3436 if (policy < 0) { 3437 reset_on_fork = p->sched_reset_on_fork; 3438 policy = oldpolicy = p->policy; 3439 } else { 3440 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 3441 3442 if (policy != SCHED_DEADLINE && 3443 policy != SCHED_FIFO && policy != SCHED_RR && 3444 policy != SCHED_NORMAL && policy != SCHED_BATCH && 3445 policy != SCHED_IDLE) 3446 return -EINVAL; 3447 } 3448 3449 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK)) 3450 return -EINVAL; 3451 3452 /* 3453 * Valid priorities for SCHED_FIFO and SCHED_RR are 3454 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 3455 * SCHED_BATCH and SCHED_IDLE is 0. 
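	 * Kernel threads (tasks without an mm) may use the full
	 * 1..MAX_RT_PRIO-1 range, as the check below distinguishes.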
3456 */ 3457 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || 3458 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) 3459 return -EINVAL; 3460 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 3461 (rt_policy(policy) != (attr->sched_priority != 0))) 3462 return -EINVAL; 3463 3464 /* 3465 * Allow unprivileged RT tasks to decrease priority: 3466 */ 3467 if (user && !capable(CAP_SYS_NICE)) { 3468 if (fair_policy(policy)) { 3469 if (attr->sched_nice < task_nice(p) && 3470 !can_nice(p, attr->sched_nice)) 3471 return -EPERM; 3472 } 3473 3474 if (rt_policy(policy)) { 3475 unsigned long rlim_rtprio = 3476 task_rlimit(p, RLIMIT_RTPRIO); 3477 3478 /* can't set/change the rt policy */ 3479 if (policy != p->policy && !rlim_rtprio) 3480 return -EPERM; 3481 3482 /* can't increase priority */ 3483 if (attr->sched_priority > p->rt_priority && 3484 attr->sched_priority > rlim_rtprio) 3485 return -EPERM; 3486 } 3487 3488 /* 3489 * Can't set/change SCHED_DEADLINE policy at all for now 3490 * (safest behavior); in the future we would like to allow 3491 * unprivileged DL tasks to increase their relative deadline 3492 * or reduce their runtime (both ways reducing utilization) 3493 */ 3494 if (dl_policy(policy)) 3495 return -EPERM; 3496 3497 /* 3498 * Treat SCHED_IDLE as nice 20. Only allow a switch to 3499 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 3500 */ 3501 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) { 3502 if (!can_nice(p, task_nice(p))) 3503 return -EPERM; 3504 } 3505 3506 /* can't change other user's priorities */ 3507 if (!check_same_owner(p)) 3508 return -EPERM; 3509 3510 /* Normal users shall not reset the sched_reset_on_fork flag */ 3511 if (p->sched_reset_on_fork && !reset_on_fork) 3512 return -EPERM; 3513 } 3514 3515 if (user) { 3516 retval = security_task_setscheduler(p); 3517 if (retval) 3518 return retval; 3519 } 3520 3521 /* 3522 * make sure no PI-waiters arrive (or leave) while we are 3523 * changing the priority of the task: 3524 * 3525 * To be able to change p->policy safely, the appropriate 3526 * runqueue lock must be held. 3527 */ 3528 rq = task_rq_lock(p, &flags); 3529 3530 /* 3531 * Changing the policy of the stop threads its a very bad idea 3532 */ 3533 if (p == rq->stop) { 3534 task_rq_unlock(rq, p, &flags); 3535 return -EINVAL; 3536 } 3537 3538 /* 3539 * If not changing anything there's no need to proceed further, 3540 * but store a possible modification of reset_on_fork. 3541 */ 3542 if (unlikely(policy == p->policy)) { 3543 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 3544 goto change; 3545 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 3546 goto change; 3547 if (dl_policy(policy) && dl_param_changed(p, attr)) 3548 goto change; 3549 3550 p->sched_reset_on_fork = reset_on_fork; 3551 task_rq_unlock(rq, p, &flags); 3552 return 0; 3553 } 3554 change: 3555 3556 if (user) { 3557 #ifdef CONFIG_RT_GROUP_SCHED 3558 /* 3559 * Do not allow realtime tasks into groups that have no runtime 3560 * assigned. 3561 */ 3562 if (rt_bandwidth_enabled() && rt_policy(policy) && 3563 task_group(p)->rt_bandwidth.rt_runtime == 0 && 3564 !task_group_is_autogroup(task_group(p))) { 3565 task_rq_unlock(rq, p, &flags); 3566 return -EPERM; 3567 } 3568 #endif 3569 #ifdef CONFIG_SMP 3570 if (dl_bandwidth_enabled() && dl_policy(policy)) { 3571 cpumask_t *span = rq->rd->span; 3572 3573 /* 3574 * Don't allow tasks with an affinity mask smaller than 3575 * the entire root_domain to become SCHED_DEADLINE. 
We 3576 * will also fail if there's no bandwidth available. 3577 */ 3578 if (!cpumask_subset(span, &p->cpus_allowed) || 3579 rq->rd->dl_bw.bw == 0) { 3580 task_rq_unlock(rq, p, &flags); 3581 return -EPERM; 3582 } 3583 } 3584 #endif 3585 } 3586 3587 /* recheck policy now with rq lock held */ 3588 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 3589 policy = oldpolicy = -1; 3590 task_rq_unlock(rq, p, &flags); 3591 goto recheck; 3592 } 3593 3594 /* 3595 * If setscheduling to SCHED_DEADLINE (or changing the parameters 3596 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 3597 * is available. 3598 */ 3599 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) { 3600 task_rq_unlock(rq, p, &flags); 3601 return -EBUSY; 3602 } 3603 3604 p->sched_reset_on_fork = reset_on_fork; 3605 oldprio = p->prio; 3606 3607 /* 3608 * Special case for priority boosted tasks. 3609 * 3610 * If the new priority is lower or equal (user space view) 3611 * than the current (boosted) priority, we just store the new 3612 * normal parameters and do not touch the scheduler class and 3613 * the runqueue. This will be done when the task deboost 3614 * itself. 3615 */ 3616 if (rt_mutex_check_prio(p, newprio)) { 3617 __setscheduler_params(p, attr); 3618 task_rq_unlock(rq, p, &flags); 3619 return 0; 3620 } 3621 3622 queued = task_on_rq_queued(p); 3623 running = task_current(rq, p); 3624 if (queued) 3625 dequeue_task(rq, p, 0); 3626 if (running) 3627 put_prev_task(rq, p); 3628 3629 prev_class = p->sched_class; 3630 __setscheduler(rq, p, attr); 3631 3632 if (running) 3633 p->sched_class->set_curr_task(rq); 3634 if (queued) { 3635 /* 3636 * We enqueue to tail when the priority of a task is 3637 * increased (user space view). 3638 */ 3639 enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); 3640 } 3641 3642 check_class_changed(rq, p, prev_class, oldprio); 3643 task_rq_unlock(rq, p, &flags); 3644 3645 rt_mutex_adjust_pi(p); 3646 3647 return 0; 3648 } 3649 3650 static int _sched_setscheduler(struct task_struct *p, int policy, 3651 const struct sched_param *param, bool check) 3652 { 3653 struct sched_attr attr = { 3654 .sched_policy = policy, 3655 .sched_priority = param->sched_priority, 3656 .sched_nice = PRIO_TO_NICE(p->static_prio), 3657 }; 3658 3659 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 3660 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 3661 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 3662 policy &= ~SCHED_RESET_ON_FORK; 3663 attr.sched_policy = policy; 3664 } 3665 3666 return __sched_setscheduler(p, &attr, check); 3667 } 3668 /** 3669 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 3670 * @p: the task in question. 3671 * @policy: new policy. 3672 * @param: structure containing the new RT priority. 3673 * 3674 * Return: 0 on success. An error code otherwise. 3675 * 3676 * NOTE that the task may be already dead. 3677 */ 3678 int sched_setscheduler(struct task_struct *p, int policy, 3679 const struct sched_param *param) 3680 { 3681 return _sched_setscheduler(p, policy, param, true); 3682 } 3683 EXPORT_SYMBOL_GPL(sched_setscheduler); 3684 3685 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 3686 { 3687 return __sched_setscheduler(p, attr, true); 3688 } 3689 EXPORT_SYMBOL_GPL(sched_setattr); 3690 3691 /** 3692 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 3693 * @p: the task in question. 3694 * @policy: new policy. 
3695 * @param: structure containing the new RT priority. 3696 * 3697 * Just like sched_setscheduler, only don't bother checking if the 3698 * current context has permission. For example, this is needed in 3699 * stop_machine(): we create temporary high priority worker threads, 3700 * but our caller might not have that capability. 3701 * 3702 * Return: 0 on success. An error code otherwise. 3703 */ 3704 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 3705 const struct sched_param *param) 3706 { 3707 return _sched_setscheduler(p, policy, param, false); 3708 } 3709 3710 static int 3711 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 3712 { 3713 struct sched_param lparam; 3714 struct task_struct *p; 3715 int retval; 3716 3717 if (!param || pid < 0) 3718 return -EINVAL; 3719 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 3720 return -EFAULT; 3721 3722 rcu_read_lock(); 3723 retval = -ESRCH; 3724 p = find_process_by_pid(pid); 3725 if (p != NULL) 3726 retval = sched_setscheduler(p, policy, &lparam); 3727 rcu_read_unlock(); 3728 3729 return retval; 3730 } 3731 3732 /* 3733 * Mimics kernel/events/core.c perf_copy_attr(). 3734 */ 3735 static int sched_copy_attr(struct sched_attr __user *uattr, 3736 struct sched_attr *attr) 3737 { 3738 u32 size; 3739 int ret; 3740 3741 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0)) 3742 return -EFAULT; 3743 3744 /* 3745 * zero the full structure, so that a short copy will be nice. 3746 */ 3747 memset(attr, 0, sizeof(*attr)); 3748 3749 ret = get_user(size, &uattr->size); 3750 if (ret) 3751 return ret; 3752 3753 if (size > PAGE_SIZE) /* silly large */ 3754 goto err_size; 3755 3756 if (!size) /* abi compat */ 3757 size = SCHED_ATTR_SIZE_VER0; 3758 3759 if (size < SCHED_ATTR_SIZE_VER0) 3760 goto err_size; 3761 3762 /* 3763 * If we're handed a bigger struct than we know of, 3764 * ensure all the unknown bits are 0 - i.e. new 3765 * user-space does not rely on any kernel feature 3766 * extensions we dont know about yet. 3767 */ 3768 if (size > sizeof(*attr)) { 3769 unsigned char __user *addr; 3770 unsigned char __user *end; 3771 unsigned char val; 3772 3773 addr = (void __user *)uattr + sizeof(*attr); 3774 end = (void __user *)uattr + size; 3775 3776 for (; addr < end; addr++) { 3777 ret = get_user(val, addr); 3778 if (ret) 3779 return ret; 3780 if (val) 3781 goto err_size; 3782 } 3783 size = sizeof(*attr); 3784 } 3785 3786 ret = copy_from_user(attr, uattr, size); 3787 if (ret) 3788 return -EFAULT; 3789 3790 /* 3791 * XXX: do we want to be lenient like existing syscalls; or do we want 3792 * to be strict and return an error on out-of-bounds values? 3793 */ 3794 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 3795 3796 return 0; 3797 3798 err_size: 3799 put_user(sizeof(*attr), &uattr->size); 3800 return -E2BIG; 3801 } 3802 3803 /** 3804 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 3805 * @pid: the pid in question. 3806 * @policy: new policy. 3807 * @param: structure containing the new RT priority. 3808 * 3809 * Return: 0 on success. An error code otherwise. 3810 */ 3811 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, 3812 struct sched_param __user *, param) 3813 { 3814 /* negative values for policy are not valid */ 3815 if (policy < 0) 3816 return -EINVAL; 3817 3818 return do_sched_setscheduler(pid, policy, param); 3819 } 3820 3821 /** 3822 * sys_sched_setparam - set/change the RT priority of a thread 3823 * @pid: the pid in question. 
3824 * @param: structure containing the new RT priority. 3825 * 3826 * Return: 0 on success. An error code otherwise. 3827 */ 3828 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 3829 { 3830 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 3831 } 3832 3833 /** 3834 * sys_sched_setattr - same as above, but with extended sched_attr 3835 * @pid: the pid in question. 3836 * @uattr: structure containing the extended parameters. 3837 * @flags: for future extension. 3838 */ 3839 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 3840 unsigned int, flags) 3841 { 3842 struct sched_attr attr; 3843 struct task_struct *p; 3844 int retval; 3845 3846 if (!uattr || pid < 0 || flags) 3847 return -EINVAL; 3848 3849 retval = sched_copy_attr(uattr, &attr); 3850 if (retval) 3851 return retval; 3852 3853 if ((int)attr.sched_policy < 0) 3854 return -EINVAL; 3855 3856 rcu_read_lock(); 3857 retval = -ESRCH; 3858 p = find_process_by_pid(pid); 3859 if (p != NULL) 3860 retval = sched_setattr(p, &attr); 3861 rcu_read_unlock(); 3862 3863 return retval; 3864 } 3865 3866 /** 3867 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 3868 * @pid: the pid in question. 3869 * 3870 * Return: On success, the policy of the thread. Otherwise, a negative error 3871 * code. 3872 */ 3873 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 3874 { 3875 struct task_struct *p; 3876 int retval; 3877 3878 if (pid < 0) 3879 return -EINVAL; 3880 3881 retval = -ESRCH; 3882 rcu_read_lock(); 3883 p = find_process_by_pid(pid); 3884 if (p) { 3885 retval = security_task_getscheduler(p); 3886 if (!retval) 3887 retval = p->policy 3888 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 3889 } 3890 rcu_read_unlock(); 3891 return retval; 3892 } 3893 3894 /** 3895 * sys_sched_getparam - get the RT priority of a thread 3896 * @pid: the pid in question. 3897 * @param: structure containing the RT priority. 3898 * 3899 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 3900 * code. 3901 */ 3902 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 3903 { 3904 struct sched_param lp = { .sched_priority = 0 }; 3905 struct task_struct *p; 3906 int retval; 3907 3908 if (!param || pid < 0) 3909 return -EINVAL; 3910 3911 rcu_read_lock(); 3912 p = find_process_by_pid(pid); 3913 retval = -ESRCH; 3914 if (!p) 3915 goto out_unlock; 3916 3917 retval = security_task_getscheduler(p); 3918 if (retval) 3919 goto out_unlock; 3920 3921 if (task_has_rt_policy(p)) 3922 lp.sched_priority = p->rt_priority; 3923 rcu_read_unlock(); 3924 3925 /* 3926 * This one might sleep, we cannot do it with a spinlock held ... 3927 */ 3928 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 3929 3930 return retval; 3931 3932 out_unlock: 3933 rcu_read_unlock(); 3934 return retval; 3935 } 3936 3937 static int sched_read_attr(struct sched_attr __user *uattr, 3938 struct sched_attr *attr, 3939 unsigned int usize) 3940 { 3941 int ret; 3942 3943 if (!access_ok(VERIFY_WRITE, uattr, usize)) 3944 return -EFAULT; 3945 3946 /* 3947 * If we're handed a smaller struct than we know of, 3948 * ensure all the unknown bits are 0 - i.e. old 3949 * user-space does not get uncomplete information. 
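	 *
	 * (This mirrors sched_copy_attr() above: the kernel's view is only
	 * trimmed down to @usize when every byte beyond @usize is zero;
	 * otherwise -EFBIG is returned.)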
3950 */ 3951 if (usize < sizeof(*attr)) { 3952 unsigned char *addr; 3953 unsigned char *end; 3954 3955 addr = (void *)attr + usize; 3956 end = (void *)attr + sizeof(*attr); 3957 3958 for (; addr < end; addr++) { 3959 if (*addr) 3960 return -EFBIG; 3961 } 3962 3963 attr->size = usize; 3964 } 3965 3966 ret = copy_to_user(uattr, attr, attr->size); 3967 if (ret) 3968 return -EFAULT; 3969 3970 return 0; 3971 } 3972 3973 /** 3974 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 3975 * @pid: the pid in question. 3976 * @uattr: structure containing the extended parameters. 3977 * @size: sizeof(attr) for fwd/bwd comp. 3978 * @flags: for future extension. 3979 */ 3980 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 3981 unsigned int, size, unsigned int, flags) 3982 { 3983 struct sched_attr attr = { 3984 .size = sizeof(struct sched_attr), 3985 }; 3986 struct task_struct *p; 3987 int retval; 3988 3989 if (!uattr || pid < 0 || size > PAGE_SIZE || 3990 size < SCHED_ATTR_SIZE_VER0 || flags) 3991 return -EINVAL; 3992 3993 rcu_read_lock(); 3994 p = find_process_by_pid(pid); 3995 retval = -ESRCH; 3996 if (!p) 3997 goto out_unlock; 3998 3999 retval = security_task_getscheduler(p); 4000 if (retval) 4001 goto out_unlock; 4002 4003 attr.sched_policy = p->policy; 4004 if (p->sched_reset_on_fork) 4005 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 4006 if (task_has_dl_policy(p)) 4007 __getparam_dl(p, &attr); 4008 else if (task_has_rt_policy(p)) 4009 attr.sched_priority = p->rt_priority; 4010 else 4011 attr.sched_nice = task_nice(p); 4012 4013 rcu_read_unlock(); 4014 4015 retval = sched_read_attr(uattr, &attr, size); 4016 return retval; 4017 4018 out_unlock: 4019 rcu_read_unlock(); 4020 return retval; 4021 } 4022 4023 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 4024 { 4025 cpumask_var_t cpus_allowed, new_mask; 4026 struct task_struct *p; 4027 int retval; 4028 4029 rcu_read_lock(); 4030 4031 p = find_process_by_pid(pid); 4032 if (!p) { 4033 rcu_read_unlock(); 4034 return -ESRCH; 4035 } 4036 4037 /* Prevent p going away */ 4038 get_task_struct(p); 4039 rcu_read_unlock(); 4040 4041 if (p->flags & PF_NO_SETAFFINITY) { 4042 retval = -EINVAL; 4043 goto out_put_task; 4044 } 4045 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 4046 retval = -ENOMEM; 4047 goto out_put_task; 4048 } 4049 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 4050 retval = -ENOMEM; 4051 goto out_free_cpus_allowed; 4052 } 4053 retval = -EPERM; 4054 if (!check_same_owner(p)) { 4055 rcu_read_lock(); 4056 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 4057 rcu_read_unlock(); 4058 goto out_free_new_mask; 4059 } 4060 rcu_read_unlock(); 4061 } 4062 4063 retval = security_task_setscheduler(p); 4064 if (retval) 4065 goto out_free_new_mask; 4066 4067 4068 cpuset_cpus_allowed(p, cpus_allowed); 4069 cpumask_and(new_mask, in_mask, cpus_allowed); 4070 4071 /* 4072 * Since bandwidth control happens on root_domain basis, 4073 * if admission test is enabled, we only admit -deadline 4074 * tasks allowed to run on all the CPUs in the task's 4075 * root_domain. 
4076 */ 4077 #ifdef CONFIG_SMP 4078 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 4079 rcu_read_lock(); 4080 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { 4081 retval = -EBUSY; 4082 rcu_read_unlock(); 4083 goto out_free_new_mask; 4084 } 4085 rcu_read_unlock(); 4086 } 4087 #endif 4088 again: 4089 retval = set_cpus_allowed_ptr(p, new_mask); 4090 4091 if (!retval) { 4092 cpuset_cpus_allowed(p, cpus_allowed); 4093 if (!cpumask_subset(new_mask, cpus_allowed)) { 4094 /* 4095 * We must have raced with a concurrent cpuset 4096 * update. Just reset the cpus_allowed to the 4097 * cpuset's cpus_allowed 4098 */ 4099 cpumask_copy(new_mask, cpus_allowed); 4100 goto again; 4101 } 4102 } 4103 out_free_new_mask: 4104 free_cpumask_var(new_mask); 4105 out_free_cpus_allowed: 4106 free_cpumask_var(cpus_allowed); 4107 out_put_task: 4108 put_task_struct(p); 4109 return retval; 4110 } 4111 4112 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 4113 struct cpumask *new_mask) 4114 { 4115 if (len < cpumask_size()) 4116 cpumask_clear(new_mask); 4117 else if (len > cpumask_size()) 4118 len = cpumask_size(); 4119 4120 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 4121 } 4122 4123 /** 4124 * sys_sched_setaffinity - set the cpu affinity of a process 4125 * @pid: pid of the process 4126 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4127 * @user_mask_ptr: user-space pointer to the new cpu mask 4128 * 4129 * Return: 0 on success. An error code otherwise. 4130 */ 4131 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 4132 unsigned long __user *, user_mask_ptr) 4133 { 4134 cpumask_var_t new_mask; 4135 int retval; 4136 4137 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 4138 return -ENOMEM; 4139 4140 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 4141 if (retval == 0) 4142 retval = sched_setaffinity(pid, new_mask); 4143 free_cpumask_var(new_mask); 4144 return retval; 4145 } 4146 4147 long sched_getaffinity(pid_t pid, struct cpumask *mask) 4148 { 4149 struct task_struct *p; 4150 unsigned long flags; 4151 int retval; 4152 4153 rcu_read_lock(); 4154 4155 retval = -ESRCH; 4156 p = find_process_by_pid(pid); 4157 if (!p) 4158 goto out_unlock; 4159 4160 retval = security_task_getscheduler(p); 4161 if (retval) 4162 goto out_unlock; 4163 4164 raw_spin_lock_irqsave(&p->pi_lock, flags); 4165 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); 4166 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4167 4168 out_unlock: 4169 rcu_read_unlock(); 4170 4171 return retval; 4172 } 4173 4174 /** 4175 * sys_sched_getaffinity - get the cpu affinity of a process 4176 * @pid: pid of the process 4177 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4178 * @user_mask_ptr: user-space pointer to hold the current cpu mask 4179 * 4180 * Return: 0 on success. An error code otherwise. 
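 * (More precisely, on success the value handed back to user space is
 * the number of bytes copied into @user_mask_ptr, capped at
 * cpumask_size().)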
4181 */ 4182 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 4183 unsigned long __user *, user_mask_ptr) 4184 { 4185 int ret; 4186 cpumask_var_t mask; 4187 4188 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 4189 return -EINVAL; 4190 if (len & (sizeof(unsigned long)-1)) 4191 return -EINVAL; 4192 4193 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 4194 return -ENOMEM; 4195 4196 ret = sched_getaffinity(pid, mask); 4197 if (ret == 0) { 4198 size_t retlen = min_t(size_t, len, cpumask_size()); 4199 4200 if (copy_to_user(user_mask_ptr, mask, retlen)) 4201 ret = -EFAULT; 4202 else 4203 ret = retlen; 4204 } 4205 free_cpumask_var(mask); 4206 4207 return ret; 4208 } 4209 4210 /** 4211 * sys_sched_yield - yield the current processor to other threads. 4212 * 4213 * This function yields the current CPU to other tasks. If there are no 4214 * other threads running on this CPU then this function will return. 4215 * 4216 * Return: 0. 4217 */ 4218 SYSCALL_DEFINE0(sched_yield) 4219 { 4220 struct rq *rq = this_rq_lock(); 4221 4222 schedstat_inc(rq, yld_count); 4223 current->sched_class->yield_task(rq); 4224 4225 /* 4226 * Since we are going to call schedule() anyway, there's 4227 * no need to preempt or enable interrupts: 4228 */ 4229 __release(rq->lock); 4230 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 4231 do_raw_spin_unlock(&rq->lock); 4232 sched_preempt_enable_no_resched(); 4233 4234 schedule(); 4235 4236 return 0; 4237 } 4238 4239 int __sched _cond_resched(void) 4240 { 4241 if (should_resched()) { 4242 preempt_schedule_common(); 4243 return 1; 4244 } 4245 return 0; 4246 } 4247 EXPORT_SYMBOL(_cond_resched); 4248 4249 /* 4250 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 4251 * call schedule, and on return reacquire the lock. 4252 * 4253 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level 4254 * operations here to prevent schedule() from being called twice (once via 4255 * spin_unlock(), once by hand). 4256 */ 4257 int __cond_resched_lock(spinlock_t *lock) 4258 { 4259 int resched = should_resched(); 4260 int ret = 0; 4261 4262 lockdep_assert_held(lock); 4263 4264 if (spin_needbreak(lock) || resched) { 4265 spin_unlock(lock); 4266 if (resched) 4267 preempt_schedule_common(); 4268 else 4269 cpu_relax(); 4270 ret = 1; 4271 spin_lock(lock); 4272 } 4273 return ret; 4274 } 4275 EXPORT_SYMBOL(__cond_resched_lock); 4276 4277 int __sched __cond_resched_softirq(void) 4278 { 4279 BUG_ON(!in_softirq()); 4280 4281 if (should_resched()) { 4282 local_bh_enable(); 4283 preempt_schedule_common(); 4284 local_bh_disable(); 4285 return 1; 4286 } 4287 return 0; 4288 } 4289 EXPORT_SYMBOL(__cond_resched_softirq); 4290 4291 /** 4292 * yield - yield the current processor to other threads. 4293 * 4294 * Do not ever use this function, there's a 99% chance you're doing it wrong. 4295 * 4296 * The scheduler is at all times free to pick the calling task as the most 4297 * eligible task to run, if removing the yield() call from your code breaks 4298 * it, its already broken. 4299 * 4300 * Typical broken usage is: 4301 * 4302 * while (!event) 4303 * yield(); 4304 * 4305 * where one assumes that yield() will let 'the other' process run that will 4306 * make event true. If the current task is a SCHED_FIFO task that will never 4307 * happen. Never use yield() as a progress guarantee!! 4308 * 4309 * If you want to use yield() to wait for something, use wait_event(). 4310 * If you want to use yield() to be 'nice' for others, use cond_resched(). 
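 *
 * The broken loop above is better written with a waitqueue. A minimal
 * sketch (illustrative only, assuming a wait_queue_head_t 'wq' that the
 * producer wakes after setting 'event'):
 *
 *	wait_event(wq, event);		(waiter)
 *
 *	event = true;
 *	wake_up(&wq);			(producer)
 *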
4311 * If you still want to use yield(), do not! 4312 */ 4313 void __sched yield(void) 4314 { 4315 set_current_state(TASK_RUNNING); 4316 sys_sched_yield(); 4317 } 4318 EXPORT_SYMBOL(yield); 4319 4320 /** 4321 * yield_to - yield the current processor to another thread in 4322 * your thread group, or accelerate that thread toward the 4323 * processor it's on. 4324 * @p: target task 4325 * @preempt: whether task preemption is allowed or not 4326 * 4327 * It's the caller's job to ensure that the target task struct 4328 * can't go away on us before we can do any checks. 4329 * 4330 * Return: 4331 * true (>0) if we indeed boosted the target task. 4332 * false (0) if we failed to boost the target. 4333 * -ESRCH if there's no task to yield to. 4334 */ 4335 int __sched yield_to(struct task_struct *p, bool preempt) 4336 { 4337 struct task_struct *curr = current; 4338 struct rq *rq, *p_rq; 4339 unsigned long flags; 4340 int yielded = 0; 4341 4342 local_irq_save(flags); 4343 rq = this_rq(); 4344 4345 again: 4346 p_rq = task_rq(p); 4347 /* 4348 * If we're the only runnable task on the rq and target rq also 4349 * has only one task, there's absolutely no point in yielding. 4350 */ 4351 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 4352 yielded = -ESRCH; 4353 goto out_irq; 4354 } 4355 4356 double_rq_lock(rq, p_rq); 4357 if (task_rq(p) != p_rq) { 4358 double_rq_unlock(rq, p_rq); 4359 goto again; 4360 } 4361 4362 if (!curr->sched_class->yield_to_task) 4363 goto out_unlock; 4364 4365 if (curr->sched_class != p->sched_class) 4366 goto out_unlock; 4367 4368 if (task_running(p_rq, p) || p->state) 4369 goto out_unlock; 4370 4371 yielded = curr->sched_class->yield_to_task(rq, p, preempt); 4372 if (yielded) { 4373 schedstat_inc(rq, yld_count); 4374 /* 4375 * Make p's CPU reschedule; pick_next_entity takes care of 4376 * fairness. 4377 */ 4378 if (preempt && rq != p_rq) 4379 resched_curr(p_rq); 4380 } 4381 4382 out_unlock: 4383 double_rq_unlock(rq, p_rq); 4384 out_irq: 4385 local_irq_restore(flags); 4386 4387 if (yielded > 0) 4388 schedule(); 4389 4390 return yielded; 4391 } 4392 EXPORT_SYMBOL_GPL(yield_to); 4393 4394 /* 4395 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 4396 * that process accounting knows that this is a task in IO wait state. 4397 */ 4398 long __sched io_schedule_timeout(long timeout) 4399 { 4400 int old_iowait = current->in_iowait; 4401 struct rq *rq; 4402 long ret; 4403 4404 current->in_iowait = 1; 4405 if (old_iowait) 4406 blk_schedule_flush_plug(current); 4407 else 4408 blk_flush_plug(current); 4409 4410 delayacct_blkio_start(); 4411 rq = raw_rq(); 4412 atomic_inc(&rq->nr_iowait); 4413 ret = schedule_timeout(timeout); 4414 current->in_iowait = old_iowait; 4415 atomic_dec(&rq->nr_iowait); 4416 delayacct_blkio_end(); 4417 4418 return ret; 4419 } 4420 EXPORT_SYMBOL(io_schedule_timeout); 4421 4422 /** 4423 * sys_sched_get_priority_max - return maximum RT priority. 4424 * @policy: scheduling class. 4425 * 4426 * Return: On success, this syscall returns the maximum 4427 * rt_priority that can be used by a given scheduling class. 4428 * On failure, a negative error code is returned. 
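 *
 * Concretely (see the switch below): SCHED_FIFO and SCHED_RR report
 * MAX_USER_RT_PRIO-1 (99 with the default configuration), the other
 * known policies report 0, and an unknown policy gives -EINVAL. A
 * typical userspace range query is simply:
 *
 *	int lo = sched_get_priority_min(SCHED_FIFO);
 *	int hi = sched_get_priority_max(SCHED_FIFO);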
4429 */ 4430 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 4431 { 4432 int ret = -EINVAL; 4433 4434 switch (policy) { 4435 case SCHED_FIFO: 4436 case SCHED_RR: 4437 ret = MAX_USER_RT_PRIO-1; 4438 break; 4439 case SCHED_DEADLINE: 4440 case SCHED_NORMAL: 4441 case SCHED_BATCH: 4442 case SCHED_IDLE: 4443 ret = 0; 4444 break; 4445 } 4446 return ret; 4447 } 4448 4449 /** 4450 * sys_sched_get_priority_min - return minimum RT priority. 4451 * @policy: scheduling class. 4452 * 4453 * Return: On success, this syscall returns the minimum 4454 * rt_priority that can be used by a given scheduling class. 4455 * On failure, a negative error code is returned. 4456 */ 4457 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 4458 { 4459 int ret = -EINVAL; 4460 4461 switch (policy) { 4462 case SCHED_FIFO: 4463 case SCHED_RR: 4464 ret = 1; 4465 break; 4466 case SCHED_DEADLINE: 4467 case SCHED_NORMAL: 4468 case SCHED_BATCH: 4469 case SCHED_IDLE: 4470 ret = 0; 4471 } 4472 return ret; 4473 } 4474 4475 /** 4476 * sys_sched_rr_get_interval - return the default timeslice of a process. 4477 * @pid: pid of the process. 4478 * @interval: userspace pointer to the timeslice value. 4479 * 4480 * this syscall writes the default timeslice value of a given process 4481 * into the user-space timespec buffer. A value of '0' means infinity. 4482 * 4483 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 4484 * an error code. 4485 */ 4486 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 4487 struct timespec __user *, interval) 4488 { 4489 struct task_struct *p; 4490 unsigned int time_slice; 4491 unsigned long flags; 4492 struct rq *rq; 4493 int retval; 4494 struct timespec t; 4495 4496 if (pid < 0) 4497 return -EINVAL; 4498 4499 retval = -ESRCH; 4500 rcu_read_lock(); 4501 p = find_process_by_pid(pid); 4502 if (!p) 4503 goto out_unlock; 4504 4505 retval = security_task_getscheduler(p); 4506 if (retval) 4507 goto out_unlock; 4508 4509 rq = task_rq_lock(p, &flags); 4510 time_slice = 0; 4511 if (p->sched_class->get_rr_interval) 4512 time_slice = p->sched_class->get_rr_interval(rq, p); 4513 task_rq_unlock(rq, p, &flags); 4514 4515 rcu_read_unlock(); 4516 jiffies_to_timespec(time_slice, &t); 4517 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 4518 return retval; 4519 4520 out_unlock: 4521 rcu_read_unlock(); 4522 return retval; 4523 } 4524 4525 static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; 4526 4527 void sched_show_task(struct task_struct *p) 4528 { 4529 unsigned long free = 0; 4530 int ppid; 4531 unsigned long state = p->state; 4532 4533 if (state) 4534 state = __ffs(state) + 1; 4535 printk(KERN_INFO "%-15.15s %c", p->comm, 4536 state < sizeof(stat_nam) - 1 ? 
stat_nam[state] : '?'); 4537 #if BITS_PER_LONG == 32 4538 if (state == TASK_RUNNING) 4539 printk(KERN_CONT " running "); 4540 else 4541 printk(KERN_CONT " %08lx ", thread_saved_pc(p)); 4542 #else 4543 if (state == TASK_RUNNING) 4544 printk(KERN_CONT " running task "); 4545 else 4546 printk(KERN_CONT " %016lx ", thread_saved_pc(p)); 4547 #endif 4548 #ifdef CONFIG_DEBUG_STACK_USAGE 4549 free = stack_not_used(p); 4550 #endif 4551 ppid = 0; 4552 rcu_read_lock(); 4553 if (pid_alive(p)) 4554 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 4555 rcu_read_unlock(); 4556 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 4557 task_pid_nr(p), ppid, 4558 (unsigned long)task_thread_info(p)->flags); 4559 4560 print_worker_info(KERN_INFO, p); 4561 show_stack(p, NULL); 4562 } 4563 4564 void show_state_filter(unsigned long state_filter) 4565 { 4566 struct task_struct *g, *p; 4567 4568 #if BITS_PER_LONG == 32 4569 printk(KERN_INFO 4570 " task PC stack pid father\n"); 4571 #else 4572 printk(KERN_INFO 4573 " task PC stack pid father\n"); 4574 #endif 4575 rcu_read_lock(); 4576 for_each_process_thread(g, p) { 4577 /* 4578 * reset the NMI-timeout, listing all files on a slow 4579 * console might take a lot of time: 4580 */ 4581 touch_nmi_watchdog(); 4582 if (!state_filter || (p->state & state_filter)) 4583 sched_show_task(p); 4584 } 4585 4586 touch_all_softlockup_watchdogs(); 4587 4588 #ifdef CONFIG_SCHED_DEBUG 4589 sysrq_sched_debug_show(); 4590 #endif 4591 rcu_read_unlock(); 4592 /* 4593 * Only show locks if all tasks are dumped: 4594 */ 4595 if (!state_filter) 4596 debug_show_all_locks(); 4597 } 4598 4599 void init_idle_bootup_task(struct task_struct *idle) 4600 { 4601 idle->sched_class = &idle_sched_class; 4602 } 4603 4604 /** 4605 * init_idle - set up an idle thread for a given CPU 4606 * @idle: task in question 4607 * @cpu: cpu the idle task belongs to 4608 * 4609 * NOTE: this function does not set the idle thread's NEED_RESCHED 4610 * flag, to make booting more robust. 4611 */ 4612 void init_idle(struct task_struct *idle, int cpu) 4613 { 4614 struct rq *rq = cpu_rq(cpu); 4615 unsigned long flags; 4616 4617 raw_spin_lock_irqsave(&rq->lock, flags); 4618 4619 __sched_fork(0, idle); 4620 idle->state = TASK_RUNNING; 4621 idle->se.exec_start = sched_clock(); 4622 4623 do_set_cpus_allowed(idle, cpumask_of(cpu)); 4624 /* 4625 * We're having a chicken and egg problem, even though we are 4626 * holding rq->lock, the cpu isn't yet set to this cpu so the 4627 * lockdep check in task_group() will fail. 4628 * 4629 * Similar case to sched_fork(). / Alternatively we could 4630 * use task_rq_lock() here and obtain the other rq->lock. 4631 * 4632 * Silence PROVE_RCU 4633 */ 4634 rcu_read_lock(); 4635 __set_task_cpu(idle, cpu); 4636 rcu_read_unlock(); 4637 4638 rq->curr = rq->idle = idle; 4639 idle->on_rq = TASK_ON_RQ_QUEUED; 4640 #if defined(CONFIG_SMP) 4641 idle->on_cpu = 1; 4642 #endif 4643 raw_spin_unlock_irqrestore(&rq->lock, flags); 4644 4645 /* Set the preempt count _outside_ the spinlocks! 
*/ 4646 init_idle_preempt_count(idle, cpu); 4647 4648 /* 4649 * The idle tasks have their own, simple scheduling class: 4650 */ 4651 idle->sched_class = &idle_sched_class; 4652 ftrace_graph_init_idle_task(idle, cpu); 4653 vtime_init_idle(idle, cpu); 4654 #if defined(CONFIG_SMP) 4655 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 4656 #endif 4657 } 4658 4659 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 4660 const struct cpumask *trial) 4661 { 4662 int ret = 1, trial_cpus; 4663 struct dl_bw *cur_dl_b; 4664 unsigned long flags; 4665 4666 if (!cpumask_weight(cur)) 4667 return ret; 4668 4669 rcu_read_lock_sched(); 4670 cur_dl_b = dl_bw_of(cpumask_any(cur)); 4671 trial_cpus = cpumask_weight(trial); 4672 4673 raw_spin_lock_irqsave(&cur_dl_b->lock, flags); 4674 if (cur_dl_b->bw != -1 && 4675 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw) 4676 ret = 0; 4677 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); 4678 rcu_read_unlock_sched(); 4679 4680 return ret; 4681 } 4682 4683 int task_can_attach(struct task_struct *p, 4684 const struct cpumask *cs_cpus_allowed) 4685 { 4686 int ret = 0; 4687 4688 /* 4689 * Kthreads which disallow setaffinity shouldn't be moved 4690 * to a new cpuset; we don't want to change their cpu 4691 * affinity and isolating such threads by their set of 4692 * allowed nodes is unnecessary. Thus, cpusets are not 4693 * applicable for such threads. This prevents checking for 4694 * success of set_cpus_allowed_ptr() on all attached tasks 4695 * before cpus_allowed may be changed. 4696 */ 4697 if (p->flags & PF_NO_SETAFFINITY) { 4698 ret = -EINVAL; 4699 goto out; 4700 } 4701 4702 #ifdef CONFIG_SMP 4703 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 4704 cs_cpus_allowed)) { 4705 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask, 4706 cs_cpus_allowed); 4707 struct dl_bw *dl_b; 4708 bool overflow; 4709 int cpus; 4710 unsigned long flags; 4711 4712 rcu_read_lock_sched(); 4713 dl_b = dl_bw_of(dest_cpu); 4714 raw_spin_lock_irqsave(&dl_b->lock, flags); 4715 cpus = dl_bw_cpus(dest_cpu); 4716 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw); 4717 if (overflow) 4718 ret = -EBUSY; 4719 else { 4720 /* 4721 * We reserve space for this task in the destination 4722 * root_domain, as we can't fail after this point. 4723 * We will free resources in the source root_domain 4724 * later on (see set_cpus_allowed_dl()). 4725 */ 4726 __dl_add(dl_b, p->dl.dl_bw); 4727 } 4728 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 4729 rcu_read_unlock_sched(); 4730 4731 } 4732 #endif 4733 out: 4734 return ret; 4735 } 4736 4737 #ifdef CONFIG_SMP 4738 /* 4739 * move_queued_task - move a queued task to new rq. 4740 * 4741 * Returns (locked) new rq. Old rq's lock is released. 
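 *
 * Illustrative caller pattern (a sketch mirroring set_cpus_allowed_ptr()
 * below): the caller already holds the old rq lock and must carry on with
 * the rq that is returned, because that is the lock it now holds:
 *
 *	rq = task_rq_lock(p, &flags);
 *	...
 *	rq = move_queued_task(p, dest_cpu);
 *	...
 *	task_rq_unlock(rq, p, &flags);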
4742 */ 4743 static struct rq *move_queued_task(struct task_struct *p, int new_cpu) 4744 { 4745 struct rq *rq = task_rq(p); 4746 4747 lockdep_assert_held(&rq->lock); 4748 4749 dequeue_task(rq, p, 0); 4750 p->on_rq = TASK_ON_RQ_MIGRATING; 4751 set_task_cpu(p, new_cpu); 4752 raw_spin_unlock(&rq->lock); 4753 4754 rq = cpu_rq(new_cpu); 4755 4756 raw_spin_lock(&rq->lock); 4757 BUG_ON(task_cpu(p) != new_cpu); 4758 p->on_rq = TASK_ON_RQ_QUEUED; 4759 enqueue_task(rq, p, 0); 4760 check_preempt_curr(rq, p, 0); 4761 4762 return rq; 4763 } 4764 4765 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 4766 { 4767 if (p->sched_class->set_cpus_allowed) 4768 p->sched_class->set_cpus_allowed(p, new_mask); 4769 4770 cpumask_copy(&p->cpus_allowed, new_mask); 4771 p->nr_cpus_allowed = cpumask_weight(new_mask); 4772 } 4773 4774 /* 4775 * This is how migration works: 4776 * 4777 * 1) we invoke migration_cpu_stop() on the target CPU using 4778 * stop_one_cpu(). 4779 * 2) stopper starts to run (implicitly forcing the migrated thread 4780 * off the CPU) 4781 * 3) it checks whether the migrated task is still in the wrong runqueue. 4782 * 4) if it's in the wrong runqueue then the migration thread removes 4783 * it and puts it into the right queue. 4784 * 5) stopper completes and stop_one_cpu() returns and the migration 4785 * is done. 4786 */ 4787 4788 /* 4789 * Change a given task's CPU affinity. Migrate the thread to a 4790 * proper CPU and schedule it away if the CPU it's executing on 4791 * is removed from the allowed bitmask. 4792 * 4793 * NOTE: the caller must have a valid reference to the task, the 4794 * task must not exit() & deallocate itself prematurely. The 4795 * call is not atomic; no spinlocks may be held. 4796 */ 4797 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 4798 { 4799 unsigned long flags; 4800 struct rq *rq; 4801 unsigned int dest_cpu; 4802 int ret = 0; 4803 4804 rq = task_rq_lock(p, &flags); 4805 4806 if (cpumask_equal(&p->cpus_allowed, new_mask)) 4807 goto out; 4808 4809 if (!cpumask_intersects(new_mask, cpu_active_mask)) { 4810 ret = -EINVAL; 4811 goto out; 4812 } 4813 4814 do_set_cpus_allowed(p, new_mask); 4815 4816 /* Can the task run on the task's current CPU? If so, we're done */ 4817 if (cpumask_test_cpu(task_cpu(p), new_mask)) 4818 goto out; 4819 4820 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); 4821 if (task_running(rq, p) || p->state == TASK_WAKING) { 4822 struct migration_arg arg = { p, dest_cpu }; 4823 /* Need help from migration thread: drop lock and wait. */ 4824 task_rq_unlock(rq, p, &flags); 4825 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); 4826 tlb_migrate_finish(p->mm); 4827 return 0; 4828 } else if (task_on_rq_queued(p)) 4829 rq = move_queued_task(p, dest_cpu); 4830 out: 4831 task_rq_unlock(rq, p, &flags); 4832 4833 return ret; 4834 } 4835 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 4836 4837 /* 4838 * Move (not current) task off this cpu, onto dest cpu. We're doing 4839 * this because either it can't run here any more (set_cpus_allowed() 4840 * away from this CPU, or CPU going down), or because we're 4841 * attempting to rebalance this task on exec (sched_exec). 4842 * 4843 * So we race with normal scheduler movements, but that's OK, as long 4844 * as the task is no longer on this CPU. 4845 * 4846 * Returns non-zero if task was successfully migrated. 
4847 */ 4848 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 4849 { 4850 struct rq *rq; 4851 int ret = 0; 4852 4853 if (unlikely(!cpu_active(dest_cpu))) 4854 return ret; 4855 4856 rq = cpu_rq(src_cpu); 4857 4858 raw_spin_lock(&p->pi_lock); 4859 raw_spin_lock(&rq->lock); 4860 /* Already moved. */ 4861 if (task_cpu(p) != src_cpu) 4862 goto done; 4863 4864 /* Affinity changed (again). */ 4865 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) 4866 goto fail; 4867 4868 /* 4869 * If we're not on a rq, the next wake-up will ensure we're 4870 * placed properly. 4871 */ 4872 if (task_on_rq_queued(p)) 4873 rq = move_queued_task(p, dest_cpu); 4874 done: 4875 ret = 1; 4876 fail: 4877 raw_spin_unlock(&rq->lock); 4878 raw_spin_unlock(&p->pi_lock); 4879 return ret; 4880 } 4881 4882 #ifdef CONFIG_NUMA_BALANCING 4883 /* Migrate current task p to target_cpu */ 4884 int migrate_task_to(struct task_struct *p, int target_cpu) 4885 { 4886 struct migration_arg arg = { p, target_cpu }; 4887 int curr_cpu = task_cpu(p); 4888 4889 if (curr_cpu == target_cpu) 4890 return 0; 4891 4892 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) 4893 return -EINVAL; 4894 4895 /* TODO: This is not properly updating schedstats */ 4896 4897 trace_sched_move_numa(p, curr_cpu, target_cpu); 4898 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 4899 } 4900 4901 /* 4902 * Requeue a task on a given node and accurately track the number of NUMA 4903 * tasks on the runqueues 4904 */ 4905 void sched_setnuma(struct task_struct *p, int nid) 4906 { 4907 struct rq *rq; 4908 unsigned long flags; 4909 bool queued, running; 4910 4911 rq = task_rq_lock(p, &flags); 4912 queued = task_on_rq_queued(p); 4913 running = task_current(rq, p); 4914 4915 if (queued) 4916 dequeue_task(rq, p, 0); 4917 if (running) 4918 put_prev_task(rq, p); 4919 4920 p->numa_preferred_nid = nid; 4921 4922 if (running) 4923 p->sched_class->set_curr_task(rq); 4924 if (queued) 4925 enqueue_task(rq, p, 0); 4926 task_rq_unlock(rq, p, &flags); 4927 } 4928 #endif 4929 4930 /* 4931 * migration_cpu_stop - this will be executed by a highprio stopper thread 4932 * and performs thread migration by bumping thread off CPU then 4933 * 'pushing' onto another runqueue. 4934 */ 4935 static int migration_cpu_stop(void *data) 4936 { 4937 struct migration_arg *arg = data; 4938 4939 /* 4940 * The original target cpu might have gone down and we might 4941 * be on another cpu but it doesn't matter. 4942 */ 4943 local_irq_disable(); 4944 /* 4945 * We need to explicitly wake pending tasks before running 4946 * __migrate_task() such that we will not miss enforcing cpus_allowed 4947 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 4948 */ 4949 sched_ttwu_pending(); 4950 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu); 4951 local_irq_enable(); 4952 return 0; 4953 } 4954 4955 #ifdef CONFIG_HOTPLUG_CPU 4956 4957 /* 4958 * Ensures that the idle task is using init_mm right before its cpu goes 4959 * offline. 4960 */ 4961 void idle_task_exit(void) 4962 { 4963 struct mm_struct *mm = current->active_mm; 4964 4965 BUG_ON(cpu_online(smp_processor_id())); 4966 4967 if (mm != &init_mm) { 4968 switch_mm(mm, &init_mm, current); 4969 finish_arch_post_lock_switch(); 4970 } 4971 mmdrop(mm); 4972 } 4973 4974 /* 4975 * Since this CPU is going 'away' for a while, fold any nr_active delta 4976 * we might have. Assumes we're called after migrate_tasks() so that the 4977 * nr_active count is stable. 
4978 * 4979 * Also see the comment "Global load-average calculations". 4980 */ 4981 static void calc_load_migrate(struct rq *rq) 4982 { 4983 long delta = calc_load_fold_active(rq); 4984 if (delta) 4985 atomic_long_add(delta, &calc_load_tasks); 4986 } 4987 4988 static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) 4989 { 4990 } 4991 4992 static const struct sched_class fake_sched_class = { 4993 .put_prev_task = put_prev_task_fake, 4994 }; 4995 4996 static struct task_struct fake_task = { 4997 /* 4998 * Avoid pull_{rt,dl}_task() 4999 */ 5000 .prio = MAX_PRIO + 1, 5001 .sched_class = &fake_sched_class, 5002 }; 5003 5004 /* 5005 * Migrate all tasks from the rq, sleeping tasks will be migrated by 5006 * try_to_wake_up()->select_task_rq(). 5007 * 5008 * Called with rq->lock held even though we'er in stop_machine() and 5009 * there's no concurrency possible, we hold the required locks anyway 5010 * because of lock validation efforts. 5011 */ 5012 static void migrate_tasks(unsigned int dead_cpu) 5013 { 5014 struct rq *rq = cpu_rq(dead_cpu); 5015 struct task_struct *next, *stop = rq->stop; 5016 int dest_cpu; 5017 5018 /* 5019 * Fudge the rq selection such that the below task selection loop 5020 * doesn't get stuck on the currently eligible stop task. 5021 * 5022 * We're currently inside stop_machine() and the rq is either stuck 5023 * in the stop_machine_cpu_stop() loop, or we're executing this code, 5024 * either way we should never end up calling schedule() until we're 5025 * done here. 5026 */ 5027 rq->stop = NULL; 5028 5029 /* 5030 * put_prev_task() and pick_next_task() sched 5031 * class method both need to have an up-to-date 5032 * value of rq->clock[_task] 5033 */ 5034 update_rq_clock(rq); 5035 5036 for ( ; ; ) { 5037 /* 5038 * There's this thread running, bail when that's the only 5039 * remaining thread. 5040 */ 5041 if (rq->nr_running == 1) 5042 break; 5043 5044 next = pick_next_task(rq, &fake_task); 5045 BUG_ON(!next); 5046 next->sched_class->put_prev_task(rq, next); 5047 5048 /* Find suitable destination for @next, with force if needed. */ 5049 dest_cpu = select_fallback_rq(dead_cpu, next); 5050 raw_spin_unlock(&rq->lock); 5051 5052 __migrate_task(next, dead_cpu, dest_cpu); 5053 5054 raw_spin_lock(&rq->lock); 5055 } 5056 5057 rq->stop = stop; 5058 } 5059 5060 #endif /* CONFIG_HOTPLUG_CPU */ 5061 5062 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 5063 5064 static struct ctl_table sd_ctl_dir[] = { 5065 { 5066 .procname = "sched_domain", 5067 .mode = 0555, 5068 }, 5069 {} 5070 }; 5071 5072 static struct ctl_table sd_ctl_root[] = { 5073 { 5074 .procname = "kernel", 5075 .mode = 0555, 5076 .child = sd_ctl_dir, 5077 }, 5078 {} 5079 }; 5080 5081 static struct ctl_table *sd_alloc_ctl_entry(int n) 5082 { 5083 struct ctl_table *entry = 5084 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); 5085 5086 return entry; 5087 } 5088 5089 static void sd_free_ctl_entry(struct ctl_table **tablep) 5090 { 5091 struct ctl_table *entry; 5092 5093 /* 5094 * In the intermediate directories, both the child directory and 5095 * procname are dynamically allocated and could fail but the mode 5096 * will always be set. In the lowest directory the names are 5097 * static strings and all have proc handlers. 
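 *
 * For reference (added, not in the original comment), the tables built by
 * the code below show up under /proc/sys/kernel/sched_domain/, with one
 * directory per cpu and per domain level, e.g.:
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/imbalance_pct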
5098 */ 5099 for (entry = *tablep; entry->mode; entry++) { 5100 if (entry->child) 5101 sd_free_ctl_entry(&entry->child); 5102 if (entry->proc_handler == NULL) 5103 kfree(entry->procname); 5104 } 5105 5106 kfree(*tablep); 5107 *tablep = NULL; 5108 } 5109 5110 static int min_load_idx = 0; 5111 static int max_load_idx = CPU_LOAD_IDX_MAX-1; 5112 5113 static void 5114 set_table_entry(struct ctl_table *entry, 5115 const char *procname, void *data, int maxlen, 5116 umode_t mode, proc_handler *proc_handler, 5117 bool load_idx) 5118 { 5119 entry->procname = procname; 5120 entry->data = data; 5121 entry->maxlen = maxlen; 5122 entry->mode = mode; 5123 entry->proc_handler = proc_handler; 5124 5125 if (load_idx) { 5126 entry->extra1 = &min_load_idx; 5127 entry->extra2 = &max_load_idx; 5128 } 5129 } 5130 5131 static struct ctl_table * 5132 sd_alloc_ctl_domain_table(struct sched_domain *sd) 5133 { 5134 struct ctl_table *table = sd_alloc_ctl_entry(14); 5135 5136 if (table == NULL) 5137 return NULL; 5138 5139 set_table_entry(&table[0], "min_interval", &sd->min_interval, 5140 sizeof(long), 0644, proc_doulongvec_minmax, false); 5141 set_table_entry(&table[1], "max_interval", &sd->max_interval, 5142 sizeof(long), 0644, proc_doulongvec_minmax, false); 5143 set_table_entry(&table[2], "busy_idx", &sd->busy_idx, 5144 sizeof(int), 0644, proc_dointvec_minmax, true); 5145 set_table_entry(&table[3], "idle_idx", &sd->idle_idx, 5146 sizeof(int), 0644, proc_dointvec_minmax, true); 5147 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, 5148 sizeof(int), 0644, proc_dointvec_minmax, true); 5149 set_table_entry(&table[5], "wake_idx", &sd->wake_idx, 5150 sizeof(int), 0644, proc_dointvec_minmax, true); 5151 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, 5152 sizeof(int), 0644, proc_dointvec_minmax, true); 5153 set_table_entry(&table[7], "busy_factor", &sd->busy_factor, 5154 sizeof(int), 0644, proc_dointvec_minmax, false); 5155 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, 5156 sizeof(int), 0644, proc_dointvec_minmax, false); 5157 set_table_entry(&table[9], "cache_nice_tries", 5158 &sd->cache_nice_tries, 5159 sizeof(int), 0644, proc_dointvec_minmax, false); 5160 set_table_entry(&table[10], "flags", &sd->flags, 5161 sizeof(int), 0644, proc_dointvec_minmax, false); 5162 set_table_entry(&table[11], "max_newidle_lb_cost", 5163 &sd->max_newidle_lb_cost, 5164 sizeof(long), 0644, proc_doulongvec_minmax, false); 5165 set_table_entry(&table[12], "name", sd->name, 5166 CORENAME_MAX_SIZE, 0444, proc_dostring, false); 5167 /* &table[13] is terminator */ 5168 5169 return table; 5170 } 5171 5172 static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu) 5173 { 5174 struct ctl_table *entry, *table; 5175 struct sched_domain *sd; 5176 int domain_num = 0, i; 5177 char buf[32]; 5178 5179 for_each_domain(cpu, sd) 5180 domain_num++; 5181 entry = table = sd_alloc_ctl_entry(domain_num + 1); 5182 if (table == NULL) 5183 return NULL; 5184 5185 i = 0; 5186 for_each_domain(cpu, sd) { 5187 snprintf(buf, 32, "domain%d", i); 5188 entry->procname = kstrdup(buf, GFP_KERNEL); 5189 entry->mode = 0555; 5190 entry->child = sd_alloc_ctl_domain_table(sd); 5191 entry++; 5192 i++; 5193 } 5194 return table; 5195 } 5196 5197 static struct ctl_table_header *sd_sysctl_header; 5198 static void register_sched_domain_sysctl(void) 5199 { 5200 int i, cpu_num = num_possible_cpus(); 5201 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); 5202 char buf[32]; 5203 5204 WARN_ON(sd_ctl_dir[0].child); 5205 sd_ctl_dir[0].child = entry; 
5206 5207 if (entry == NULL) 5208 return; 5209 5210 for_each_possible_cpu(i) { 5211 snprintf(buf, 32, "cpu%d", i); 5212 entry->procname = kstrdup(buf, GFP_KERNEL); 5213 entry->mode = 0555; 5214 entry->child = sd_alloc_ctl_cpu_table(i); 5215 entry++; 5216 } 5217 5218 WARN_ON(sd_sysctl_header); 5219 sd_sysctl_header = register_sysctl_table(sd_ctl_root); 5220 } 5221 5222 /* may be called multiple times per register */ 5223 static void unregister_sched_domain_sysctl(void) 5224 { 5225 if (sd_sysctl_header) 5226 unregister_sysctl_table(sd_sysctl_header); 5227 sd_sysctl_header = NULL; 5228 if (sd_ctl_dir[0].child) 5229 sd_free_ctl_entry(&sd_ctl_dir[0].child); 5230 } 5231 #else 5232 static void register_sched_domain_sysctl(void) 5233 { 5234 } 5235 static void unregister_sched_domain_sysctl(void) 5236 { 5237 } 5238 #endif 5239 5240 static void set_rq_online(struct rq *rq) 5241 { 5242 if (!rq->online) { 5243 const struct sched_class *class; 5244 5245 cpumask_set_cpu(rq->cpu, rq->rd->online); 5246 rq->online = 1; 5247 5248 for_each_class(class) { 5249 if (class->rq_online) 5250 class->rq_online(rq); 5251 } 5252 } 5253 } 5254 5255 static void set_rq_offline(struct rq *rq) 5256 { 5257 if (rq->online) { 5258 const struct sched_class *class; 5259 5260 for_each_class(class) { 5261 if (class->rq_offline) 5262 class->rq_offline(rq); 5263 } 5264 5265 cpumask_clear_cpu(rq->cpu, rq->rd->online); 5266 rq->online = 0; 5267 } 5268 } 5269 5270 /* 5271 * migration_call - callback that gets triggered when a CPU is added. 5272 * Here we can start up the necessary migration thread for the new CPU. 5273 */ 5274 static int 5275 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) 5276 { 5277 int cpu = (long)hcpu; 5278 unsigned long flags; 5279 struct rq *rq = cpu_rq(cpu); 5280 5281 switch (action & ~CPU_TASKS_FROZEN) { 5282 5283 case CPU_UP_PREPARE: 5284 rq->calc_load_update = calc_load_update; 5285 break; 5286 5287 case CPU_ONLINE: 5288 /* Update our root-domain */ 5289 raw_spin_lock_irqsave(&rq->lock, flags); 5290 if (rq->rd) { 5291 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5292 5293 set_rq_online(rq); 5294 } 5295 raw_spin_unlock_irqrestore(&rq->lock, flags); 5296 break; 5297 5298 #ifdef CONFIG_HOTPLUG_CPU 5299 case CPU_DYING: 5300 sched_ttwu_pending(); 5301 /* Update our root-domain */ 5302 raw_spin_lock_irqsave(&rq->lock, flags); 5303 if (rq->rd) { 5304 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5305 set_rq_offline(rq); 5306 } 5307 migrate_tasks(cpu); 5308 BUG_ON(rq->nr_running != 1); /* the migration thread */ 5309 raw_spin_unlock_irqrestore(&rq->lock, flags); 5310 break; 5311 5312 case CPU_DEAD: 5313 calc_load_migrate(rq); 5314 break; 5315 #endif 5316 } 5317 5318 update_max_interval(); 5319 5320 return NOTIFY_OK; 5321 } 5322 5323 /* 5324 * Register at high priority so that task migration (migrate_all_tasks) 5325 * happens before everything else. This has to be lower priority than 5326 * the notifier in the perf_event subsystem, though. 
5327 */ 5328 static struct notifier_block migration_notifier = { 5329 .notifier_call = migration_call, 5330 .priority = CPU_PRI_MIGRATION, 5331 }; 5332 5333 static void __cpuinit set_cpu_rq_start_time(void) 5334 { 5335 int cpu = smp_processor_id(); 5336 struct rq *rq = cpu_rq(cpu); 5337 rq->age_stamp = sched_clock_cpu(cpu); 5338 } 5339 5340 static int sched_cpu_active(struct notifier_block *nfb, 5341 unsigned long action, void *hcpu) 5342 { 5343 switch (action & ~CPU_TASKS_FROZEN) { 5344 case CPU_STARTING: 5345 set_cpu_rq_start_time(); 5346 return NOTIFY_OK; 5347 case CPU_DOWN_FAILED: 5348 set_cpu_active((long)hcpu, true); 5349 return NOTIFY_OK; 5350 default: 5351 return NOTIFY_DONE; 5352 } 5353 } 5354 5355 static int sched_cpu_inactive(struct notifier_block *nfb, 5356 unsigned long action, void *hcpu) 5357 { 5358 switch (action & ~CPU_TASKS_FROZEN) { 5359 case CPU_DOWN_PREPARE: 5360 set_cpu_active((long)hcpu, false); 5361 return NOTIFY_OK; 5362 default: 5363 return NOTIFY_DONE; 5364 } 5365 } 5366 5367 static int __init migration_init(void) 5368 { 5369 void *cpu = (void *)(long)smp_processor_id(); 5370 int err; 5371 5372 /* Initialize migration for the boot CPU */ 5373 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); 5374 BUG_ON(err == NOTIFY_BAD); 5375 migration_call(&migration_notifier, CPU_ONLINE, cpu); 5376 register_cpu_notifier(&migration_notifier); 5377 5378 /* Register cpu active notifiers */ 5379 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); 5380 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); 5381 5382 return 0; 5383 } 5384 early_initcall(migration_init); 5385 #endif 5386 5387 #ifdef CONFIG_SMP 5388 5389 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ 5390 5391 #ifdef CONFIG_SCHED_DEBUG 5392 5393 static __read_mostly int sched_debug_enabled; 5394 5395 static int __init sched_debug_setup(char *str) 5396 { 5397 sched_debug_enabled = 1; 5398 5399 return 0; 5400 } 5401 early_param("sched_debug", sched_debug_setup); 5402 5403 static inline bool sched_debug(void) 5404 { 5405 return sched_debug_enabled; 5406 } 5407 5408 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 5409 struct cpumask *groupmask) 5410 { 5411 struct sched_group *group = sd->groups; 5412 5413 cpumask_clear(groupmask); 5414 5415 printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 5416 5417 if (!(sd->flags & SD_LOAD_BALANCE)) { 5418 printk("does not load-balance\n"); 5419 if (sd->parent) 5420 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" 5421 " has parent"); 5422 return -1; 5423 } 5424 5425 printk(KERN_CONT "span %*pbl level %s\n", 5426 cpumask_pr_args(sched_domain_span(sd)), sd->name); 5427 5428 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 5429 printk(KERN_ERR "ERROR: domain->span does not contain " 5430 "CPU%d\n", cpu); 5431 } 5432 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { 5433 printk(KERN_ERR "ERROR: domain->groups does not contain" 5434 " CPU%d\n", cpu); 5435 } 5436 5437 printk(KERN_DEBUG "%*s groups:", level + 1, ""); 5438 do { 5439 if (!group) { 5440 printk("\n"); 5441 printk(KERN_ERR "ERROR: group is NULL\n"); 5442 break; 5443 } 5444 5445 if (!cpumask_weight(sched_group_cpus(group))) { 5446 printk(KERN_CONT "\n"); 5447 printk(KERN_ERR "ERROR: empty group\n"); 5448 break; 5449 } 5450 5451 if (!(sd->flags & SD_OVERLAP) && 5452 cpumask_intersects(groupmask, sched_group_cpus(group))) { 5453 printk(KERN_CONT "\n"); 5454 printk(KERN_ERR "ERROR: repeated CPUs\n"); 5455 break; 5456 } 5457 5458 
cpumask_or(groupmask, groupmask, sched_group_cpus(group)); 5459 5460 printk(KERN_CONT " %*pbl", 5461 cpumask_pr_args(sched_group_cpus(group))); 5462 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { 5463 printk(KERN_CONT " (cpu_capacity = %d)", 5464 group->sgc->capacity); 5465 } 5466 5467 group = group->next; 5468 } while (group != sd->groups); 5469 printk(KERN_CONT "\n"); 5470 5471 if (!cpumask_equal(sched_domain_span(sd), groupmask)) 5472 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 5473 5474 if (sd->parent && 5475 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 5476 printk(KERN_ERR "ERROR: parent span is not a superset " 5477 "of domain->span\n"); 5478 return 0; 5479 } 5480 5481 static void sched_domain_debug(struct sched_domain *sd, int cpu) 5482 { 5483 int level = 0; 5484 5485 if (!sched_debug_enabled) 5486 return; 5487 5488 if (!sd) { 5489 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 5490 return; 5491 } 5492 5493 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); 5494 5495 for (;;) { 5496 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) 5497 break; 5498 level++; 5499 sd = sd->parent; 5500 if (!sd) 5501 break; 5502 } 5503 } 5504 #else /* !CONFIG_SCHED_DEBUG */ 5505 # define sched_domain_debug(sd, cpu) do { } while (0) 5506 static inline bool sched_debug(void) 5507 { 5508 return false; 5509 } 5510 #endif /* CONFIG_SCHED_DEBUG */ 5511 5512 static int sd_degenerate(struct sched_domain *sd) 5513 { 5514 if (cpumask_weight(sched_domain_span(sd)) == 1) 5515 return 1; 5516 5517 /* Following flags need at least 2 groups */ 5518 if (sd->flags & (SD_LOAD_BALANCE | 5519 SD_BALANCE_NEWIDLE | 5520 SD_BALANCE_FORK | 5521 SD_BALANCE_EXEC | 5522 SD_SHARE_CPUCAPACITY | 5523 SD_SHARE_PKG_RESOURCES | 5524 SD_SHARE_POWERDOMAIN)) { 5525 if (sd->groups != sd->groups->next) 5526 return 0; 5527 } 5528 5529 /* Following flags don't use groups */ 5530 if (sd->flags & (SD_WAKE_AFFINE)) 5531 return 0; 5532 5533 return 1; 5534 } 5535 5536 static int 5537 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) 5538 { 5539 unsigned long cflags = sd->flags, pflags = parent->flags; 5540 5541 if (sd_degenerate(parent)) 5542 return 1; 5543 5544 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) 5545 return 0; 5546 5547 /* Flags needing groups don't count if only 1 group in parent */ 5548 if (parent->groups == parent->groups->next) { 5549 pflags &= ~(SD_LOAD_BALANCE | 5550 SD_BALANCE_NEWIDLE | 5551 SD_BALANCE_FORK | 5552 SD_BALANCE_EXEC | 5553 SD_SHARE_CPUCAPACITY | 5554 SD_SHARE_PKG_RESOURCES | 5555 SD_PREFER_SIBLING | 5556 SD_SHARE_POWERDOMAIN); 5557 if (nr_node_ids == 1) 5558 pflags &= ~SD_SERIALIZE; 5559 } 5560 if (~cflags & pflags) 5561 return 0; 5562 5563 return 1; 5564 } 5565 5566 static void free_rootdomain(struct rcu_head *rcu) 5567 { 5568 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); 5569 5570 cpupri_cleanup(&rd->cpupri); 5571 cpudl_cleanup(&rd->cpudl); 5572 free_cpumask_var(rd->dlo_mask); 5573 free_cpumask_var(rd->rto_mask); 5574 free_cpumask_var(rd->online); 5575 free_cpumask_var(rd->span); 5576 kfree(rd); 5577 } 5578 5579 static void rq_attach_root(struct rq *rq, struct root_domain *rd) 5580 { 5581 struct root_domain *old_rd = NULL; 5582 unsigned long flags; 5583 5584 raw_spin_lock_irqsave(&rq->lock, flags); 5585 5586 if (rq->rd) { 5587 old_rd = rq->rd; 5588 5589 if (cpumask_test_cpu(rq->cpu, old_rd->online)) 5590 set_rq_offline(rq); 5591 5592 cpumask_clear_cpu(rq->cpu, 
old_rd->span); 5593 5594 /* 5595 * If we dont want to free the old_rd yet then 5596 * set old_rd to NULL to skip the freeing later 5597 * in this function: 5598 */ 5599 if (!atomic_dec_and_test(&old_rd->refcount)) 5600 old_rd = NULL; 5601 } 5602 5603 atomic_inc(&rd->refcount); 5604 rq->rd = rd; 5605 5606 cpumask_set_cpu(rq->cpu, rd->span); 5607 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 5608 set_rq_online(rq); 5609 5610 raw_spin_unlock_irqrestore(&rq->lock, flags); 5611 5612 if (old_rd) 5613 call_rcu_sched(&old_rd->rcu, free_rootdomain); 5614 } 5615 5616 static int init_rootdomain(struct root_domain *rd) 5617 { 5618 memset(rd, 0, sizeof(*rd)); 5619 5620 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) 5621 goto out; 5622 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) 5623 goto free_span; 5624 if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 5625 goto free_online; 5626 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 5627 goto free_dlo_mask; 5628 5629 init_dl_bw(&rd->dl_bw); 5630 if (cpudl_init(&rd->cpudl) != 0) 5631 goto free_dlo_mask; 5632 5633 if (cpupri_init(&rd->cpupri) != 0) 5634 goto free_rto_mask; 5635 return 0; 5636 5637 free_rto_mask: 5638 free_cpumask_var(rd->rto_mask); 5639 free_dlo_mask: 5640 free_cpumask_var(rd->dlo_mask); 5641 free_online: 5642 free_cpumask_var(rd->online); 5643 free_span: 5644 free_cpumask_var(rd->span); 5645 out: 5646 return -ENOMEM; 5647 } 5648 5649 /* 5650 * By default the system creates a single root-domain with all cpus as 5651 * members (mimicking the global state we have today). 5652 */ 5653 struct root_domain def_root_domain; 5654 5655 static void init_defrootdomain(void) 5656 { 5657 init_rootdomain(&def_root_domain); 5658 5659 atomic_set(&def_root_domain.refcount, 1); 5660 } 5661 5662 static struct root_domain *alloc_rootdomain(void) 5663 { 5664 struct root_domain *rd; 5665 5666 rd = kmalloc(sizeof(*rd), GFP_KERNEL); 5667 if (!rd) 5668 return NULL; 5669 5670 if (init_rootdomain(rd) != 0) { 5671 kfree(rd); 5672 return NULL; 5673 } 5674 5675 return rd; 5676 } 5677 5678 static void free_sched_groups(struct sched_group *sg, int free_sgc) 5679 { 5680 struct sched_group *tmp, *first; 5681 5682 if (!sg) 5683 return; 5684 5685 first = sg; 5686 do { 5687 tmp = sg->next; 5688 5689 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) 5690 kfree(sg->sgc); 5691 5692 kfree(sg); 5693 sg = tmp; 5694 } while (sg != first); 5695 } 5696 5697 static void free_sched_domain(struct rcu_head *rcu) 5698 { 5699 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); 5700 5701 /* 5702 * If its an overlapping domain it has private groups, iterate and 5703 * nuke them all. 5704 */ 5705 if (sd->flags & SD_OVERLAP) { 5706 free_sched_groups(sd->groups, 1); 5707 } else if (atomic_dec_and_test(&sd->groups->ref)) { 5708 kfree(sd->groups->sgc); 5709 kfree(sd->groups); 5710 } 5711 kfree(sd); 5712 } 5713 5714 static void destroy_sched_domain(struct sched_domain *sd, int cpu) 5715 { 5716 call_rcu(&sd->rcu, free_sched_domain); 5717 } 5718 5719 static void destroy_sched_domains(struct sched_domain *sd, int cpu) 5720 { 5721 for (; sd; sd = sd->parent) 5722 destroy_sched_domain(sd, cpu); 5723 } 5724 5725 /* 5726 * Keep a special pointer to the highest sched_domain that has 5727 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this 5728 * allows us to avoid some pointer chasing select_idle_sibling(). 
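 *
 * As an illustration (sketch only; the actual user lives in fair.c),
 * readers dereference this pointer under RCU, roughly:
 *
 *	sd = rcu_dereference(per_cpu(sd_llc, target));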
5729 * 5730 * Also keep a unique ID per domain (we use the first cpu number in 5731 * the cpumask of the domain), this allows us to quickly tell if 5732 * two cpus are in the same cache domain, see cpus_share_cache(). 5733 */ 5734 DEFINE_PER_CPU(struct sched_domain *, sd_llc); 5735 DEFINE_PER_CPU(int, sd_llc_size); 5736 DEFINE_PER_CPU(int, sd_llc_id); 5737 DEFINE_PER_CPU(struct sched_domain *, sd_numa); 5738 DEFINE_PER_CPU(struct sched_domain *, sd_busy); 5739 DEFINE_PER_CPU(struct sched_domain *, sd_asym); 5740 5741 static void update_top_cache_domain(int cpu) 5742 { 5743 struct sched_domain *sd; 5744 struct sched_domain *busy_sd = NULL; 5745 int id = cpu; 5746 int size = 1; 5747 5748 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); 5749 if (sd) { 5750 id = cpumask_first(sched_domain_span(sd)); 5751 size = cpumask_weight(sched_domain_span(sd)); 5752 busy_sd = sd->parent; /* sd_busy */ 5753 } 5754 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd); 5755 5756 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 5757 per_cpu(sd_llc_size, cpu) = size; 5758 per_cpu(sd_llc_id, cpu) = id; 5759 5760 sd = lowest_flag_domain(cpu, SD_NUMA); 5761 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); 5762 5763 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); 5764 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); 5765 } 5766 5767 /* 5768 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must 5769 * hold the hotplug lock. 5770 */ 5771 static void 5772 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) 5773 { 5774 struct rq *rq = cpu_rq(cpu); 5775 struct sched_domain *tmp; 5776 5777 /* Remove the sched domains which do not contribute to scheduling. */ 5778 for (tmp = sd; tmp; ) { 5779 struct sched_domain *parent = tmp->parent; 5780 if (!parent) 5781 break; 5782 5783 if (sd_parent_degenerate(tmp, parent)) { 5784 tmp->parent = parent->parent; 5785 if (parent->parent) 5786 parent->parent->child = tmp; 5787 /* 5788 * Transfer SD_PREFER_SIBLING down in case of a 5789 * degenerate parent; the spans match for this 5790 * so the property transfers. 5791 */ 5792 if (parent->flags & SD_PREFER_SIBLING) 5793 tmp->flags |= SD_PREFER_SIBLING; 5794 destroy_sched_domain(parent, cpu); 5795 } else 5796 tmp = tmp->parent; 5797 } 5798 5799 if (sd && sd_degenerate(sd)) { 5800 tmp = sd; 5801 sd = sd->parent; 5802 destroy_sched_domain(tmp, cpu); 5803 if (sd) 5804 sd->child = NULL; 5805 } 5806 5807 sched_domain_debug(sd, cpu); 5808 5809 rq_attach_root(rq, rd); 5810 tmp = rq->sd; 5811 rcu_assign_pointer(rq->sd, sd); 5812 destroy_sched_domains(tmp, cpu); 5813 5814 update_top_cache_domain(cpu); 5815 } 5816 5817 /* Setup the mask of cpus configured for isolated domains */ 5818 static int __init isolated_cpu_setup(char *str) 5819 { 5820 alloc_bootmem_cpumask_var(&cpu_isolated_map); 5821 cpulist_parse(str, cpu_isolated_map); 5822 return 1; 5823 } 5824 5825 __setup("isolcpus=", isolated_cpu_setup); 5826 5827 struct s_data { 5828 struct sched_domain ** __percpu sd; 5829 struct root_domain *rd; 5830 }; 5831 5832 enum s_alloc { 5833 sa_rootdomain, 5834 sa_sd, 5835 sa_sd_storage, 5836 sa_none, 5837 }; 5838 5839 /* 5840 * Build an iteration mask that can exclude certain CPUs from the upwards 5841 * domain traversal. 5842 * 5843 * Asymmetric node setups can result in situations where the domain tree is of 5844 * unequal depth, make sure to skip domains that already cover the entire 5845 * range. 
5846 * 5847 * In that case build_sched_domains() will have terminated the iteration early 5848 * and our sibling sd spans will be empty. Domains should always include the 5849 * cpu they're built on, so check that. 5850 * 5851 */ 5852 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) 5853 { 5854 const struct cpumask *span = sched_domain_span(sd); 5855 struct sd_data *sdd = sd->private; 5856 struct sched_domain *sibling; 5857 int i; 5858 5859 for_each_cpu(i, span) { 5860 sibling = *per_cpu_ptr(sdd->sd, i); 5861 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 5862 continue; 5863 5864 cpumask_set_cpu(i, sched_group_mask(sg)); 5865 } 5866 } 5867 5868 /* 5869 * Return the canonical balance cpu for this group, this is the first cpu 5870 * of this group that's also in the iteration mask. 5871 */ 5872 int group_balance_cpu(struct sched_group *sg) 5873 { 5874 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); 5875 } 5876 5877 static int 5878 build_overlap_sched_groups(struct sched_domain *sd, int cpu) 5879 { 5880 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; 5881 const struct cpumask *span = sched_domain_span(sd); 5882 struct cpumask *covered = sched_domains_tmpmask; 5883 struct sd_data *sdd = sd->private; 5884 struct sched_domain *sibling; 5885 int i; 5886 5887 cpumask_clear(covered); 5888 5889 for_each_cpu(i, span) { 5890 struct cpumask *sg_span; 5891 5892 if (cpumask_test_cpu(i, covered)) 5893 continue; 5894 5895 sibling = *per_cpu_ptr(sdd->sd, i); 5896 5897 /* See the comment near build_group_mask(). */ 5898 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 5899 continue; 5900 5901 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 5902 GFP_KERNEL, cpu_to_node(cpu)); 5903 5904 if (!sg) 5905 goto fail; 5906 5907 sg_span = sched_group_cpus(sg); 5908 if (sibling->child) 5909 cpumask_copy(sg_span, sched_domain_span(sibling->child)); 5910 else 5911 cpumask_set_cpu(i, sg_span); 5912 5913 cpumask_or(covered, covered, sg_span); 5914 5915 sg->sgc = *per_cpu_ptr(sdd->sgc, i); 5916 if (atomic_inc_return(&sg->sgc->ref) == 1) 5917 build_group_mask(sd, sg); 5918 5919 /* 5920 * Initialize sgc->capacity such that even if we mess up the 5921 * domains and no possible iteration will get us here, we won't 5922 * die on a /0 trap. 5923 */ 5924 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); 5925 5926 /* 5927 * Make sure the first group of this domain contains the 5928 * canonical balance cpu. Otherwise the sched_domain iteration 5929 * breaks. See update_sg_lb_stats(). 
5930 */ 5931 if ((!groups && cpumask_test_cpu(cpu, sg_span)) || 5932 group_balance_cpu(sg) == cpu) 5933 groups = sg; 5934 5935 if (!first) 5936 first = sg; 5937 if (last) 5938 last->next = sg; 5939 last = sg; 5940 last->next = first; 5941 } 5942 sd->groups = groups; 5943 5944 return 0; 5945 5946 fail: 5947 free_sched_groups(first, 0); 5948 5949 return -ENOMEM; 5950 } 5951 5952 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) 5953 { 5954 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 5955 struct sched_domain *child = sd->child; 5956 5957 if (child) 5958 cpu = cpumask_first(sched_domain_span(child)); 5959 5960 if (sg) { 5961 *sg = *per_cpu_ptr(sdd->sg, cpu); 5962 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); 5963 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */ 5964 } 5965 5966 return cpu; 5967 } 5968 5969 /* 5970 * build_sched_groups will build a circular linked list of the groups 5971 * covered by the given span, and will set each group's ->cpumask correctly, 5972 * and ->cpu_capacity to 0. 5973 * 5974 * Assumes the sched_domain tree is fully constructed 5975 */ 5976 static int 5977 build_sched_groups(struct sched_domain *sd, int cpu) 5978 { 5979 struct sched_group *first = NULL, *last = NULL; 5980 struct sd_data *sdd = sd->private; 5981 const struct cpumask *span = sched_domain_span(sd); 5982 struct cpumask *covered; 5983 int i; 5984 5985 get_group(cpu, sdd, &sd->groups); 5986 atomic_inc(&sd->groups->ref); 5987 5988 if (cpu != cpumask_first(span)) 5989 return 0; 5990 5991 lockdep_assert_held(&sched_domains_mutex); 5992 covered = sched_domains_tmpmask; 5993 5994 cpumask_clear(covered); 5995 5996 for_each_cpu(i, span) { 5997 struct sched_group *sg; 5998 int group, j; 5999 6000 if (cpumask_test_cpu(i, covered)) 6001 continue; 6002 6003 group = get_group(i, sdd, &sg); 6004 cpumask_setall(sched_group_mask(sg)); 6005 6006 for_each_cpu(j, span) { 6007 if (get_group(j, sdd, NULL) != group) 6008 continue; 6009 6010 cpumask_set_cpu(j, covered); 6011 cpumask_set_cpu(j, sched_group_cpus(sg)); 6012 } 6013 6014 if (!first) 6015 first = sg; 6016 if (last) 6017 last->next = sg; 6018 last = sg; 6019 } 6020 last->next = first; 6021 6022 return 0; 6023 } 6024 6025 /* 6026 * Initialize sched groups cpu_capacity. 6027 * 6028 * cpu_capacity indicates the capacity of sched group, which is used while 6029 * distributing the load between different sched groups in a sched domain. 6030 * Typically cpu_capacity for all the groups in a sched domain will be same 6031 * unless there are asymmetries in the topology. If there are asymmetries, 6032 * group having more cpu_capacity will pickup more load compared to the 6033 * group having less cpu_capacity. 
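 *
 * A small worked example (added for illustration): with two groups of
 * capacity 2048 and 1024 (in SCHED_CAPACITY_SCALE units of 1024), the
 * load balancer aims to leave the first group with roughly twice the
 * load of the second, i.e. about 2/3 versus 1/3 of the domain total.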
6034 */ 6035 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) 6036 { 6037 struct sched_group *sg = sd->groups; 6038 6039 WARN_ON(!sg); 6040 6041 do { 6042 sg->group_weight = cpumask_weight(sched_group_cpus(sg)); 6043 sg = sg->next; 6044 } while (sg != sd->groups); 6045 6046 if (cpu != group_balance_cpu(sg)) 6047 return; 6048 6049 update_group_capacity(sd, cpu); 6050 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight); 6051 } 6052 6053 /* 6054 * Initializers for schedule domains 6055 * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 6056 */ 6057 6058 static int default_relax_domain_level = -1; 6059 int sched_domain_level_max; 6060 6061 static int __init setup_relax_domain_level(char *str) 6062 { 6063 if (kstrtoint(str, 0, &default_relax_domain_level)) 6064 pr_warn("Unable to set relax_domain_level\n"); 6065 6066 return 1; 6067 } 6068 __setup("relax_domain_level=", setup_relax_domain_level); 6069 6070 static void set_domain_attribute(struct sched_domain *sd, 6071 struct sched_domain_attr *attr) 6072 { 6073 int request; 6074 6075 if (!attr || attr->relax_domain_level < 0) { 6076 if (default_relax_domain_level < 0) 6077 return; 6078 else 6079 request = default_relax_domain_level; 6080 } else 6081 request = attr->relax_domain_level; 6082 if (request < sd->level) { 6083 /* turn off idle balance on this domain */ 6084 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 6085 } else { 6086 /* turn on idle balance on this domain */ 6087 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 6088 } 6089 } 6090 6091 static void __sdt_free(const struct cpumask *cpu_map); 6092 static int __sdt_alloc(const struct cpumask *cpu_map); 6093 6094 static void __free_domain_allocs(struct s_data *d, enum s_alloc what, 6095 const struct cpumask *cpu_map) 6096 { 6097 switch (what) { 6098 case sa_rootdomain: 6099 if (!atomic_read(&d->rd->refcount)) 6100 free_rootdomain(&d->rd->rcu); /* fall through */ 6101 case sa_sd: 6102 free_percpu(d->sd); /* fall through */ 6103 case sa_sd_storage: 6104 __sdt_free(cpu_map); /* fall through */ 6105 case sa_none: 6106 break; 6107 } 6108 } 6109 6110 static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, 6111 const struct cpumask *cpu_map) 6112 { 6113 memset(d, 0, sizeof(*d)); 6114 6115 if (__sdt_alloc(cpu_map)) 6116 return sa_sd_storage; 6117 d->sd = alloc_percpu(struct sched_domain *); 6118 if (!d->sd) 6119 return sa_sd_storage; 6120 d->rd = alloc_rootdomain(); 6121 if (!d->rd) 6122 return sa_sd; 6123 return sa_rootdomain; 6124 } 6125 6126 /* 6127 * NULL the sd_data elements we've used to build the sched_domain and 6128 * sched_group structure so that the subsequent __free_domain_allocs() 6129 * will not free the data we're using. 
6130 */ 6131 static void claim_allocations(int cpu, struct sched_domain *sd) 6132 { 6133 struct sd_data *sdd = sd->private; 6134 6135 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 6136 *per_cpu_ptr(sdd->sd, cpu) = NULL; 6137 6138 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 6139 *per_cpu_ptr(sdd->sg, cpu) = NULL; 6140 6141 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 6142 *per_cpu_ptr(sdd->sgc, cpu) = NULL; 6143 } 6144 6145 #ifdef CONFIG_NUMA 6146 static int sched_domains_numa_levels; 6147 enum numa_topology_type sched_numa_topology_type; 6148 static int *sched_domains_numa_distance; 6149 int sched_max_numa_distance; 6150 static struct cpumask ***sched_domains_numa_masks; 6151 static int sched_domains_curr_level; 6152 #endif 6153 6154 /* 6155 * SD_flags allowed in topology descriptions. 6156 * 6157 * SD_SHARE_CPUCAPACITY - describes SMT topologies 6158 * SD_SHARE_PKG_RESOURCES - describes shared caches 6159 * SD_NUMA - describes NUMA topologies 6160 * SD_SHARE_POWERDOMAIN - describes shared power domain 6161 * 6162 * Odd one out: 6163 * SD_ASYM_PACKING - describes SMT quirks 6164 */ 6165 #define TOPOLOGY_SD_FLAGS \ 6166 (SD_SHARE_CPUCAPACITY | \ 6167 SD_SHARE_PKG_RESOURCES | \ 6168 SD_NUMA | \ 6169 SD_ASYM_PACKING | \ 6170 SD_SHARE_POWERDOMAIN) 6171 6172 static struct sched_domain * 6173 sd_init(struct sched_domain_topology_level *tl, int cpu) 6174 { 6175 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); 6176 int sd_weight, sd_flags = 0; 6177 6178 #ifdef CONFIG_NUMA 6179 /* 6180 * Ugly hack to pass state to sd_numa_mask()... 6181 */ 6182 sched_domains_curr_level = tl->numa_level; 6183 #endif 6184 6185 sd_weight = cpumask_weight(tl->mask(cpu)); 6186 6187 if (tl->sd_flags) 6188 sd_flags = (*tl->sd_flags)(); 6189 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, 6190 "wrong sd_flags in topology description\n")) 6191 sd_flags &= ~TOPOLOGY_SD_FLAGS; 6192 6193 *sd = (struct sched_domain){ 6194 .min_interval = sd_weight, 6195 .max_interval = 2*sd_weight, 6196 .busy_factor = 32, 6197 .imbalance_pct = 125, 6198 6199 .cache_nice_tries = 0, 6200 .busy_idx = 0, 6201 .idle_idx = 0, 6202 .newidle_idx = 0, 6203 .wake_idx = 0, 6204 .forkexec_idx = 0, 6205 6206 .flags = 1*SD_LOAD_BALANCE 6207 | 1*SD_BALANCE_NEWIDLE 6208 | 1*SD_BALANCE_EXEC 6209 | 1*SD_BALANCE_FORK 6210 | 0*SD_BALANCE_WAKE 6211 | 1*SD_WAKE_AFFINE 6212 | 0*SD_SHARE_CPUCAPACITY 6213 | 0*SD_SHARE_PKG_RESOURCES 6214 | 0*SD_SERIALIZE 6215 | 0*SD_PREFER_SIBLING 6216 | 0*SD_NUMA 6217 | sd_flags 6218 , 6219 6220 .last_balance = jiffies, 6221 .balance_interval = sd_weight, 6222 .smt_gain = 0, 6223 .max_newidle_lb_cost = 0, 6224 .next_decay_max_lb_cost = jiffies, 6225 #ifdef CONFIG_SCHED_DEBUG 6226 .name = tl->name, 6227 #endif 6228 }; 6229 6230 /* 6231 * Convert topological properties into behaviour. 
6232 */ 6233 6234 if (sd->flags & SD_SHARE_CPUCAPACITY) { 6235 sd->flags |= SD_PREFER_SIBLING; 6236 sd->imbalance_pct = 110; 6237 sd->smt_gain = 1178; /* ~15% */ 6238 6239 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { 6240 sd->imbalance_pct = 117; 6241 sd->cache_nice_tries = 1; 6242 sd->busy_idx = 2; 6243 6244 #ifdef CONFIG_NUMA 6245 } else if (sd->flags & SD_NUMA) { 6246 sd->cache_nice_tries = 2; 6247 sd->busy_idx = 3; 6248 sd->idle_idx = 2; 6249 6250 sd->flags |= SD_SERIALIZE; 6251 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { 6252 sd->flags &= ~(SD_BALANCE_EXEC | 6253 SD_BALANCE_FORK | 6254 SD_WAKE_AFFINE); 6255 } 6256 6257 #endif 6258 } else { 6259 sd->flags |= SD_PREFER_SIBLING; 6260 sd->cache_nice_tries = 1; 6261 sd->busy_idx = 2; 6262 sd->idle_idx = 1; 6263 } 6264 6265 sd->private = &tl->data; 6266 6267 return sd; 6268 } 6269 6270 /* 6271 * Topology list, bottom-up. 6272 */ 6273 static struct sched_domain_topology_level default_topology[] = { 6274 #ifdef CONFIG_SCHED_SMT 6275 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 6276 #endif 6277 #ifdef CONFIG_SCHED_MC 6278 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 6279 #endif 6280 { cpu_cpu_mask, SD_INIT_NAME(DIE) }, 6281 { NULL, }, 6282 }; 6283 6284 struct sched_domain_topology_level *sched_domain_topology = default_topology; 6285 6286 #define for_each_sd_topology(tl) \ 6287 for (tl = sched_domain_topology; tl->mask; tl++) 6288 6289 void set_sched_topology(struct sched_domain_topology_level *tl) 6290 { 6291 sched_domain_topology = tl; 6292 } 6293 6294 #ifdef CONFIG_NUMA 6295 6296 static const struct cpumask *sd_numa_mask(int cpu) 6297 { 6298 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 6299 } 6300 6301 static void sched_numa_warn(const char *str) 6302 { 6303 static int done = false; 6304 int i,j; 6305 6306 if (done) 6307 return; 6308 6309 done = true; 6310 6311 printk(KERN_WARNING "ERROR: %s\n\n", str); 6312 6313 for (i = 0; i < nr_node_ids; i++) { 6314 printk(KERN_WARNING " "); 6315 for (j = 0; j < nr_node_ids; j++) 6316 printk(KERN_CONT "%02d ", node_distance(i,j)); 6317 printk(KERN_CONT "\n"); 6318 } 6319 printk(KERN_WARNING "\n"); 6320 } 6321 6322 bool find_numa_distance(int distance) 6323 { 6324 int i; 6325 6326 if (distance == node_distance(0, 0)) 6327 return true; 6328 6329 for (i = 0; i < sched_domains_numa_levels; i++) { 6330 if (sched_domains_numa_distance[i] == distance) 6331 return true; 6332 } 6333 6334 return false; 6335 } 6336 6337 /* 6338 * A system can have three types of NUMA topology: 6339 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system 6340 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes 6341 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane 6342 * 6343 * The difference between a glueless mesh topology and a backplane 6344 * topology lies in whether communication between not directly 6345 * connected nodes goes through intermediary nodes (where programs 6346 * could run), or through backplane controllers. This affects 6347 * placement of programs. 6348 * 6349 * The type of topology can be discerned with the following tests: 6350 * - If the maximum distance between any nodes is 1 hop, the system 6351 * is directly connected. 6352 * - If for two nodes A and B, located N > 1 hops away from each other, 6353 * there is an intermediary node C, which is < N hops away from both 6354 * nodes A and B, the system is a glueless mesh. 
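 *
 * A tiny worked example (added for illustration), using the same layout
 * as the sched_numa_warn() distance dump:
 *
 *	node   0   1   2   3
 *	  0:  10  20  20  30
 *	  1:  20  10  20  20
 *	  2:  20  20  10  20
 *	  3:  30  20  20  10
 *
 * The maximum distance (nodes 0 and 3) is more than one hop, but node 1
 * (or node 2) is closer to both 0 and 3 than they are to each other, so
 * init_numa_topology_type() below classifies this as a glueless mesh
 * rather than a backplane topology.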
6355 */ 6356 static void init_numa_topology_type(void) 6357 { 6358 int a, b, c, n; 6359 6360 n = sched_max_numa_distance; 6361 6362 if (n <= 1) 6363 sched_numa_topology_type = NUMA_DIRECT; 6364 6365 for_each_online_node(a) { 6366 for_each_online_node(b) { 6367 /* Find two nodes furthest removed from each other. */ 6368 if (node_distance(a, b) < n) 6369 continue; 6370 6371 /* Is there an intermediary node between a and b? */ 6372 for_each_online_node(c) { 6373 if (node_distance(a, c) < n && 6374 node_distance(b, c) < n) { 6375 sched_numa_topology_type = 6376 NUMA_GLUELESS_MESH; 6377 return; 6378 } 6379 } 6380 6381 sched_numa_topology_type = NUMA_BACKPLANE; 6382 return; 6383 } 6384 } 6385 } 6386 6387 static void sched_init_numa(void) 6388 { 6389 int next_distance, curr_distance = node_distance(0, 0); 6390 struct sched_domain_topology_level *tl; 6391 int level = 0; 6392 int i, j, k; 6393 6394 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); 6395 if (!sched_domains_numa_distance) 6396 return; 6397 6398 /* 6399 * O(nr_nodes^2) deduplicating selection sort -- in order to find the 6400 * unique distances in the node_distance() table. 6401 * 6402 * Assumes node_distance(0,j) includes all distances in 6403 * node_distance(i,j) in order to avoid cubic time. 6404 */ 6405 next_distance = curr_distance; 6406 for (i = 0; i < nr_node_ids; i++) { 6407 for (j = 0; j < nr_node_ids; j++) { 6408 for (k = 0; k < nr_node_ids; k++) { 6409 int distance = node_distance(i, k); 6410 6411 if (distance > curr_distance && 6412 (distance < next_distance || 6413 next_distance == curr_distance)) 6414 next_distance = distance; 6415 6416 /* 6417 * While not a strong assumption it would be nice to know 6418 * about cases where if node A is connected to B, B is not 6419 * equally connected to A. 6420 */ 6421 if (sched_debug() && node_distance(k, i) != distance) 6422 sched_numa_warn("Node-distance not symmetric"); 6423 6424 if (sched_debug() && i && !find_numa_distance(distance)) 6425 sched_numa_warn("Node-0 not representative"); 6426 } 6427 if (next_distance != curr_distance) { 6428 sched_domains_numa_distance[level++] = next_distance; 6429 sched_domains_numa_levels = level; 6430 curr_distance = next_distance; 6431 } else break; 6432 } 6433 6434 /* 6435 * In case of sched_debug() we verify the above assumption. 6436 */ 6437 if (!sched_debug()) 6438 break; 6439 } 6440 6441 if (!level) 6442 return; 6443 6444 /* 6445 * 'level' contains the number of unique distances, excluding the 6446 * identity distance node_distance(i,i). 6447 * 6448 * The sched_domains_numa_distance[] array includes the actual distance 6449 * numbers. 6450 */ 6451 6452 /* 6453 * Here, we should temporarily reset sched_domains_numa_levels to 0. 6454 * If it fails to allocate memory for array sched_domains_numa_masks[][], 6455 * the array will contain less then 'level' members. This could be 6456 * dangerous when we use it to iterate array sched_domains_numa_masks[][] 6457 * in other functions. 6458 * 6459 * We reset it to 'level' at the end of this function. 6460 */ 6461 sched_domains_numa_levels = 0; 6462 6463 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); 6464 if (!sched_domains_numa_masks) 6465 return; 6466 6467 /* 6468 * Now for each level, construct a mask per node which contains all 6469 * cpus of nodes that are that many hops away from us. 
6470 */ 6471 for (i = 0; i < level; i++) { 6472 sched_domains_numa_masks[i] = 6473 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 6474 if (!sched_domains_numa_masks[i]) 6475 return; 6476 6477 for (j = 0; j < nr_node_ids; j++) { 6478 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 6479 if (!mask) 6480 return; 6481 6482 sched_domains_numa_masks[i][j] = mask; 6483 6484 for (k = 0; k < nr_node_ids; k++) { 6485 if (node_distance(j, k) > sched_domains_numa_distance[i]) 6486 continue; 6487 6488 cpumask_or(mask, mask, cpumask_of_node(k)); 6489 } 6490 } 6491 } 6492 6493 /* Compute default topology size */ 6494 for (i = 0; sched_domain_topology[i].mask; i++); 6495 6496 tl = kzalloc((i + level + 1) * 6497 sizeof(struct sched_domain_topology_level), GFP_KERNEL); 6498 if (!tl) 6499 return; 6500 6501 /* 6502 * Copy the default topology bits.. 6503 */ 6504 for (i = 0; sched_domain_topology[i].mask; i++) 6505 tl[i] = sched_domain_topology[i]; 6506 6507 /* 6508 * .. and append 'j' levels of NUMA goodness. 6509 */ 6510 for (j = 0; j < level; i++, j++) { 6511 tl[i] = (struct sched_domain_topology_level){ 6512 .mask = sd_numa_mask, 6513 .sd_flags = cpu_numa_flags, 6514 .flags = SDTL_OVERLAP, 6515 .numa_level = j, 6516 SD_INIT_NAME(NUMA) 6517 }; 6518 } 6519 6520 sched_domain_topology = tl; 6521 6522 sched_domains_numa_levels = level; 6523 sched_max_numa_distance = sched_domains_numa_distance[level - 1]; 6524 6525 init_numa_topology_type(); 6526 } 6527 6528 static void sched_domains_numa_masks_set(int cpu) 6529 { 6530 int i, j; 6531 int node = cpu_to_node(cpu); 6532 6533 for (i = 0; i < sched_domains_numa_levels; i++) { 6534 for (j = 0; j < nr_node_ids; j++) { 6535 if (node_distance(j, node) <= sched_domains_numa_distance[i]) 6536 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 6537 } 6538 } 6539 } 6540 6541 static void sched_domains_numa_masks_clear(int cpu) 6542 { 6543 int i, j; 6544 for (i = 0; i < sched_domains_numa_levels; i++) { 6545 for (j = 0; j < nr_node_ids; j++) 6546 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 6547 } 6548 } 6549 6550 /* 6551 * Update sched_domains_numa_masks[level][node] array when new cpus 6552 * are onlined. 
6553 */ 6554 static int sched_domains_numa_masks_update(struct notifier_block *nfb, 6555 unsigned long action, 6556 void *hcpu) 6557 { 6558 int cpu = (long)hcpu; 6559 6560 switch (action & ~CPU_TASKS_FROZEN) { 6561 case CPU_ONLINE: 6562 sched_domains_numa_masks_set(cpu); 6563 break; 6564 6565 case CPU_DEAD: 6566 sched_domains_numa_masks_clear(cpu); 6567 break; 6568 6569 default: 6570 return NOTIFY_DONE; 6571 } 6572 6573 return NOTIFY_OK; 6574 } 6575 #else 6576 static inline void sched_init_numa(void) 6577 { 6578 } 6579 6580 static int sched_domains_numa_masks_update(struct notifier_block *nfb, 6581 unsigned long action, 6582 void *hcpu) 6583 { 6584 return 0; 6585 } 6586 #endif /* CONFIG_NUMA */ 6587 6588 static int __sdt_alloc(const struct cpumask *cpu_map) 6589 { 6590 struct sched_domain_topology_level *tl; 6591 int j; 6592 6593 for_each_sd_topology(tl) { 6594 struct sd_data *sdd = &tl->data; 6595 6596 sdd->sd = alloc_percpu(struct sched_domain *); 6597 if (!sdd->sd) 6598 return -ENOMEM; 6599 6600 sdd->sg = alloc_percpu(struct sched_group *); 6601 if (!sdd->sg) 6602 return -ENOMEM; 6603 6604 sdd->sgc = alloc_percpu(struct sched_group_capacity *); 6605 if (!sdd->sgc) 6606 return -ENOMEM; 6607 6608 for_each_cpu(j, cpu_map) { 6609 struct sched_domain *sd; 6610 struct sched_group *sg; 6611 struct sched_group_capacity *sgc; 6612 6613 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 6614 GFP_KERNEL, cpu_to_node(j)); 6615 if (!sd) 6616 return -ENOMEM; 6617 6618 *per_cpu_ptr(sdd->sd, j) = sd; 6619 6620 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6621 GFP_KERNEL, cpu_to_node(j)); 6622 if (!sg) 6623 return -ENOMEM; 6624 6625 sg->next = sg; 6626 6627 *per_cpu_ptr(sdd->sg, j) = sg; 6628 6629 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 6630 GFP_KERNEL, cpu_to_node(j)); 6631 if (!sgc) 6632 return -ENOMEM; 6633 6634 *per_cpu_ptr(sdd->sgc, j) = sgc; 6635 } 6636 } 6637 6638 return 0; 6639 } 6640 6641 static void __sdt_free(const struct cpumask *cpu_map) 6642 { 6643 struct sched_domain_topology_level *tl; 6644 int j; 6645 6646 for_each_sd_topology(tl) { 6647 struct sd_data *sdd = &tl->data; 6648 6649 for_each_cpu(j, cpu_map) { 6650 struct sched_domain *sd; 6651 6652 if (sdd->sd) { 6653 sd = *per_cpu_ptr(sdd->sd, j); 6654 if (sd && (sd->flags & SD_OVERLAP)) 6655 free_sched_groups(sd->groups, 0); 6656 kfree(*per_cpu_ptr(sdd->sd, j)); 6657 } 6658 6659 if (sdd->sg) 6660 kfree(*per_cpu_ptr(sdd->sg, j)); 6661 if (sdd->sgc) 6662 kfree(*per_cpu_ptr(sdd->sgc, j)); 6663 } 6664 free_percpu(sdd->sd); 6665 sdd->sd = NULL; 6666 free_percpu(sdd->sg); 6667 sdd->sg = NULL; 6668 free_percpu(sdd->sgc); 6669 sdd->sgc = NULL; 6670 } 6671 } 6672 6673 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 6674 const struct cpumask *cpu_map, struct sched_domain_attr *attr, 6675 struct sched_domain *child, int cpu) 6676 { 6677 struct sched_domain *sd = sd_init(tl, cpu); 6678 if (!sd) 6679 return child; 6680 6681 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); 6682 if (child) { 6683 sd->level = child->level + 1; 6684 sched_domain_level_max = max(sched_domain_level_max, sd->level); 6685 child->parent = sd; 6686 sd->child = child; 6687 6688 if (!cpumask_subset(sched_domain_span(child), 6689 sched_domain_span(sd))) { 6690 pr_err("BUG: arch topology borken\n"); 6691 #ifdef CONFIG_SCHED_DEBUG 6692 pr_err(" the %s domain not a subset of the %s domain\n", 6693 child->name, sd->name); 6694 #endif 6695 /* Fixup, ensure @sd has at least @child 
cpus. */ 6696 cpumask_or(sched_domain_span(sd), 6697 sched_domain_span(sd), 6698 sched_domain_span(child)); 6699 } 6700 6701 } 6702 set_domain_attribute(sd, attr); 6703 6704 return sd; 6705 } 6706 6707 /* 6708 * Build sched domains for a given set of cpus and attach the sched domains 6709 * to the individual cpus. 6710 */ 6711 static int build_sched_domains(const struct cpumask *cpu_map, 6712 struct sched_domain_attr *attr) 6713 { 6714 enum s_alloc alloc_state; 6715 struct sched_domain *sd; 6716 struct s_data d; 6717 int i, ret = -ENOMEM; 6718 6719 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 6720 if (alloc_state != sa_rootdomain) 6721 goto error; 6722 6723 /* Set up domains for cpus specified by the cpu_map. */ 6724 for_each_cpu(i, cpu_map) { 6725 struct sched_domain_topology_level *tl; 6726 6727 sd = NULL; 6728 for_each_sd_topology(tl) { 6729 sd = build_sched_domain(tl, cpu_map, attr, sd, i); 6730 if (tl == sched_domain_topology) 6731 *per_cpu_ptr(d.sd, i) = sd; 6732 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) 6733 sd->flags |= SD_OVERLAP; 6734 if (cpumask_equal(cpu_map, sched_domain_span(sd))) 6735 break; 6736 } 6737 } 6738 6739 /* Build the groups for the domains */ 6740 for_each_cpu(i, cpu_map) { 6741 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 6742 sd->span_weight = cpumask_weight(sched_domain_span(sd)); 6743 if (sd->flags & SD_OVERLAP) { 6744 if (build_overlap_sched_groups(sd, i)) 6745 goto error; 6746 } else { 6747 if (build_sched_groups(sd, i)) 6748 goto error; 6749 } 6750 } 6751 } 6752 6753 /* Calculate CPU capacity for physical packages and nodes */ 6754 for (i = nr_cpumask_bits-1; i >= 0; i--) { 6755 if (!cpumask_test_cpu(i, cpu_map)) 6756 continue; 6757 6758 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 6759 claim_allocations(i, sd); 6760 init_sched_groups_capacity(i, sd); 6761 } 6762 } 6763 6764 /* Attach the domains */ 6765 rcu_read_lock(); 6766 for_each_cpu(i, cpu_map) { 6767 sd = *per_cpu_ptr(d.sd, i); 6768 cpu_attach_domain(sd, d.rd, i); 6769 } 6770 rcu_read_unlock(); 6771 6772 ret = 0; 6773 error: 6774 __free_domain_allocs(&d, alloc_state, cpu_map); 6775 return ret; 6776 } 6777 6778 static cpumask_var_t *doms_cur; /* current sched domains */ 6779 static int ndoms_cur; /* number of sched domains in 'doms_cur' */ 6780 static struct sched_domain_attr *dattr_cur; 6781 /* attributes of custom domains in 'doms_cur' */ 6782 6783 /* 6784 * Special case: If a kmalloc of a doms_cur partition (array of 6785 * cpumask) fails, then fall back to a single sched domain, 6786 * as determined by the single cpumask fallback_doms. 6787 */ 6788 static cpumask_var_t fallback_doms; 6789 6790 /* 6791 * arch_update_cpu_topology lets virtualized architectures update the 6792 * cpu core maps. It is supposed to return 1 if the topology changed 6793 * or 0 if it stayed the same.
6794 */ 6795 int __weak arch_update_cpu_topology(void) 6796 { 6797 return 0; 6798 } 6799 6800 cpumask_var_t *alloc_sched_domains(unsigned int ndoms) 6801 { 6802 int i; 6803 cpumask_var_t *doms; 6804 6805 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); 6806 if (!doms) 6807 return NULL; 6808 for (i = 0; i < ndoms; i++) { 6809 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { 6810 free_sched_domains(doms, i); 6811 return NULL; 6812 } 6813 } 6814 return doms; 6815 } 6816 6817 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) 6818 { 6819 unsigned int i; 6820 for (i = 0; i < ndoms; i++) 6821 free_cpumask_var(doms[i]); 6822 kfree(doms); 6823 } 6824 6825 /* 6826 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 6827 * For now this just excludes isolated cpus, but could be used to 6828 * exclude other special cases in the future. 6829 */ 6830 static int init_sched_domains(const struct cpumask *cpu_map) 6831 { 6832 int err; 6833 6834 arch_update_cpu_topology(); 6835 ndoms_cur = 1; 6836 doms_cur = alloc_sched_domains(ndoms_cur); 6837 if (!doms_cur) 6838 doms_cur = &fallback_doms; 6839 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); 6840 err = build_sched_domains(doms_cur[0], NULL); 6841 register_sched_domain_sysctl(); 6842 6843 return err; 6844 } 6845 6846 /* 6847 * Detach sched domains from a group of cpus specified in cpu_map. 6848 * These cpus will now be attached to the NULL domain. 6849 */ 6850 static void detach_destroy_domains(const struct cpumask *cpu_map) 6851 { 6852 int i; 6853 6854 rcu_read_lock(); 6855 for_each_cpu(i, cpu_map) 6856 cpu_attach_domain(NULL, &def_root_domain, i); 6857 rcu_read_unlock(); 6858 } 6859 6860 /* handle null as "default" */ 6861 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, 6862 struct sched_domain_attr *new, int idx_new) 6863 { 6864 struct sched_domain_attr tmp; 6865 6866 /* fast path */ 6867 if (!new && !cur) 6868 return 1; 6869 6870 tmp = SD_ATTR_INIT; 6871 return !memcmp(cur ? (cur + idx_cur) : &tmp, 6872 new ? (new + idx_new) : &tmp, 6873 sizeof(struct sched_domain_attr)); 6874 } 6875 6876 /* 6877 * Partition sched domains as specified by the 'ndoms_new' 6878 * cpumasks in the array doms_new[]. This compares 6879 * doms_new[] to the current sched domain partitioning, doms_cur[]. 6880 * It destroys each deleted domain and builds each new domain. 6881 * 6882 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. 6883 * The masks don't intersect (don't overlap). We should set up one 6884 * sched domain for each mask. CPUs not in any of the cpumasks will 6885 * not be load balanced. If the same cpumask appears both in the 6886 * current 'doms_cur' domains and in the new 'doms_new', we can leave 6887 * it as it is. 6888 * 6889 * The passed-in 'doms_new' should be allocated using 6890 * alloc_sched_domains. This routine takes ownership of it and will 6891 * free_sched_domains it when done with it. If the caller failed the 6892 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, 6893 * and partition_sched_domains() will fall back to the single partition 6894 * 'fallback_doms'; this also forces the domains to be rebuilt. 6895 * 6896 * If doms_new == NULL it will be replaced with cpu_online_mask. 6897 * ndoms_new == 0 is a special case for destroying existing domains, 6898 * and it will not create the default domain.
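 *
 * A minimal usage sketch (hypothetical caller; mask_a and mask_b are
 * placeholder cpumasks, not real symbols):
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	get_online_cpus();
 *	if (doms) {
 *		cpumask_copy(doms[0], mask_a);
 *		cpumask_copy(doms[1], mask_b);
 *		partition_sched_domains(2, doms, NULL);
 *	} else {
 *		partition_sched_domains(1, NULL, NULL);
 *	}
 *	put_online_cpus();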
6899 * 6900 * Call with hotplug lock held 6901 */ 6902 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 6903 struct sched_domain_attr *dattr_new) 6904 { 6905 int i, j, n; 6906 int new_topology; 6907 6908 mutex_lock(&sched_domains_mutex); 6909 6910 /* always unregister in case we don't destroy any domains */ 6911 unregister_sched_domain_sysctl(); 6912 6913 /* Let architecture update cpu core mappings. */ 6914 new_topology = arch_update_cpu_topology(); 6915 6916 n = doms_new ? ndoms_new : 0; 6917 6918 /* Destroy deleted domains */ 6919 for (i = 0; i < ndoms_cur; i++) { 6920 for (j = 0; j < n && !new_topology; j++) { 6921 if (cpumask_equal(doms_cur[i], doms_new[j]) 6922 && dattrs_equal(dattr_cur, i, dattr_new, j)) 6923 goto match1; 6924 } 6925 /* no match - a current sched domain not in new doms_new[] */ 6926 detach_destroy_domains(doms_cur[i]); 6927 match1: 6928 ; 6929 } 6930 6931 n = ndoms_cur; 6932 if (doms_new == NULL) { 6933 n = 0; 6934 doms_new = &fallback_doms; 6935 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); 6936 WARN_ON_ONCE(dattr_new); 6937 } 6938 6939 /* Build new domains */ 6940 for (i = 0; i < ndoms_new; i++) { 6941 for (j = 0; j < n && !new_topology; j++) { 6942 if (cpumask_equal(doms_new[i], doms_cur[j]) 6943 && dattrs_equal(dattr_new, i, dattr_cur, j)) 6944 goto match2; 6945 } 6946 /* no match - add a new doms_new */ 6947 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); 6948 match2: 6949 ; 6950 } 6951 6952 /* Remember the new sched domains */ 6953 if (doms_cur != &fallback_doms) 6954 free_sched_domains(doms_cur, ndoms_cur); 6955 kfree(dattr_cur); /* kfree(NULL) is safe */ 6956 doms_cur = doms_new; 6957 dattr_cur = dattr_new; 6958 ndoms_cur = ndoms_new; 6959 6960 register_sched_domain_sysctl(); 6961 6962 mutex_unlock(&sched_domains_mutex); 6963 } 6964 6965 static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ 6966 6967 /* 6968 * Update cpusets according to cpu_active mask. If cpusets are 6969 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 6970 * around partition_sched_domains(). 6971 * 6972 * If we come here as part of a suspend/resume, don't touch cpusets because we 6973 * want to restore it back to its original state upon resume anyway. 6974 */ 6975 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, 6976 void *hcpu) 6977 { 6978 switch (action) { 6979 case CPU_ONLINE_FROZEN: 6980 case CPU_DOWN_FAILED_FROZEN: 6981 6982 /* 6983 * num_cpus_frozen tracks how many CPUs are involved in suspend 6984 * resume sequence. As long as this is not the last online 6985 * operation in the resume sequence, just build a single sched 6986 * domain, ignoring cpusets. 6987 */ 6988 num_cpus_frozen--; 6989 if (likely(num_cpus_frozen)) { 6990 partition_sched_domains(1, NULL, NULL); 6991 break; 6992 } 6993 6994 /* 6995 * This is the last CPU online operation. So fall through and 6996 * restore the original sched domains by considering the 6997 * cpuset configurations. 
6998 */ 6999 7000 case CPU_ONLINE: 7001 cpuset_update_active_cpus(true); 7002 break; 7003 default: 7004 return NOTIFY_DONE; 7005 } 7006 return NOTIFY_OK; 7007 } 7008 7009 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, 7010 void *hcpu) 7011 { 7012 unsigned long flags; 7013 long cpu = (long)hcpu; 7014 struct dl_bw *dl_b; 7015 7016 switch (action & ~CPU_TASKS_FROZEN) { 7017 case CPU_DOWN_PREPARE: 7018 /* explicitly allow suspend */ 7019 if (!(action & CPU_TASKS_FROZEN)) { 7020 bool overflow; 7021 int cpus; 7022 7023 rcu_read_lock_sched(); 7024 dl_b = dl_bw_of(cpu); 7025 7026 raw_spin_lock_irqsave(&dl_b->lock, flags); 7027 cpus = dl_bw_cpus(cpu); 7028 overflow = __dl_overflow(dl_b, cpus, 0, 0); 7029 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7030 7031 rcu_read_unlock_sched(); 7032 7033 if (overflow) 7034 return notifier_from_errno(-EBUSY); 7035 } 7036 cpuset_update_active_cpus(false); 7037 break; 7038 case CPU_DOWN_PREPARE_FROZEN: 7039 num_cpus_frozen++; 7040 partition_sched_domains(1, NULL, NULL); 7041 break; 7042 default: 7043 return NOTIFY_DONE; 7044 } 7045 return NOTIFY_OK; 7046 } 7047 7048 void __init sched_init_smp(void) 7049 { 7050 cpumask_var_t non_isolated_cpus; 7051 7052 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); 7053 alloc_cpumask_var(&fallback_doms, GFP_KERNEL); 7054 7055 sched_init_numa(); 7056 7057 /* 7058 * There's no userspace yet to cause hotplug operations; hence all the 7059 * cpu masks are stable and all blatant races in the below code cannot 7060 * happen. 7061 */ 7062 mutex_lock(&sched_domains_mutex); 7063 init_sched_domains(cpu_active_mask); 7064 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); 7065 if (cpumask_empty(non_isolated_cpus)) 7066 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); 7067 mutex_unlock(&sched_domains_mutex); 7068 7069 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); 7070 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); 7071 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); 7072 7073 init_hrtick(); 7074 7075 /* Move init over to a non-isolated CPU */ 7076 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) 7077 BUG(); 7078 sched_init_granularity(); 7079 free_cpumask_var(non_isolated_cpus); 7080 7081 init_sched_rt_class(); 7082 init_sched_dl_class(); 7083 } 7084 #else 7085 void __init sched_init_smp(void) 7086 { 7087 sched_init_granularity(); 7088 } 7089 #endif /* CONFIG_SMP */ 7090 7091 const_debug unsigned int sysctl_timer_migration = 1; 7092 7093 int in_sched_functions(unsigned long addr) 7094 { 7095 return in_lock_functions(addr) || 7096 (addr >= (unsigned long)__sched_text_start 7097 && addr < (unsigned long)__sched_text_end); 7098 } 7099 7100 #ifdef CONFIG_CGROUP_SCHED 7101 /* 7102 * Default task group. 7103 * Every task in system belongs to this group at bootup. 
7104 */ 7105 struct task_group root_task_group; 7106 LIST_HEAD(task_groups); 7107 #endif 7108 7109 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 7110 7111 void __init sched_init(void) 7112 { 7113 int i, j; 7114 unsigned long alloc_size = 0, ptr; 7115 7116 #ifdef CONFIG_FAIR_GROUP_SCHED 7117 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7118 #endif 7119 #ifdef CONFIG_RT_GROUP_SCHED 7120 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7121 #endif 7122 if (alloc_size) { 7123 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); 7124 7125 #ifdef CONFIG_FAIR_GROUP_SCHED 7126 root_task_group.se = (struct sched_entity **)ptr; 7127 ptr += nr_cpu_ids * sizeof(void **); 7128 7129 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 7130 ptr += nr_cpu_ids * sizeof(void **); 7131 7132 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7133 #ifdef CONFIG_RT_GROUP_SCHED 7134 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 7135 ptr += nr_cpu_ids * sizeof(void **); 7136 7137 root_task_group.rt_rq = (struct rt_rq **)ptr; 7138 ptr += nr_cpu_ids * sizeof(void **); 7139 7140 #endif /* CONFIG_RT_GROUP_SCHED */ 7141 } 7142 #ifdef CONFIG_CPUMASK_OFFSTACK 7143 for_each_possible_cpu(i) { 7144 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 7145 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 7146 } 7147 #endif /* CONFIG_CPUMASK_OFFSTACK */ 7148 7149 init_rt_bandwidth(&def_rt_bandwidth, 7150 global_rt_period(), global_rt_runtime()); 7151 init_dl_bandwidth(&def_dl_bandwidth, 7152 global_rt_period(), global_rt_runtime()); 7153 7154 #ifdef CONFIG_SMP 7155 init_defrootdomain(); 7156 #endif 7157 7158 #ifdef CONFIG_RT_GROUP_SCHED 7159 init_rt_bandwidth(&root_task_group.rt_bandwidth, 7160 global_rt_period(), global_rt_runtime()); 7161 #endif /* CONFIG_RT_GROUP_SCHED */ 7162 7163 #ifdef CONFIG_CGROUP_SCHED 7164 list_add(&root_task_group.list, &task_groups); 7165 INIT_LIST_HEAD(&root_task_group.children); 7166 INIT_LIST_HEAD(&root_task_group.siblings); 7167 autogroup_init(&init_task); 7168 7169 #endif /* CONFIG_CGROUP_SCHED */ 7170 7171 for_each_possible_cpu(i) { 7172 struct rq *rq; 7173 7174 rq = cpu_rq(i); 7175 raw_spin_lock_init(&rq->lock); 7176 rq->nr_running = 0; 7177 rq->calc_load_active = 0; 7178 rq->calc_load_update = jiffies + LOAD_FREQ; 7179 init_cfs_rq(&rq->cfs); 7180 init_rt_rq(&rq->rt); 7181 init_dl_rq(&rq->dl); 7182 #ifdef CONFIG_FAIR_GROUP_SCHED 7183 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 7184 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 7185 /* 7186 * How much cpu bandwidth does root_task_group get? 7187 * 7188 * In case of task-groups formed thr' the cgroup filesystem, it 7189 * gets 100% of the cpu resources in the system. This overall 7190 * system cpu resource is divided among the tasks of 7191 * root_task_group and its child task-groups in a fair manner, 7192 * based on each entity's (task or task-group's) weight 7193 * (se->load.weight). 7194 * 7195 * In other words, if root_task_group has 10 tasks of weight 7196 * 1024) and two child groups A0 and A1 (of weight 1024 each), 7197 * then A0's share of the cpu resource is: 7198 * 7199 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 7200 * 7201 * We achieve this by letting root_task_group's tasks sit 7202 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 
7203 */ 7204 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 7205 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 7206 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7207 7208 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 7209 #ifdef CONFIG_RT_GROUP_SCHED 7210 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 7211 #endif 7212 7213 for (j = 0; j < CPU_LOAD_IDX_MAX; j++) 7214 rq->cpu_load[j] = 0; 7215 7216 rq->last_load_update_tick = jiffies; 7217 7218 #ifdef CONFIG_SMP 7219 rq->sd = NULL; 7220 rq->rd = NULL; 7221 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 7222 rq->post_schedule = 0; 7223 rq->active_balance = 0; 7224 rq->next_balance = jiffies; 7225 rq->push_cpu = 0; 7226 rq->cpu = i; 7227 rq->online = 0; 7228 rq->idle_stamp = 0; 7229 rq->avg_idle = 2*sysctl_sched_migration_cost; 7230 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 7231 7232 INIT_LIST_HEAD(&rq->cfs_tasks); 7233 7234 rq_attach_root(rq, &def_root_domain); 7235 #ifdef CONFIG_NO_HZ_COMMON 7236 rq->nohz_flags = 0; 7237 #endif 7238 #ifdef CONFIG_NO_HZ_FULL 7239 rq->last_sched_tick = 0; 7240 #endif 7241 #endif 7242 init_rq_hrtick(rq); 7243 atomic_set(&rq->nr_iowait, 0); 7244 } 7245 7246 set_load_weight(&init_task); 7247 7248 #ifdef CONFIG_PREEMPT_NOTIFIERS 7249 INIT_HLIST_HEAD(&init_task.preempt_notifiers); 7250 #endif 7251 7252 /* 7253 * The boot idle thread does lazy MMU switching as well: 7254 */ 7255 atomic_inc(&init_mm.mm_count); 7256 enter_lazy_tlb(&init_mm, current); 7257 7258 /* 7259 * During early bootup we pretend to be a normal task: 7260 */ 7261 current->sched_class = &fair_sched_class; 7262 7263 /* 7264 * Make us the idle thread. Technically, schedule() should not be 7265 * called from this thread, however somewhere below it might be, 7266 * but because we are the idle thread, we just pick up running again 7267 * when this runqueue becomes "idle". 7268 */ 7269 init_idle(current, smp_processor_id()); 7270 7271 calc_load_update = jiffies + LOAD_FREQ; 7272 7273 #ifdef CONFIG_SMP 7274 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); 7275 /* May be allocated at isolcpus cmdline parse time */ 7276 if (cpu_isolated_map == NULL) 7277 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 7278 idle_thread_set_boot_cpu(); 7279 set_cpu_rq_start_time(); 7280 #endif 7281 init_sched_fair_class(); 7282 7283 scheduler_running = 1; 7284 } 7285 7286 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 7287 static inline int preempt_count_equals(int preempt_offset) 7288 { 7289 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); 7290 7291 return (nested == preempt_offset); 7292 } 7293 7294 void __might_sleep(const char *file, int line, int preempt_offset) 7295 { 7296 /* 7297 * Blocking primitives will set (and therefore destroy) current->state, 7298 * since we will exit with TASK_RUNNING make sure we enter with it, 7299 * otherwise we will destroy state. 7300 */ 7301 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 7302 "do not call blocking ops when !TASK_RUNNING; " 7303 "state=%lx set at [<%p>] %pS\n", 7304 current->state, 7305 (void *)current->task_state_change, 7306 (void *)current->task_state_change); 7307 7308 ___might_sleep(file, line, preempt_offset); 7309 } 7310 EXPORT_SYMBOL(__might_sleep); 7311 7312 void ___might_sleep(const char *file, int line, int preempt_offset) 7313 { 7314 static unsigned long prev_jiffy; /* ratelimiting */ 7315 7316 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. 
*/ 7317 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 7318 !is_idle_task(current)) || 7319 system_state != SYSTEM_RUNNING || oops_in_progress) 7320 return; 7321 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 7322 return; 7323 prev_jiffy = jiffies; 7324 7325 printk(KERN_ERR 7326 "BUG: sleeping function called from invalid context at %s:%d\n", 7327 file, line); 7328 printk(KERN_ERR 7329 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 7330 in_atomic(), irqs_disabled(), 7331 current->pid, current->comm); 7332 7333 if (task_stack_end_corrupted(current)) 7334 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 7335 7336 debug_show_held_locks(current); 7337 if (irqs_disabled()) 7338 print_irqtrace_events(current); 7339 #ifdef CONFIG_DEBUG_PREEMPT 7340 if (!preempt_count_equals(preempt_offset)) { 7341 pr_err("Preemption disabled at:"); 7342 print_ip_sym(current->preempt_disable_ip); 7343 pr_cont("\n"); 7344 } 7345 #endif 7346 dump_stack(); 7347 } 7348 EXPORT_SYMBOL(___might_sleep); 7349 #endif 7350 7351 #ifdef CONFIG_MAGIC_SYSRQ 7352 static void normalize_task(struct rq *rq, struct task_struct *p) 7353 { 7354 const struct sched_class *prev_class = p->sched_class; 7355 struct sched_attr attr = { 7356 .sched_policy = SCHED_NORMAL, 7357 }; 7358 int old_prio = p->prio; 7359 int queued; 7360 7361 queued = task_on_rq_queued(p); 7362 if (queued) 7363 dequeue_task(rq, p, 0); 7364 __setscheduler(rq, p, &attr); 7365 if (queued) { 7366 enqueue_task(rq, p, 0); 7367 resched_curr(rq); 7368 } 7369 7370 check_class_changed(rq, p, prev_class, old_prio); 7371 } 7372 7373 void normalize_rt_tasks(void) 7374 { 7375 struct task_struct *g, *p; 7376 unsigned long flags; 7377 struct rq *rq; 7378 7379 read_lock(&tasklist_lock); 7380 for_each_process_thread(g, p) { 7381 /* 7382 * Only normalize user tasks: 7383 */ 7384 if (p->flags & PF_KTHREAD) 7385 continue; 7386 7387 p->se.exec_start = 0; 7388 #ifdef CONFIG_SCHEDSTATS 7389 p->se.statistics.wait_start = 0; 7390 p->se.statistics.sleep_start = 0; 7391 p->se.statistics.block_start = 0; 7392 #endif 7393 7394 if (!dl_task(p) && !rt_task(p)) { 7395 /* 7396 * Renice negative nice level userspace 7397 * tasks back to 0: 7398 */ 7399 if (task_nice(p) < 0) 7400 set_user_nice(p, 0); 7401 continue; 7402 } 7403 7404 rq = task_rq_lock(p, &flags); 7405 normalize_task(rq, p); 7406 task_rq_unlock(rq, p, &flags); 7407 } 7408 read_unlock(&tasklist_lock); 7409 } 7410 7411 #endif /* CONFIG_MAGIC_SYSRQ */ 7412 7413 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 7414 /* 7415 * These functions are only useful for the IA64 MCA handling, or kdb. 7416 * 7417 * They can only be called when the whole system has been 7418 * stopped - every CPU needs to be quiescent, and no scheduling 7419 * activity can take place. Using them for anything else would 7420 * be a serious bug, and as a result, they aren't even visible 7421 * under any other configuration. 7422 */ 7423 7424 /** 7425 * curr_task - return the current task for a given cpu. 7426 * @cpu: the processor in question. 7427 * 7428 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 7429 * 7430 * Return: The current task for @cpu. 7431 */ 7432 struct task_struct *curr_task(int cpu) 7433 { 7434 return cpu_curr(cpu); 7435 } 7436 7437 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 7438 7439 #ifdef CONFIG_IA64 7440 /** 7441 * set_curr_task - set the current task for a given cpu. 7442 * @cpu: the processor in question. 7443 * @p: the task pointer to set. 
7444 * 7445 * Description: This function must only be used when non-maskable interrupts 7446 * are serviced on a separate stack. It allows the architecture to switch the 7447 * notion of the current task on a cpu in a non-blocking manner. This function 7448 * must be called with all CPU's synchronized, and interrupts disabled, the 7449 * and caller must save the original value of the current task (see 7450 * curr_task() above) and restore that value before reenabling interrupts and 7451 * re-starting the system. 7452 * 7453 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 7454 */ 7455 void set_curr_task(int cpu, struct task_struct *p) 7456 { 7457 cpu_curr(cpu) = p; 7458 } 7459 7460 #endif 7461 7462 #ifdef CONFIG_CGROUP_SCHED 7463 /* task_group_lock serializes the addition/removal of task groups */ 7464 static DEFINE_SPINLOCK(task_group_lock); 7465 7466 static void free_sched_group(struct task_group *tg) 7467 { 7468 free_fair_sched_group(tg); 7469 free_rt_sched_group(tg); 7470 autogroup_free(tg); 7471 kfree(tg); 7472 } 7473 7474 /* allocate runqueue etc for a new task group */ 7475 struct task_group *sched_create_group(struct task_group *parent) 7476 { 7477 struct task_group *tg; 7478 7479 tg = kzalloc(sizeof(*tg), GFP_KERNEL); 7480 if (!tg) 7481 return ERR_PTR(-ENOMEM); 7482 7483 if (!alloc_fair_sched_group(tg, parent)) 7484 goto err; 7485 7486 if (!alloc_rt_sched_group(tg, parent)) 7487 goto err; 7488 7489 return tg; 7490 7491 err: 7492 free_sched_group(tg); 7493 return ERR_PTR(-ENOMEM); 7494 } 7495 7496 void sched_online_group(struct task_group *tg, struct task_group *parent) 7497 { 7498 unsigned long flags; 7499 7500 spin_lock_irqsave(&task_group_lock, flags); 7501 list_add_rcu(&tg->list, &task_groups); 7502 7503 WARN_ON(!parent); /* root should already exist */ 7504 7505 tg->parent = parent; 7506 INIT_LIST_HEAD(&tg->children); 7507 list_add_rcu(&tg->siblings, &parent->children); 7508 spin_unlock_irqrestore(&task_group_lock, flags); 7509 } 7510 7511 /* rcu callback to free various structures associated with a task group */ 7512 static void free_sched_group_rcu(struct rcu_head *rhp) 7513 { 7514 /* now it should be safe to free those cfs_rqs */ 7515 free_sched_group(container_of(rhp, struct task_group, rcu)); 7516 } 7517 7518 /* Destroy runqueue etc associated with a task group */ 7519 void sched_destroy_group(struct task_group *tg) 7520 { 7521 /* wait for possible concurrent references to cfs_rqs complete */ 7522 call_rcu(&tg->rcu, free_sched_group_rcu); 7523 } 7524 7525 void sched_offline_group(struct task_group *tg) 7526 { 7527 unsigned long flags; 7528 int i; 7529 7530 /* end participation in shares distribution */ 7531 for_each_possible_cpu(i) 7532 unregister_fair_sched_group(tg, i); 7533 7534 spin_lock_irqsave(&task_group_lock, flags); 7535 list_del_rcu(&tg->list); 7536 list_del_rcu(&tg->siblings); 7537 spin_unlock_irqrestore(&task_group_lock, flags); 7538 } 7539 7540 /* change task's runqueue when it moves between groups. 7541 * The caller of this function should have put the task in its new group 7542 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to 7543 * reflect its new group. 
7544 */ 7545 void sched_move_task(struct task_struct *tsk) 7546 { 7547 struct task_group *tg; 7548 int queued, running; 7549 unsigned long flags; 7550 struct rq *rq; 7551 7552 rq = task_rq_lock(tsk, &flags); 7553 7554 running = task_current(rq, tsk); 7555 queued = task_on_rq_queued(tsk); 7556 7557 if (queued) 7558 dequeue_task(rq, tsk, 0); 7559 if (unlikely(running)) 7560 put_prev_task(rq, tsk); 7561 7562 /* 7563 * All callers are synchronized by task_rq_lock(); we do not use RCU 7564 * which is pointless here. Thus, we pass "true" to task_css_check() 7565 * to prevent lockdep warnings. 7566 */ 7567 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 7568 struct task_group, css); 7569 tg = autogroup_task_group(tsk, tg); 7570 tsk->sched_task_group = tg; 7571 7572 #ifdef CONFIG_FAIR_GROUP_SCHED 7573 if (tsk->sched_class->task_move_group) 7574 tsk->sched_class->task_move_group(tsk, queued); 7575 else 7576 #endif 7577 set_task_rq(tsk, task_cpu(tsk)); 7578 7579 if (unlikely(running)) 7580 tsk->sched_class->set_curr_task(rq); 7581 if (queued) 7582 enqueue_task(rq, tsk, 0); 7583 7584 task_rq_unlock(rq, tsk, &flags); 7585 } 7586 #endif /* CONFIG_CGROUP_SCHED */ 7587 7588 #ifdef CONFIG_RT_GROUP_SCHED 7589 /* 7590 * Ensure that the real time constraints are schedulable. 7591 */ 7592 static DEFINE_MUTEX(rt_constraints_mutex); 7593 7594 /* Must be called with tasklist_lock held */ 7595 static inline int tg_has_rt_tasks(struct task_group *tg) 7596 { 7597 struct task_struct *g, *p; 7598 7599 /* 7600 * Autogroups do not have RT tasks; see autogroup_create(). 7601 */ 7602 if (task_group_is_autogroup(tg)) 7603 return 0; 7604 7605 for_each_process_thread(g, p) { 7606 if (rt_task(p) && task_group(p) == tg) 7607 return 1; 7608 } 7609 7610 return 0; 7611 } 7612 7613 struct rt_schedulable_data { 7614 struct task_group *tg; 7615 u64 rt_period; 7616 u64 rt_runtime; 7617 }; 7618 7619 static int tg_rt_schedulable(struct task_group *tg, void *data) 7620 { 7621 struct rt_schedulable_data *d = data; 7622 struct task_group *child; 7623 unsigned long total, sum = 0; 7624 u64 period, runtime; 7625 7626 period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7627 runtime = tg->rt_bandwidth.rt_runtime; 7628 7629 if (tg == d->tg) { 7630 period = d->rt_period; 7631 runtime = d->rt_runtime; 7632 } 7633 7634 /* 7635 * Cannot have more runtime than the period. 7636 */ 7637 if (runtime > period && runtime != RUNTIME_INF) 7638 return -EINVAL; 7639 7640 /* 7641 * Ensure we don't starve existing RT tasks. 7642 */ 7643 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) 7644 return -EBUSY; 7645 7646 total = to_ratio(period, runtime); 7647 7648 /* 7649 * Nobody can have more than the global setting allows. 7650 */ 7651 if (total > to_ratio(global_rt_period(), global_rt_runtime())) 7652 return -EINVAL; 7653 7654 /* 7655 * The sum of our children's runtime should not exceed our own. 
7656 */ 7657 list_for_each_entry_rcu(child, &tg->children, siblings) { 7658 period = ktime_to_ns(child->rt_bandwidth.rt_period); 7659 runtime = child->rt_bandwidth.rt_runtime; 7660 7661 if (child == d->tg) { 7662 period = d->rt_period; 7663 runtime = d->rt_runtime; 7664 } 7665 7666 sum += to_ratio(period, runtime); 7667 } 7668 7669 if (sum > total) 7670 return -EINVAL; 7671 7672 return 0; 7673 } 7674 7675 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 7676 { 7677 int ret; 7678 7679 struct rt_schedulable_data data = { 7680 .tg = tg, 7681 .rt_period = period, 7682 .rt_runtime = runtime, 7683 }; 7684 7685 rcu_read_lock(); 7686 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); 7687 rcu_read_unlock(); 7688 7689 return ret; 7690 } 7691 7692 static int tg_set_rt_bandwidth(struct task_group *tg, 7693 u64 rt_period, u64 rt_runtime) 7694 { 7695 int i, err = 0; 7696 7697 /* 7698 * Disallowing the root group RT runtime is BAD, it would disallow the 7699 * kernel creating (and or operating) RT threads. 7700 */ 7701 if (tg == &root_task_group && rt_runtime == 0) 7702 return -EINVAL; 7703 7704 /* No period doesn't make any sense. */ 7705 if (rt_period == 0) 7706 return -EINVAL; 7707 7708 mutex_lock(&rt_constraints_mutex); 7709 read_lock(&tasklist_lock); 7710 err = __rt_schedulable(tg, rt_period, rt_runtime); 7711 if (err) 7712 goto unlock; 7713 7714 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 7715 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 7716 tg->rt_bandwidth.rt_runtime = rt_runtime; 7717 7718 for_each_possible_cpu(i) { 7719 struct rt_rq *rt_rq = tg->rt_rq[i]; 7720 7721 raw_spin_lock(&rt_rq->rt_runtime_lock); 7722 rt_rq->rt_runtime = rt_runtime; 7723 raw_spin_unlock(&rt_rq->rt_runtime_lock); 7724 } 7725 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 7726 unlock: 7727 read_unlock(&tasklist_lock); 7728 mutex_unlock(&rt_constraints_mutex); 7729 7730 return err; 7731 } 7732 7733 static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) 7734 { 7735 u64 rt_runtime, rt_period; 7736 7737 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7738 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; 7739 if (rt_runtime_us < 0) 7740 rt_runtime = RUNTIME_INF; 7741 7742 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7743 } 7744 7745 static long sched_group_rt_runtime(struct task_group *tg) 7746 { 7747 u64 rt_runtime_us; 7748 7749 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) 7750 return -1; 7751 7752 rt_runtime_us = tg->rt_bandwidth.rt_runtime; 7753 do_div(rt_runtime_us, NSEC_PER_USEC); 7754 return rt_runtime_us; 7755 } 7756 7757 static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) 7758 { 7759 u64 rt_runtime, rt_period; 7760 7761 rt_period = (u64)rt_period_us * NSEC_PER_USEC; 7762 rt_runtime = tg->rt_bandwidth.rt_runtime; 7763 7764 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7765 } 7766 7767 static long sched_group_rt_period(struct task_group *tg) 7768 { 7769 u64 rt_period_us; 7770 7771 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); 7772 do_div(rt_period_us, NSEC_PER_USEC); 7773 return rt_period_us; 7774 } 7775 #endif /* CONFIG_RT_GROUP_SCHED */ 7776 7777 #ifdef CONFIG_RT_GROUP_SCHED 7778 static int sched_rt_global_constraints(void) 7779 { 7780 int ret = 0; 7781 7782 mutex_lock(&rt_constraints_mutex); 7783 read_lock(&tasklist_lock); 7784 ret = __rt_schedulable(NULL, 0, 0); 7785 read_unlock(&tasklist_lock); 7786 mutex_unlock(&rt_constraints_mutex); 7787 7788 return ret; 7789 } 7790 
7791 static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) 7792 { 7793 /* Don't accept realtime tasks when there is no way for them to run */ 7794 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) 7795 return 0; 7796 7797 return 1; 7798 } 7799 7800 #else /* !CONFIG_RT_GROUP_SCHED */ 7801 static int sched_rt_global_constraints(void) 7802 { 7803 unsigned long flags; 7804 int i, ret = 0; 7805 7806 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 7807 for_each_possible_cpu(i) { 7808 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 7809 7810 raw_spin_lock(&rt_rq->rt_runtime_lock); 7811 rt_rq->rt_runtime = global_rt_runtime(); 7812 raw_spin_unlock(&rt_rq->rt_runtime_lock); 7813 } 7814 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 7815 7816 return ret; 7817 } 7818 #endif /* CONFIG_RT_GROUP_SCHED */ 7819 7820 static int sched_dl_global_validate(void) 7821 { 7822 u64 runtime = global_rt_runtime(); 7823 u64 period = global_rt_period(); 7824 u64 new_bw = to_ratio(period, runtime); 7825 struct dl_bw *dl_b; 7826 int cpu, ret = 0; 7827 unsigned long flags; 7828 7829 /* 7830 * Here we want to check the bandwidth not being set to some 7831 * value smaller than the currently allocated bandwidth in 7832 * any of the root_domains. 7833 * 7834 * FIXME: Cycling on all the CPUs is overdoing, but simpler than 7835 * cycling on root_domains... Discussion on different/better 7836 * solutions is welcome! 7837 */ 7838 for_each_possible_cpu(cpu) { 7839 rcu_read_lock_sched(); 7840 dl_b = dl_bw_of(cpu); 7841 7842 raw_spin_lock_irqsave(&dl_b->lock, flags); 7843 if (new_bw < dl_b->total_bw) 7844 ret = -EBUSY; 7845 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7846 7847 rcu_read_unlock_sched(); 7848 7849 if (ret) 7850 break; 7851 } 7852 7853 return ret; 7854 } 7855 7856 static void sched_dl_do_global(void) 7857 { 7858 u64 new_bw = -1; 7859 struct dl_bw *dl_b; 7860 int cpu; 7861 unsigned long flags; 7862 7863 def_dl_bandwidth.dl_period = global_rt_period(); 7864 def_dl_bandwidth.dl_runtime = global_rt_runtime(); 7865 7866 if (global_rt_runtime() != RUNTIME_INF) 7867 new_bw = to_ratio(global_rt_period(), global_rt_runtime()); 7868 7869 /* 7870 * FIXME: As above... 
7871 */ 7872 for_each_possible_cpu(cpu) { 7873 rcu_read_lock_sched(); 7874 dl_b = dl_bw_of(cpu); 7875 7876 raw_spin_lock_irqsave(&dl_b->lock, flags); 7877 dl_b->bw = new_bw; 7878 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7879 7880 rcu_read_unlock_sched(); 7881 } 7882 } 7883 7884 static int sched_rt_global_validate(void) 7885 { 7886 if (sysctl_sched_rt_period <= 0) 7887 return -EINVAL; 7888 7889 if ((sysctl_sched_rt_runtime != RUNTIME_INF) && 7890 (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) 7891 return -EINVAL; 7892 7893 return 0; 7894 } 7895 7896 static void sched_rt_do_global(void) 7897 { 7898 def_rt_bandwidth.rt_runtime = global_rt_runtime(); 7899 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); 7900 } 7901 7902 int sched_rt_handler(struct ctl_table *table, int write, 7903 void __user *buffer, size_t *lenp, 7904 loff_t *ppos) 7905 { 7906 int old_period, old_runtime; 7907 static DEFINE_MUTEX(mutex); 7908 int ret; 7909 7910 mutex_lock(&mutex); 7911 old_period = sysctl_sched_rt_period; 7912 old_runtime = sysctl_sched_rt_runtime; 7913 7914 ret = proc_dointvec(table, write, buffer, lenp, ppos); 7915 7916 if (!ret && write) { 7917 ret = sched_rt_global_validate(); 7918 if (ret) 7919 goto undo; 7920 7921 ret = sched_dl_global_validate(); 7922 if (ret) 7923 goto undo; 7924 7925 ret = sched_rt_global_constraints(); 7926 if (ret) 7927 goto undo; 7928 7929 sched_rt_do_global(); 7930 sched_dl_do_global(); 7931 } 7932 if (0) { 7933 undo: 7934 sysctl_sched_rt_period = old_period; 7935 sysctl_sched_rt_runtime = old_runtime; 7936 } 7937 mutex_unlock(&mutex); 7938 7939 return ret; 7940 } 7941 7942 int sched_rr_handler(struct ctl_table *table, int write, 7943 void __user *buffer, size_t *lenp, 7944 loff_t *ppos) 7945 { 7946 int ret; 7947 static DEFINE_MUTEX(mutex); 7948 7949 mutex_lock(&mutex); 7950 ret = proc_dointvec(table, write, buffer, lenp, ppos); 7951 /* make sure that internally we keep jiffies */ 7952 /* also, writing zero resets timeslice to default */ 7953 if (!ret && write) { 7954 sched_rr_timeslice = sched_rr_timeslice <= 0 ? 7955 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice); 7956 } 7957 mutex_unlock(&mutex); 7958 return ret; 7959 } 7960 7961 #ifdef CONFIG_CGROUP_SCHED 7962 7963 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 7964 { 7965 return css ? 
container_of(css, struct task_group, css) : NULL; 7966 } 7967 7968 static struct cgroup_subsys_state * 7969 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 7970 { 7971 struct task_group *parent = css_tg(parent_css); 7972 struct task_group *tg; 7973 7974 if (!parent) { 7975 /* This is early initialization for the top cgroup */ 7976 return &root_task_group.css; 7977 } 7978 7979 tg = sched_create_group(parent); 7980 if (IS_ERR(tg)) 7981 return ERR_PTR(-ENOMEM); 7982 7983 return &tg->css; 7984 } 7985 7986 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 7987 { 7988 struct task_group *tg = css_tg(css); 7989 struct task_group *parent = css_tg(css->parent); 7990 7991 if (parent) 7992 sched_online_group(tg, parent); 7993 return 0; 7994 } 7995 7996 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 7997 { 7998 struct task_group *tg = css_tg(css); 7999 8000 sched_destroy_group(tg); 8001 } 8002 8003 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css) 8004 { 8005 struct task_group *tg = css_tg(css); 8006 8007 sched_offline_group(tg); 8008 } 8009 8010 static void cpu_cgroup_fork(struct task_struct *task) 8011 { 8012 sched_move_task(task); 8013 } 8014 8015 static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, 8016 struct cgroup_taskset *tset) 8017 { 8018 struct task_struct *task; 8019 8020 cgroup_taskset_for_each(task, tset) { 8021 #ifdef CONFIG_RT_GROUP_SCHED 8022 if (!sched_rt_can_attach(css_tg(css), task)) 8023 return -EINVAL; 8024 #else 8025 /* We don't support RT-tasks being in separate groups */ 8026 if (task->sched_class != &fair_sched_class) 8027 return -EINVAL; 8028 #endif 8029 } 8030 return 0; 8031 } 8032 8033 static void cpu_cgroup_attach(struct cgroup_subsys_state *css, 8034 struct cgroup_taskset *tset) 8035 { 8036 struct task_struct *task; 8037 8038 cgroup_taskset_for_each(task, tset) 8039 sched_move_task(task); 8040 } 8041 8042 static void cpu_cgroup_exit(struct cgroup_subsys_state *css, 8043 struct cgroup_subsys_state *old_css, 8044 struct task_struct *task) 8045 { 8046 /* 8047 * cgroup_exit() is called in the copy_process() failure path. 8048 * Ignore this case since the task hasn't run yet; this avoids 8049 * trying to poke a half-freed task state from generic code. 8050 */ 8051 if (!(task->flags & PF_EXITING)) 8052 return; 8053 8054 sched_move_task(task); 8055 } 8056 8057 #ifdef CONFIG_FAIR_GROUP_SCHED 8058 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 8059 struct cftype *cftype, u64 shareval) 8060 { 8061 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 8062 } 8063 8064 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 8065 struct cftype *cft) 8066 { 8067 struct task_group *tg = css_tg(css); 8068 8069 return (u64) scale_load_down(tg->shares); 8070 } 8071 8072 #ifdef CONFIG_CFS_BANDWIDTH 8073 static DEFINE_MUTEX(cfs_constraints_mutex); 8074 8075 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 8076 const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 8077 8078 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 8079 8080 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 8081 { 8082 int i, ret = 0, runtime_enabled, runtime_was_enabled; 8083 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8084 8085 if (tg == &root_task_group) 8086 return -EINVAL; 8087 8088 /* 8089 * Ensure we have at least some amount of bandwidth every period.
This is 8090 * to prevent reaching a state of large arrears when throttled via 8091 * entity_tick(), resulting in prolonged exit starvation. 8092 */ 8093 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 8094 return -EINVAL; 8095 8096 /* 8097 * Likewise, bound things on the other side by preventing insane quota 8098 * periods. This also allows us to normalize in computing quota 8099 * feasibility. 8100 */ 8101 if (period > max_cfs_quota_period) 8102 return -EINVAL; 8103 8104 /* 8105 * Prevent race between setting of cfs_rq->runtime_enabled and 8106 * unthrottle_offline_cfs_rqs(). 8107 */ 8108 get_online_cpus(); 8109 mutex_lock(&cfs_constraints_mutex); 8110 ret = __cfs_schedulable(tg, period, quota); 8111 if (ret) 8112 goto out_unlock; 8113 8114 runtime_enabled = quota != RUNTIME_INF; 8115 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 8116 /* 8117 * If we need to toggle cfs_bandwidth_used, off->on must occur 8118 * before making related changes, and on->off must occur afterwards. 8119 */ 8120 if (runtime_enabled && !runtime_was_enabled) 8121 cfs_bandwidth_usage_inc(); 8122 raw_spin_lock_irq(&cfs_b->lock); 8123 cfs_b->period = ns_to_ktime(period); 8124 cfs_b->quota = quota; 8125 8126 __refill_cfs_bandwidth_runtime(cfs_b); 8127 /* restart the period timer (if active) to handle new period expiry */ 8128 if (runtime_enabled && cfs_b->timer_active) { 8129 /* force a reprogram */ 8130 __start_cfs_bandwidth(cfs_b, true); 8131 } 8132 raw_spin_unlock_irq(&cfs_b->lock); 8133 8134 for_each_online_cpu(i) { 8135 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 8136 struct rq *rq = cfs_rq->rq; 8137 8138 raw_spin_lock_irq(&rq->lock); 8139 cfs_rq->runtime_enabled = runtime_enabled; 8140 cfs_rq->runtime_remaining = 0; 8141 8142 if (cfs_rq->throttled) 8143 unthrottle_cfs_rq(cfs_rq); 8144 raw_spin_unlock_irq(&rq->lock); 8145 } 8146 if (runtime_was_enabled && !runtime_enabled) 8147 cfs_bandwidth_usage_dec(); 8148 out_unlock: 8149 mutex_unlock(&cfs_constraints_mutex); 8150 put_online_cpus(); 8151 8152 return ret; 8153 } 8154 8155 int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 8156 { 8157 u64 quota, period; 8158 8159 period = ktime_to_ns(tg->cfs_bandwidth.period); 8160 if (cfs_quota_us < 0) 8161 quota = RUNTIME_INF; 8162 else 8163 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 8164 8165 return tg_set_cfs_bandwidth(tg, period, quota); 8166 } 8167 8168 long tg_get_cfs_quota(struct task_group *tg) 8169 { 8170 u64 quota_us; 8171 8172 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 8173 return -1; 8174 8175 quota_us = tg->cfs_bandwidth.quota; 8176 do_div(quota_us, NSEC_PER_USEC); 8177 8178 return quota_us; 8179 } 8180 8181 int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 8182 { 8183 u64 quota, period; 8184 8185 period = (u64)cfs_period_us * NSEC_PER_USEC; 8186 quota = tg->cfs_bandwidth.quota; 8187 8188 return tg_set_cfs_bandwidth(tg, period, quota); 8189 } 8190 8191 long tg_get_cfs_period(struct task_group *tg) 8192 { 8193 u64 cfs_period_us; 8194 8195 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 8196 do_div(cfs_period_us, NSEC_PER_USEC); 8197 8198 return cfs_period_us; 8199 } 8200 8201 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 8202 struct cftype *cft) 8203 { 8204 return tg_get_cfs_quota(css_tg(css)); 8205 } 8206 8207 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 8208 struct cftype *cftype, s64 cfs_quota_us) 8209 { 8210 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 8211 } 8212 8213 static u64 cpu_cfs_period_read_u64(struct
cgroup_subsys_state *css, 8214 struct cftype *cft) 8215 { 8216 return tg_get_cfs_period(css_tg(css)); 8217 } 8218 8219 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 8220 struct cftype *cftype, u64 cfs_period_us) 8221 { 8222 return tg_set_cfs_period(css_tg(css), cfs_period_us); 8223 } 8224 8225 struct cfs_schedulable_data { 8226 struct task_group *tg; 8227 u64 period, quota; 8228 }; 8229 8230 /* 8231 * normalize group quota/period to be quota/max_period 8232 * note: units are usecs 8233 */ 8234 static u64 normalize_cfs_quota(struct task_group *tg, 8235 struct cfs_schedulable_data *d) 8236 { 8237 u64 quota, period; 8238 8239 if (tg == d->tg) { 8240 period = d->period; 8241 quota = d->quota; 8242 } else { 8243 period = tg_get_cfs_period(tg); 8244 quota = tg_get_cfs_quota(tg); 8245 } 8246 8247 /* note: these should typically be equivalent */ 8248 if (quota == RUNTIME_INF || quota == -1) 8249 return RUNTIME_INF; 8250 8251 return to_ratio(period, quota); 8252 } 8253 8254 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 8255 { 8256 struct cfs_schedulable_data *d = data; 8257 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8258 s64 quota = 0, parent_quota = -1; 8259 8260 if (!tg->parent) { 8261 quota = RUNTIME_INF; 8262 } else { 8263 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 8264 8265 quota = normalize_cfs_quota(tg, d); 8266 parent_quota = parent_b->hierarchical_quota; 8267 8268 /* 8269 * ensure max(child_quota) <= parent_quota, inherit when no 8270 * limit is set 8271 */ 8272 if (quota == RUNTIME_INF) 8273 quota = parent_quota; 8274 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 8275 return -EINVAL; 8276 } 8277 cfs_b->hierarchical_quota = quota; 8278 8279 return 0; 8280 } 8281 8282 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 8283 { 8284 int ret; 8285 struct cfs_schedulable_data data = { 8286 .tg = tg, 8287 .period = period, 8288 .quota = quota, 8289 }; 8290 8291 if (quota != RUNTIME_INF) { 8292 do_div(data.period, NSEC_PER_USEC); 8293 do_div(data.quota, NSEC_PER_USEC); 8294 } 8295 8296 rcu_read_lock(); 8297 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 8298 rcu_read_unlock(); 8299 8300 return ret; 8301 } 8302 8303 static int cpu_stats_show(struct seq_file *sf, void *v) 8304 { 8305 struct task_group *tg = css_tg(seq_css(sf)); 8306 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8307 8308 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 8309 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 8310 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 8311 8312 return 0; 8313 } 8314 #endif /* CONFIG_CFS_BANDWIDTH */ 8315 #endif /* CONFIG_FAIR_GROUP_SCHED */ 8316 8317 #ifdef CONFIG_RT_GROUP_SCHED 8318 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 8319 struct cftype *cft, s64 val) 8320 { 8321 return sched_group_set_rt_runtime(css_tg(css), val); 8322 } 8323 8324 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 8325 struct cftype *cft) 8326 { 8327 return sched_group_rt_runtime(css_tg(css)); 8328 } 8329 8330 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 8331 struct cftype *cftype, u64 rt_period_us) 8332 { 8333 return sched_group_set_rt_period(css_tg(css), rt_period_us); 8334 } 8335 8336 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 8337 struct cftype *cft) 8338 { 8339 return sched_group_rt_period(css_tg(css)); 8340 } 8341 #endif /* CONFIG_RT_GROUP_SCHED */ 8342 8343 static struct 
cftype cpu_files[] = { 8344 #ifdef CONFIG_FAIR_GROUP_SCHED 8345 { 8346 .name = "shares", 8347 .read_u64 = cpu_shares_read_u64, 8348 .write_u64 = cpu_shares_write_u64, 8349 }, 8350 #endif 8351 #ifdef CONFIG_CFS_BANDWIDTH 8352 { 8353 .name = "cfs_quota_us", 8354 .read_s64 = cpu_cfs_quota_read_s64, 8355 .write_s64 = cpu_cfs_quota_write_s64, 8356 }, 8357 { 8358 .name = "cfs_period_us", 8359 .read_u64 = cpu_cfs_period_read_u64, 8360 .write_u64 = cpu_cfs_period_write_u64, 8361 }, 8362 { 8363 .name = "stat", 8364 .seq_show = cpu_stats_show, 8365 }, 8366 #endif 8367 #ifdef CONFIG_RT_GROUP_SCHED 8368 { 8369 .name = "rt_runtime_us", 8370 .read_s64 = cpu_rt_runtime_read, 8371 .write_s64 = cpu_rt_runtime_write, 8372 }, 8373 { 8374 .name = "rt_period_us", 8375 .read_u64 = cpu_rt_period_read_uint, 8376 .write_u64 = cpu_rt_period_write_uint, 8377 }, 8378 #endif 8379 { } /* terminate */ 8380 }; 8381 8382 struct cgroup_subsys cpu_cgrp_subsys = { 8383 .css_alloc = cpu_cgroup_css_alloc, 8384 .css_free = cpu_cgroup_css_free, 8385 .css_online = cpu_cgroup_css_online, 8386 .css_offline = cpu_cgroup_css_offline, 8387 .fork = cpu_cgroup_fork, 8388 .can_attach = cpu_cgroup_can_attach, 8389 .attach = cpu_cgroup_attach, 8390 .exit = cpu_cgroup_exit, 8391 .legacy_cftypes = cpu_files, 8392 .early_init = 1, 8393 }; 8394 8395 #endif /* CONFIG_CGROUP_SCHED */ 8396 8397 void dump_cpu_task(int cpu) 8398 { 8399 pr_info("Task dump for CPU %d:\n", cpu); 8400 sched_show_task(cpu_curr(cpu)); 8401 } 8402