/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_skip_update & RQCF_ACT_SKIP)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}
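/*
 * Note on the two clocks: rq->clock above is the raw per-CPU view from
 * sched_clock_cpu(), while rq->clock_task, maintained by
 * update_rq_clock_task() further down, additionally subtracts IRQ and
 * paravirt steal time where configured and is the clock used for task
 * runtime accounting.
 */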
/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	inode_lock(inode);
	i = sched_feat_set(cmp);
	inode_unlock(inode);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);
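/*
 * For example, with debugfs mounted at /sys/kernel/debug, a feature can be
 * toggled at run time by writing its name (to enable) or the name prefixed
 * with "NO_" (to disable) to the file created above, e.g.:
 *
 *	echo NO_HRTICK > /sys/kernel/debug/sched_features
 *
 * Reading the file lists all features in the same format.
 */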
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/* cpus with isolated domains */
cpumask_var_t cpu_isolated_map;

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;

	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	__hrtick_restart(rq);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		__hrtick_restart(rq);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, val)						\
({	typeof(*(ptr)) __old, __val = *(ptr);				\
	for (;;) {							\
		__old = cmpxchg((ptr), __val, __val | (val));		\
		if (__old == __val)					\
			break;						\
		__val = __old;						\
	}								\
	__old;								\
})
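/*
 * fetch_or() behaves like an atomic fetch-or: it ORs @val into *@ptr and
 * evaluates to the value *@ptr held beforehand. For instance,
 * fetch_or(&ti->flags, _TIF_NEED_RESCHED) below sets NEED_RESCHED while
 * letting the caller inspect the previous flags (e.g. TIF_POLLING_NRFLAG).
 */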
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * This cmpxchg() implies a full barrier, which pairs with the write
	 * barrier implied by the wakeup in wake_up_q().
	 */
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

	get_task_struct(task);

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* task can safely be re-inserted now */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() implies a wmb() to pair with the queueing
		 * in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}
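/*
 * Typical usage pattern (a sketch, not specific to this file): wakeups are
 * collected under a lock and issued only after it has been dropped, keeping
 * the relatively expensive wake_up_process() calls out of the critical
 * section:
 *
 *	WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	...
 *	wake_q_add(&wake_q, task);
 *	...
 *	raw_spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);
 */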
/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id();
	struct sched_domain *sd;

	if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
		return cpu;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (!is_housekeeping_cpu(cpu))
		cpu = housekeeping_any_cpu();
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
	return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(void)
{
	/*
	 * FIFO realtime policy runs the highest priority task. Other runnable
	 * tasks are of a lower priority. The scheduler tick does nothing.
	 */
	if (current->policy == SCHED_FIFO)
		return true;

	/*
	 * Round-robin realtime tasks time slice with other tasks at the same
	 * realtime priority. Is this task the only one at this priority?
	 */
	if (current->policy == SCHED_RR) {
		struct sched_rt_entity *rt_se = &current->rt;

		return list_is_singular(&rt_se->run_list);
	}

	/*
	 * More than one running task needs preemption.
	 * nr_running update is assumed to be visible
	 * after IPI is sent from wakers.
	 */
	if (this_rq()->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */

void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

#endif /* CONFIG_SMP */
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
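/*
 * The walk above is a depth-first traversal expressed with gotos so the
 * list_for_each_entry_rcu() cursor can be resumed when coming back "up" to a
 * parent. Callers elsewhere in the scheduler typically go through the
 * walk_tg_tree() wrapper, which starts from root_task_group, and pass
 * tg_nop() for whichever direction they do not care about.
 */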
static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (idle_policy(p->policy)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(sched_prio_to_weight[prio]);
	load->inv_weight = sched_prio_to_wmult[prio];
}
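/*
 * For reference: sched_prio_to_weight[] is indexed by static_prio relative
 * to MAX_RT_PRIO, i.e. by nice level + 20. Nice 0 maps to a weight of 1024
 * (NICE_0_LOAD) and each nice step changes the weight by roughly 1.25x, so
 * one nice level is worth about 10% CPU when competing with a nice-0 task.
 * sched_prio_to_wmult[] caches the precomputed 2^32 / weight inverses.
 */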
static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	if (!(flags & ENQUEUE_RESTORE))
		sched_info_queued(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	if (!(flags & DEQUEUE_SAVE))
		sched_info_dequeued(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight mis-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}
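/*
 * Worked example of the resulting kernel-internal prio values (lower value
 * means higher priority): deadline tasks get MAX_DL_PRIO-1 = -1; RT tasks
 * get MAX_RT_PRIO-1 - rt_priority, i.e. 0..98 for rt_priority 99..1; normal
 * tasks keep static_prio = 120 + nice, i.e. 100..139 for nice -20..19.
 */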
/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_curr(rq);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule.  In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq, true);
}

#ifdef CONFIG_SMP
/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
{
	lockdep_assert_held(&rq->lock);

	p->on_rq = TASK_ON_RQ_MIGRATING;
	dequeue_task(rq, p, 0);
	set_task_cpu(p, new_cpu);
	raw_spin_unlock(&rq->lock);

	rq = cpu_rq(new_cpu);

	raw_spin_lock(&rq->lock);
	BUG_ON(task_cpu(p) != new_cpu);
	enqueue_task(rq, p, 0);
	p->on_rq = TASK_ON_RQ_QUEUED;
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
{
	if (unlikely(!cpu_active(dest_cpu)))
		return rq;

	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		return rq;

	rq = move_queued_task(rq, p, dest_cpu);

	return rq;
}
/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_allowed
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	sched_ttwu_pending();

	raw_spin_lock(&p->pi_lock);
	raw_spin_lock(&rq->lock);
	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq && task_on_rq_queued(p))
		rq = __migrate_task(rq, p, arg->dest_cpu);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock(&p->pi_lock);

	local_irq_enable();
	return 0;
}

/*
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 */
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	struct rq *rq = task_rq(p);
	bool queued, running;

	lockdep_assert_held(&p->pi_lock);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued) {
		/*
		 * Because __kthread_bind() calls this on blocked tasks without
		 * holding rq->lock.
		 */
		lockdep_assert_held(&rq->lock);
		dequeue_task(rq, p, DEQUEUE_SAVE);
	}
	if (running)
		put_prev_task(rq, p);

	p->sched_class->set_cpus_allowed(p, new_mask);

	if (running)
		p->sched_class->set_curr_task(rq);
	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE);
}
/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, bool check)
{
	unsigned long flags;
	struct rq *rq;
	unsigned int dest_cpu;
	int ret = 0;

	rq = task_rq_lock(p, &flags);

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if (check && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	} else if (task_on_rq_queued(p)) {
		/*
		 * OK, since we're going to drop the lock immediately
		 * afterwards anyway.
		 */
		lockdep_unpin_lock(&rq->lock);
		rq = move_queued_task(rq, p, dest_cpu);
		lockdep_pin_lock(&rq->lock);
	}
out:
	task_rq_unlock(rq, p, &flags);

	return ret;
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!p->on_rq);

	/*
	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
	 * time relying on p->on_rq.
	 */
	WARN_ON_ONCE(p->state == TASK_RUNNING &&
		     p->sched_class == &fair_sched_class &&
		     (p->on_rq && !task_on_rq_migrating(p)));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p);
		p->se.nr_migrations++;
		perf_event_task_migrate(p);
	}

	__set_task_cpu(p, new_cpu);
}
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (task_on_rq_queued(p)) {
		struct rq *src_rq, *dst_rq;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		p->on_rq = TASK_ON_RQ_MIGRATING;
		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		p->on_rq = TASK_ON_RQ_QUEUED;
		check_preempt_curr(dst_rq, p, 0);
	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous cpu our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
		return -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);

	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);

	return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = task_cpu(cur),
		.dst_task = p,
		.dst_cpu = task_cpu(p),
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}
/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, queued;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		/* No more Mr. Nice Guy. */
		switch (state) {
		case cpuset:
			if (IS_ENABLED(CONFIG_CPUSETS)) {
				cpuset_cpus_allowed_fallback(p);
				state = possible;
				break;
			}
			/* fall-through */
		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
	lockdep_assert_held(&p->pi_lock);

	if (p->nr_cpus_allowed > 1)
		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}
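/*
 * update_avg() keeps an exponentially weighted moving average with a weight
 * of 1/8 for the new sample: avg += (sample - avg) / 8. It is used below to
 * track rq->avg_idle across wakeups.
 */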
#else

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
					 const struct cpumask *new_mask, bool check)
{
	return set_cpus_allowed_ptr(p, new_mask);
}

#endif /* CONFIG_SMP */

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
	schedstat_inc(p, se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = TASK_ON_RQ_QUEUED;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	check_preempt_curr(rq, p, wake_flags);
	p->state = TASK_RUNNING;
	trace_sched_wakeup(p);

#ifdef CONFIG_SMP
	if (p->sched_class->task_woken) {
		/*
		 * Our task @p is fully woken up and running; so it's safe to
		 * drop the rq->lock, hereafter rq is only used for statistics.
		 */
		lockdep_unpin_lock(&rq->lock);
		p->sched_class->task_woken(rq, p);
		lockdep_pin_lock(&rq->lock);
	}

	if (rq->idle_stamp) {
		u64 delta = rq_clock(rq) - rq->idle_stamp;
		u64 max = 2*rq->max_idle_balance_cost;

		update_avg(&rq->avg_idle, delta);

		if (rq->avg_idle > max)
			rq->avg_idle = max;

		rq->idle_stamp = 0;
	}
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
	lockdep_assert_held(&rq->lock);

#ifdef CONFIG_SMP
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}
/*
 * Called in case the task @p isn't fully descheduled from its runqueue;
 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, since
 * the task is still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (task_on_rq_queued(p)) {
		/* check_preempt_curr() may use rq clock */
		update_rq_clock(rq);
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;
}

#ifdef CONFIG_SMP
void sched_ttwu_pending(void)
{
	struct rq *rq = this_rq();
	struct llist_node *llist = llist_del_all(&rq->wake_list);
	struct task_struct *p;
	unsigned long flags;

	if (!llist)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	lockdep_pin_lock(&rq->lock);

	while (llist) {
		p = llist_entry(llist, struct task_struct, wake_entry);
		llist = llist_next(llist);
		ttwu_do_activate(rq, p, 0);
	}

	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
void scheduler_ipi(void)
{
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
	preempt_fold_need_resched();

	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
		return;

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
	 * path. Now that we actually do some work, we need to make sure
	 * we do call them.
	 *
	 * Some archs already do call them, luckily irq_enter/exit nest
	 * properly.
	 *
	 * Arguably we should visit all archs and update all handlers,
	 * however a fair share of IPIs are still resched only so this would
	 * somewhat pessimize the simple resched case.
	 */
	irq_enter();
	sched_ttwu_pending();

	/*
	 * Check if someone kicked us for doing the nohz idle load balance.
	 */
	if (unlikely(got_nohz_idle_kick())) {
		this_rq()->idle_balance = 1;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
	irq_exit();
}

static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
		if (!set_nr_if_polling(rq->idle))
			smp_send_reschedule(cpu);
		else
			trace_sched_wake_idle_without_ipi(cpu);
	}
}

void wake_up_if_idle(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	rcu_read_lock();

	if (!is_idle_task(rcu_dereference(rq->curr)))
		goto out;

	if (set_nr_if_polling(rq->idle)) {
		trace_sched_wake_idle_without_ipi(cpu);
	} else {
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (is_idle_task(rq->curr))
			smp_send_reschedule(cpu);
		/* Else cpu is not in idle, do nothing here */
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

out:
	rcu_read_unlock();
}

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif /* CONFIG_SMP */

static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

	raw_spin_lock(&rq->lock);
	lockdep_pin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock(&rq->lock);
}
/*
 * Notes on Program-Order guarantees on SMP systems.
 *
 *   MIGRATION
 *
 * The basic program-order guarantee on SMP systems is that when a task [t]
 * migrates, all its activity on its old cpu [c0] happens-before any subsequent
 * execution on its new cpu [c1].
 *
 * For migration (of runnable tasks) this is provided by the following means:
 *
 *  A) UNLOCK of the rq(c0)->lock scheduling out task t
 *  B) migration for t is required to synchronize *both* rq(c0)->lock and
 *     rq(c1)->lock (if not at the same time, then in that order).
 *  C) LOCK of the rq(c1)->lock scheduling in task
 *
 * Transitivity guarantees that B happens after A and C after B.
 * Note: we only require RCpc transitivity.
 * Note: the cpu doing B need not be c0 or c1
 *
 * Example:
 *
 *   CPU0            CPU1            CPU2
 *
 *   LOCK rq(0)->lock
 *   sched-out X
 *   sched-in Y
 *   UNLOCK rq(0)->lock
 *
 *                                   LOCK rq(0)->lock // orders against CPU0
 *                                   dequeue X
 *                                   UNLOCK rq(0)->lock
 *
 *                                   LOCK rq(1)->lock
 *                                   enqueue X
 *                                   UNLOCK rq(1)->lock
 *
 *                   LOCK rq(1)->lock // orders against CPU2
 *                   sched-out Z
 *                   sched-in X
 *                   UNLOCK rq(1)->lock
 *
 *
 *  BLOCKING -- aka. SLEEP + WAKEUP
 *
 * For blocking we (obviously) need to provide the same guarantee as for
 * migration. However the means are completely different as there is no lock
 * chain to provide order. Instead we do:
 *
 *   1) smp_store_release(X->on_cpu, 0)
 *   2) smp_cond_acquire(!X->on_cpu)
 *
 * Example:
 *
 *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
 *
 *   LOCK rq(0)->lock LOCK X->pi_lock
 *   dequeue X
 *   sched-out X
 *   smp_store_release(X->on_cpu, 0);
 *
 *                    smp_cond_acquire(!X->on_cpu);
 *                    X->state = WAKING
 *                    set_task_cpu(X,2)
 *
 *                    LOCK rq(2)->lock
 *                    enqueue X
 *                    X->state = RUNNING
 *                    UNLOCK rq(2)->lock
 *
 *                                          LOCK rq(2)->lock // orders against CPU1
 *                                          sched-out Z
 *                                          sched-in X
 *                                          UNLOCK rq(2)->lock
 *
 *                    UNLOCK X->pi_lock
 *   UNLOCK rq(0)->lock
 *
 *
 * However; for wakeups there is a second guarantee we must provide, namely we
 * must observe the state that led to our wakeup. That is, not only must our
 * task observe its own prior state, it must also observe the stores prior to
 * its wakeup.
 *
 * This means that any means of doing remote wakeups must order the CPU doing
 * the wakeup against the CPU the task is going to end up running on. This,
 * however, is already required for the regular Program-Order guarantee above,
 * since the waking CPU is the one issuing the ACQUIRE (smp_cond_acquire).
 *
 */
2073 * 2074 * Pairs with the smp_store_release() in finish_lock_switch(). 2075 * 2076 * This ensures that tasks getting woken will be fully ordered against 2077 * their previous state and preserve Program Order. 2078 */ 2079 smp_cond_acquire(!p->on_cpu); 2080 2081 p->sched_contributes_to_load = !!task_contributes_to_load(p); 2082 p->state = TASK_WAKING; 2083 2084 if (p->sched_class->task_waking) 2085 p->sched_class->task_waking(p); 2086 2087 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); 2088 if (task_cpu(p) != cpu) { 2089 wake_flags |= WF_MIGRATED; 2090 set_task_cpu(p, cpu); 2091 } 2092 #endif /* CONFIG_SMP */ 2093 2094 ttwu_queue(p, cpu); 2095 stat: 2096 ttwu_stat(p, cpu, wake_flags); 2097 out: 2098 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2099 2100 return success; 2101 } 2102 2103 /** 2104 * try_to_wake_up_local - try to wake up a local task with rq lock held 2105 * @p: the thread to be awakened 2106 * 2107 * Put @p on the run-queue if it's not already there. The caller must 2108 * ensure that this_rq() is locked, @p is bound to this_rq() and not 2109 * the current task. 2110 */ 2111 static void try_to_wake_up_local(struct task_struct *p) 2112 { 2113 struct rq *rq = task_rq(p); 2114 2115 if (WARN_ON_ONCE(rq != this_rq()) || 2116 WARN_ON_ONCE(p == current)) 2117 return; 2118 2119 lockdep_assert_held(&rq->lock); 2120 2121 if (!raw_spin_trylock(&p->pi_lock)) { 2122 /* 2123 * This is OK, because current is on_cpu, which avoids it being 2124 * picked for load-balance and preemption/IRQs are still 2125 * disabled avoiding further scheduler activity on it and we've 2126 * not yet picked a replacement task. 2127 */ 2128 lockdep_unpin_lock(&rq->lock); 2129 raw_spin_unlock(&rq->lock); 2130 raw_spin_lock(&p->pi_lock); 2131 raw_spin_lock(&rq->lock); 2132 lockdep_pin_lock(&rq->lock); 2133 } 2134 2135 if (!(p->state & TASK_NORMAL)) 2136 goto out; 2137 2138 trace_sched_waking(p); 2139 2140 if (!task_on_rq_queued(p)) 2141 ttwu_activate(rq, p, ENQUEUE_WAKEUP); 2142 2143 ttwu_do_wakeup(rq, p, 0); 2144 ttwu_stat(p, smp_processor_id(), 0); 2145 out: 2146 raw_spin_unlock(&p->pi_lock); 2147 } 2148 2149 /** 2150 * wake_up_process - Wake up a specific process 2151 * @p: The process to be woken up. 2152 * 2153 * Attempt to wake up the nominated process and move it to the set of runnable 2154 * processes. 2155 * 2156 * Return: 1 if the process was woken up, 0 if it was already running. 2157 * 2158 * It may be assumed that this function implies a write memory barrier before 2159 * changing the task state if and only if any tasks are woken up. 2160 */ 2161 int wake_up_process(struct task_struct *p) 2162 { 2163 return try_to_wake_up(p, TASK_NORMAL, 0); 2164 } 2165 EXPORT_SYMBOL(wake_up_process); 2166 2167 int wake_up_state(struct task_struct *p, unsigned int state) 2168 { 2169 return try_to_wake_up(p, state, 0); 2170 } 2171 2172 /* 2173 * This function clears the sched_dl_entity static params. 2174 */ 2175 void __dl_clear_params(struct task_struct *p) 2176 { 2177 struct sched_dl_entity *dl_se = &p->dl; 2178 2179 dl_se->dl_runtime = 0; 2180 dl_se->dl_deadline = 0; 2181 dl_se->dl_period = 0; 2182 dl_se->flags = 0; 2183 dl_se->dl_bw = 0; 2184 2185 dl_se->dl_throttled = 0; 2186 dl_se->dl_new = 1; 2187 dl_se->dl_yielded = 0; 2188 } 2189 2190 /* 2191 * Perform scheduler related setup for a newly forked process p. 2192 * p is forked by current. 
2193 * 2194 * __sched_fork() is basic setup used by init_idle() too: 2195 */ 2196 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 2197 { 2198 p->on_rq = 0; 2199 2200 p->se.on_rq = 0; 2201 p->se.exec_start = 0; 2202 p->se.sum_exec_runtime = 0; 2203 p->se.prev_sum_exec_runtime = 0; 2204 p->se.nr_migrations = 0; 2205 p->se.vruntime = 0; 2206 INIT_LIST_HEAD(&p->se.group_node); 2207 2208 #ifdef CONFIG_FAIR_GROUP_SCHED 2209 p->se.cfs_rq = NULL; 2210 #endif 2211 2212 #ifdef CONFIG_SCHEDSTATS 2213 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 2214 #endif 2215 2216 RB_CLEAR_NODE(&p->dl.rb_node); 2217 init_dl_task_timer(&p->dl); 2218 __dl_clear_params(p); 2219 2220 INIT_LIST_HEAD(&p->rt.run_list); 2221 2222 #ifdef CONFIG_PREEMPT_NOTIFIERS 2223 INIT_HLIST_HEAD(&p->preempt_notifiers); 2224 #endif 2225 2226 #ifdef CONFIG_NUMA_BALANCING 2227 if (p->mm && atomic_read(&p->mm->mm_users) == 1) { 2228 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 2229 p->mm->numa_scan_seq = 0; 2230 } 2231 2232 if (clone_flags & CLONE_VM) 2233 p->numa_preferred_nid = current->numa_preferred_nid; 2234 else 2235 p->numa_preferred_nid = -1; 2236 2237 p->node_stamp = 0ULL; 2238 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; 2239 p->numa_scan_period = sysctl_numa_balancing_scan_delay; 2240 p->numa_work.next = &p->numa_work; 2241 p->numa_faults = NULL; 2242 p->last_task_numa_placement = 0; 2243 p->last_sum_exec_runtime = 0; 2244 2245 p->numa_group = NULL; 2246 #endif /* CONFIG_NUMA_BALANCING */ 2247 } 2248 2249 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 2250 2251 #ifdef CONFIG_NUMA_BALANCING 2252 2253 void set_numabalancing_state(bool enabled) 2254 { 2255 if (enabled) 2256 static_branch_enable(&sched_numa_balancing); 2257 else 2258 static_branch_disable(&sched_numa_balancing); 2259 } 2260 2261 #ifdef CONFIG_PROC_SYSCTL 2262 int sysctl_numa_balancing(struct ctl_table *table, int write, 2263 void __user *buffer, size_t *lenp, loff_t *ppos) 2264 { 2265 struct ctl_table t; 2266 int err; 2267 int state = static_branch_likely(&sched_numa_balancing); 2268 2269 if (write && !capable(CAP_SYS_ADMIN)) 2270 return -EPERM; 2271 2272 t = *table; 2273 t.data = &state; 2274 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2275 if (err < 0) 2276 return err; 2277 if (write) 2278 set_numabalancing_state(state); 2279 return err; 2280 } 2281 #endif 2282 #endif 2283 2284 /* 2285 * fork()/clone()-time setup: 2286 */ 2287 int sched_fork(unsigned long clone_flags, struct task_struct *p) 2288 { 2289 unsigned long flags; 2290 int cpu = get_cpu(); 2291 2292 __sched_fork(clone_flags, p); 2293 /* 2294 * We mark the process as running here. This guarantees that 2295 * nobody will actually run it, and a signal or other external 2296 * event cannot wake it up and insert it on the runqueue either. 2297 */ 2298 p->state = TASK_RUNNING; 2299 2300 /* 2301 * Make sure we do not leak PI boosting priority to the child. 2302 */ 2303 p->prio = current->normal_prio; 2304 2305 /* 2306 * Revert to default priority/policy on fork if requested. 
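 * (A rough userspace-side sketch, not part of this file: a task opts in
 * by OR-ing the flag into the policy it passes to sched_setscheduler(),
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp);
 *
 * after which any children it forks start out, per the reset below, as
 * SCHED_NORMAL with a non-negative nice value.)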
2307 */ 2308 if (unlikely(p->sched_reset_on_fork)) { 2309 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 2310 p->policy = SCHED_NORMAL; 2311 p->static_prio = NICE_TO_PRIO(0); 2312 p->rt_priority = 0; 2313 } else if (PRIO_TO_NICE(p->static_prio) < 0) 2314 p->static_prio = NICE_TO_PRIO(0); 2315 2316 p->prio = p->normal_prio = __normal_prio(p); 2317 set_load_weight(p); 2318 2319 /* 2320 * We don't need the reset flag anymore after the fork. It has 2321 * fulfilled its duty: 2322 */ 2323 p->sched_reset_on_fork = 0; 2324 } 2325 2326 if (dl_prio(p->prio)) { 2327 put_cpu(); 2328 return -EAGAIN; 2329 } else if (rt_prio(p->prio)) { 2330 p->sched_class = &rt_sched_class; 2331 } else { 2332 p->sched_class = &fair_sched_class; 2333 } 2334 2335 if (p->sched_class->task_fork) 2336 p->sched_class->task_fork(p); 2337 2338 /* 2339 * The child is not yet in the pid-hash so no cgroup attach races, 2340 * and the cgroup is pinned to this child due to cgroup_fork() 2341 * is ran before sched_fork(). 2342 * 2343 * Silence PROVE_RCU. 2344 */ 2345 raw_spin_lock_irqsave(&p->pi_lock, flags); 2346 set_task_cpu(p, cpu); 2347 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2348 2349 #ifdef CONFIG_SCHED_INFO 2350 if (likely(sched_info_on())) 2351 memset(&p->sched_info, 0, sizeof(p->sched_info)); 2352 #endif 2353 #if defined(CONFIG_SMP) 2354 p->on_cpu = 0; 2355 #endif 2356 init_task_preempt_count(p); 2357 #ifdef CONFIG_SMP 2358 plist_node_init(&p->pushable_tasks, MAX_PRIO); 2359 RB_CLEAR_NODE(&p->pushable_dl_tasks); 2360 #endif 2361 2362 put_cpu(); 2363 return 0; 2364 } 2365 2366 unsigned long to_ratio(u64 period, u64 runtime) 2367 { 2368 if (runtime == RUNTIME_INF) 2369 return 1ULL << 20; 2370 2371 /* 2372 * Doing this here saves a lot of checks in all 2373 * the calling paths, and returning zero seems 2374 * safe for them anyway. 2375 */ 2376 if (period == 0) 2377 return 0; 2378 2379 return div64_u64(runtime << 20, period); 2380 } 2381 2382 #ifdef CONFIG_SMP 2383 inline struct dl_bw *dl_bw_of(int i) 2384 { 2385 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2386 "sched RCU must be held"); 2387 return &cpu_rq(i)->rd->dl_bw; 2388 } 2389 2390 static inline int dl_bw_cpus(int i) 2391 { 2392 struct root_domain *rd = cpu_rq(i)->rd; 2393 int cpus = 0; 2394 2395 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2396 "sched RCU must be held"); 2397 for_each_cpu_and(i, rd->span, cpu_active_mask) 2398 cpus++; 2399 2400 return cpus; 2401 } 2402 #else 2403 inline struct dl_bw *dl_bw_of(int i) 2404 { 2405 return &cpu_rq(i)->dl.dl_bw; 2406 } 2407 2408 static inline int dl_bw_cpus(int i) 2409 { 2410 return 1; 2411 } 2412 #endif 2413 2414 /* 2415 * We must be sure that accepting a new task (or allowing changing the 2416 * parameters of an existing one) is consistent with the bandwidth 2417 * constraints. If yes, this function also accordingly updates the currently 2418 * allocated bandwidth to reflect the new situation. 2419 * 2420 * This function is called while holding p's rq->lock. 2421 * 2422 * XXX we should delay bw change until the task's 0-lag point, see 2423 * __setparam_dl(). 2424 */ 2425 static int dl_overflow(struct task_struct *p, int policy, 2426 const struct sched_attr *attr) 2427 { 2428 2429 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 2430 u64 period = attr->sched_period ?: attr->sched_deadline; 2431 u64 runtime = attr->sched_runtime; 2432 u64 new_bw = dl_policy(policy) ? 
to_ratio(period, runtime) : 0; 2433 int cpus, err = -1; 2434 2435 if (new_bw == p->dl.dl_bw) 2436 return 0; 2437 2438 /* 2439 * Either if a task, enters, leave, or stays -deadline but changes 2440 * its parameters, we may need to update accordingly the total 2441 * allocated bandwidth of the container. 2442 */ 2443 raw_spin_lock(&dl_b->lock); 2444 cpus = dl_bw_cpus(task_cpu(p)); 2445 if (dl_policy(policy) && !task_has_dl_policy(p) && 2446 !__dl_overflow(dl_b, cpus, 0, new_bw)) { 2447 __dl_add(dl_b, new_bw); 2448 err = 0; 2449 } else if (dl_policy(policy) && task_has_dl_policy(p) && 2450 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) { 2451 __dl_clear(dl_b, p->dl.dl_bw); 2452 __dl_add(dl_b, new_bw); 2453 err = 0; 2454 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { 2455 __dl_clear(dl_b, p->dl.dl_bw); 2456 err = 0; 2457 } 2458 raw_spin_unlock(&dl_b->lock); 2459 2460 return err; 2461 } 2462 2463 extern void init_dl_bw(struct dl_bw *dl_b); 2464 2465 /* 2466 * wake_up_new_task - wake up a newly created task for the first time. 2467 * 2468 * This function will do some initial scheduler statistics housekeeping 2469 * that must be done for every newly created context, then puts the task 2470 * on the runqueue and wakes it. 2471 */ 2472 void wake_up_new_task(struct task_struct *p) 2473 { 2474 unsigned long flags; 2475 struct rq *rq; 2476 2477 raw_spin_lock_irqsave(&p->pi_lock, flags); 2478 /* Initialize new task's runnable average */ 2479 init_entity_runnable_average(&p->se); 2480 #ifdef CONFIG_SMP 2481 /* 2482 * Fork balancing, do it here and not earlier because: 2483 * - cpus_allowed can change in the fork path 2484 * - any previously selected cpu might disappear through hotplug 2485 */ 2486 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 2487 #endif 2488 2489 rq = __task_rq_lock(p); 2490 activate_task(rq, p, 0); 2491 p->on_rq = TASK_ON_RQ_QUEUED; 2492 trace_sched_wakeup_new(p); 2493 check_preempt_curr(rq, p, WF_FORK); 2494 #ifdef CONFIG_SMP 2495 if (p->sched_class->task_woken) { 2496 /* 2497 * Nothing relies on rq->lock after this, so its fine to 2498 * drop it. 2499 */ 2500 lockdep_unpin_lock(&rq->lock); 2501 p->sched_class->task_woken(rq, p); 2502 lockdep_pin_lock(&rq->lock); 2503 } 2504 #endif 2505 task_rq_unlock(rq, p, &flags); 2506 } 2507 2508 #ifdef CONFIG_PREEMPT_NOTIFIERS 2509 2510 static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE; 2511 2512 void preempt_notifier_inc(void) 2513 { 2514 static_key_slow_inc(&preempt_notifier_key); 2515 } 2516 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 2517 2518 void preempt_notifier_dec(void) 2519 { 2520 static_key_slow_dec(&preempt_notifier_key); 2521 } 2522 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 2523 2524 /** 2525 * preempt_notifier_register - tell me when current is being preempted & rescheduled 2526 * @notifier: notifier struct to register 2527 */ 2528 void preempt_notifier_register(struct preempt_notifier *notifier) 2529 { 2530 if (!static_key_false(&preempt_notifier_key)) 2531 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 2532 2533 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 2534 } 2535 EXPORT_SYMBOL_GPL(preempt_notifier_register); 2536 2537 /** 2538 * preempt_notifier_unregister - no longer interested in preemption notifications 2539 * @notifier: notifier struct to unregister 2540 * 2541 * This is *not* safe to call from within a preemption notifier. 
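 * A minimal lifecycle sketch (hypothetical names; KVM is the main real
 * user of this interface):
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu) { }
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next) { }
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *	static struct preempt_notifier my_notifier;
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&my_notifier, &my_ops);
 *	preempt_notifier_register(&my_notifier);
 *	...
 *	preempt_notifier_unregister(&my_notifier);
 *	preempt_notifier_dec();
 *
 * where the unregister call is made by the task itself, outside of the
 * sched_in/sched_out callbacks.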
2542 */ 2543 void preempt_notifier_unregister(struct preempt_notifier *notifier) 2544 { 2545 hlist_del(¬ifier->link); 2546 } 2547 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 2548 2549 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 2550 { 2551 struct preempt_notifier *notifier; 2552 2553 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2554 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 2555 } 2556 2557 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2558 { 2559 if (static_key_false(&preempt_notifier_key)) 2560 __fire_sched_in_preempt_notifiers(curr); 2561 } 2562 2563 static void 2564 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 2565 struct task_struct *next) 2566 { 2567 struct preempt_notifier *notifier; 2568 2569 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2570 notifier->ops->sched_out(notifier, next); 2571 } 2572 2573 static __always_inline void 2574 fire_sched_out_preempt_notifiers(struct task_struct *curr, 2575 struct task_struct *next) 2576 { 2577 if (static_key_false(&preempt_notifier_key)) 2578 __fire_sched_out_preempt_notifiers(curr, next); 2579 } 2580 2581 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 2582 2583 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2584 { 2585 } 2586 2587 static inline void 2588 fire_sched_out_preempt_notifiers(struct task_struct *curr, 2589 struct task_struct *next) 2590 { 2591 } 2592 2593 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 2594 2595 /** 2596 * prepare_task_switch - prepare to switch tasks 2597 * @rq: the runqueue preparing to switch 2598 * @prev: the current task that is being switched out 2599 * @next: the task we are going to switch to. 2600 * 2601 * This is called with the rq lock held and interrupts off. It must 2602 * be paired with a subsequent finish_task_switch after the context 2603 * switch. 2604 * 2605 * prepare_task_switch sets up locking and calls architecture specific 2606 * hooks. 2607 */ 2608 static inline void 2609 prepare_task_switch(struct rq *rq, struct task_struct *prev, 2610 struct task_struct *next) 2611 { 2612 sched_info_switch(rq, prev, next); 2613 perf_event_task_sched_out(prev, next); 2614 fire_sched_out_preempt_notifiers(prev, next); 2615 prepare_lock_switch(rq, next); 2616 prepare_arch_switch(next); 2617 } 2618 2619 /** 2620 * finish_task_switch - clean up after a task-switch 2621 * @prev: the thread we just switched away from. 2622 * 2623 * finish_task_switch must be called after the context switch, paired 2624 * with a prepare_task_switch call before the context switch. 2625 * finish_task_switch will reconcile locking set up by prepare_task_switch, 2626 * and do any other architecture-specific cleanup actions. 2627 * 2628 * Note that we may have delayed dropping an mm in context_switch(). If 2629 * so, we finish that here outside of the runqueue lock. (Doing it 2630 * with the lock held can cause deadlocks; see schedule() for 2631 * details.) 2632 * 2633 * The context switch have flipped the stack from under us and restored the 2634 * local variables which were saved when this task called schedule() in the 2635 * past. prev == current is still correct but we need to recalculate this_rq 2636 * because prev may have moved to another CPU. 
2637 */ 2638 static struct rq *finish_task_switch(struct task_struct *prev) 2639 __releases(rq->lock) 2640 { 2641 struct rq *rq = this_rq(); 2642 struct mm_struct *mm = rq->prev_mm; 2643 long prev_state; 2644 2645 /* 2646 * The previous task will have left us with a preempt_count of 2 2647 * because it left us after: 2648 * 2649 * schedule() 2650 * preempt_disable(); // 1 2651 * __schedule() 2652 * raw_spin_lock_irq(&rq->lock) // 2 2653 * 2654 * Also, see FORK_PREEMPT_COUNT. 2655 */ 2656 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, 2657 "corrupted preempt_count: %s/%d/0x%x\n", 2658 current->comm, current->pid, preempt_count())) 2659 preempt_count_set(FORK_PREEMPT_COUNT); 2660 2661 rq->prev_mm = NULL; 2662 2663 /* 2664 * A task struct has one reference for the use as "current". 2665 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 2666 * schedule one last time. The schedule call will never return, and 2667 * the scheduled task must drop that reference. 2668 * 2669 * We must observe prev->state before clearing prev->on_cpu (in 2670 * finish_lock_switch), otherwise a concurrent wakeup can get prev 2671 * running on another CPU and we could rave with its RUNNING -> DEAD 2672 * transition, resulting in a double drop. 2673 */ 2674 prev_state = prev->state; 2675 vtime_task_switch(prev); 2676 perf_event_task_sched_in(prev, current); 2677 finish_lock_switch(rq, prev); 2678 finish_arch_post_lock_switch(); 2679 2680 fire_sched_in_preempt_notifiers(current); 2681 if (mm) 2682 mmdrop(mm); 2683 if (unlikely(prev_state == TASK_DEAD)) { 2684 if (prev->sched_class->task_dead) 2685 prev->sched_class->task_dead(prev); 2686 2687 /* 2688 * Remove function-return probe instances associated with this 2689 * task and put them back on the free list. 2690 */ 2691 kprobe_flush_task(prev); 2692 put_task_struct(prev); 2693 } 2694 2695 tick_nohz_task_switch(); 2696 return rq; 2697 } 2698 2699 #ifdef CONFIG_SMP 2700 2701 /* rq->lock is NOT held, but preemption is disabled */ 2702 static void __balance_callback(struct rq *rq) 2703 { 2704 struct callback_head *head, *next; 2705 void (*func)(struct rq *rq); 2706 unsigned long flags; 2707 2708 raw_spin_lock_irqsave(&rq->lock, flags); 2709 head = rq->balance_callback; 2710 rq->balance_callback = NULL; 2711 while (head) { 2712 func = (void (*)(struct rq *))head->func; 2713 next = head->next; 2714 head->next = NULL; 2715 head = next; 2716 2717 func(rq); 2718 } 2719 raw_spin_unlock_irqrestore(&rq->lock, flags); 2720 } 2721 2722 static inline void balance_callback(struct rq *rq) 2723 { 2724 if (unlikely(rq->balance_callback)) 2725 __balance_callback(rq); 2726 } 2727 2728 #else 2729 2730 static inline void balance_callback(struct rq *rq) 2731 { 2732 } 2733 2734 #endif 2735 2736 /** 2737 * schedule_tail - first thing a freshly forked thread must call. 2738 * @prev: the thread we just switched away from. 2739 */ 2740 asmlinkage __visible void schedule_tail(struct task_struct *prev) 2741 __releases(rq->lock) 2742 { 2743 struct rq *rq; 2744 2745 /* 2746 * New tasks start with FORK_PREEMPT_COUNT, see there and 2747 * finish_task_switch() for details. 2748 * 2749 * finish_task_switch() will drop rq->lock() and lower preempt_count 2750 * and the preempt_enable() will end up enabling preemption (on 2751 * PREEMPT_COUNT kernels). 
2752 */ 2753 2754 rq = finish_task_switch(prev); 2755 balance_callback(rq); 2756 preempt_enable(); 2757 2758 if (current->set_child_tid) 2759 put_user(task_pid_vnr(current), current->set_child_tid); 2760 } 2761 2762 /* 2763 * context_switch - switch to the new MM and the new thread's register state. 2764 */ 2765 static inline struct rq * 2766 context_switch(struct rq *rq, struct task_struct *prev, 2767 struct task_struct *next) 2768 { 2769 struct mm_struct *mm, *oldmm; 2770 2771 prepare_task_switch(rq, prev, next); 2772 2773 mm = next->mm; 2774 oldmm = prev->active_mm; 2775 /* 2776 * For paravirt, this is coupled with an exit in switch_to to 2777 * combine the page table reload and the switch backend into 2778 * one hypercall. 2779 */ 2780 arch_start_context_switch(prev); 2781 2782 if (!mm) { 2783 next->active_mm = oldmm; 2784 atomic_inc(&oldmm->mm_count); 2785 enter_lazy_tlb(oldmm, next); 2786 } else 2787 switch_mm(oldmm, mm, next); 2788 2789 if (!prev->mm) { 2790 prev->active_mm = NULL; 2791 rq->prev_mm = oldmm; 2792 } 2793 /* 2794 * Since the runqueue lock will be released by the next 2795 * task (which is an invalid locking op but in the case 2796 * of the scheduler it's an obvious special-case), so we 2797 * do an early lockdep release here: 2798 */ 2799 lockdep_unpin_lock(&rq->lock); 2800 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 2801 2802 /* Here we just switch the register state and the stack. */ 2803 switch_to(prev, next, prev); 2804 barrier(); 2805 2806 return finish_task_switch(prev); 2807 } 2808 2809 /* 2810 * nr_running and nr_context_switches: 2811 * 2812 * externally visible scheduler statistics: current number of runnable 2813 * threads, total number of context switches performed since bootup. 2814 */ 2815 unsigned long nr_running(void) 2816 { 2817 unsigned long i, sum = 0; 2818 2819 for_each_online_cpu(i) 2820 sum += cpu_rq(i)->nr_running; 2821 2822 return sum; 2823 } 2824 2825 /* 2826 * Check if only the current task is running on the cpu. 2827 * 2828 * Caution: this function does not check that the caller has disabled 2829 * preemption, thus the result might have a time-of-check-to-time-of-use 2830 * race. The caller is responsible to use it correctly, for example: 2831 * 2832 * - from a non-preemptable section (of course) 2833 * 2834 * - from a thread that is bound to a single CPU 2835 * 2836 * - in a loop with very short iterations (e.g. a polling loop) 2837 */ 2838 bool single_task_running(void) 2839 { 2840 return raw_rq()->nr_running == 1; 2841 } 2842 EXPORT_SYMBOL(single_task_running); 2843 2844 unsigned long long nr_context_switches(void) 2845 { 2846 int i; 2847 unsigned long long sum = 0; 2848 2849 for_each_possible_cpu(i) 2850 sum += cpu_rq(i)->nr_switches; 2851 2852 return sum; 2853 } 2854 2855 unsigned long nr_iowait(void) 2856 { 2857 unsigned long i, sum = 0; 2858 2859 for_each_possible_cpu(i) 2860 sum += atomic_read(&cpu_rq(i)->nr_iowait); 2861 2862 return sum; 2863 } 2864 2865 unsigned long nr_iowait_cpu(int cpu) 2866 { 2867 struct rq *this = cpu_rq(cpu); 2868 return atomic_read(&this->nr_iowait); 2869 } 2870 2871 void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) 2872 { 2873 struct rq *rq = this_rq(); 2874 *nr_waiters = atomic_read(&rq->nr_iowait); 2875 *load = rq->load.weight; 2876 } 2877 2878 #ifdef CONFIG_SMP 2879 2880 /* 2881 * sched_exec - execve() is a valuable balancing opportunity, because at 2882 * this point the task has the smallest effective memory and cache footprint. 
2883 */ 2884 void sched_exec(void) 2885 { 2886 struct task_struct *p = current; 2887 unsigned long flags; 2888 int dest_cpu; 2889 2890 raw_spin_lock_irqsave(&p->pi_lock, flags); 2891 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); 2892 if (dest_cpu == smp_processor_id()) 2893 goto unlock; 2894 2895 if (likely(cpu_active(dest_cpu))) { 2896 struct migration_arg arg = { p, dest_cpu }; 2897 2898 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2899 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 2900 return; 2901 } 2902 unlock: 2903 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2904 } 2905 2906 #endif 2907 2908 DEFINE_PER_CPU(struct kernel_stat, kstat); 2909 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 2910 2911 EXPORT_PER_CPU_SYMBOL(kstat); 2912 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 2913 2914 /* 2915 * Return accounted runtime for the task. 2916 * In case the task is currently running, return the runtime plus current's 2917 * pending runtime that have not been accounted yet. 2918 */ 2919 unsigned long long task_sched_runtime(struct task_struct *p) 2920 { 2921 unsigned long flags; 2922 struct rq *rq; 2923 u64 ns; 2924 2925 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 2926 /* 2927 * 64-bit doesn't need locks to atomically read a 64bit value. 2928 * So we have a optimization chance when the task's delta_exec is 0. 2929 * Reading ->on_cpu is racy, but this is ok. 2930 * 2931 * If we race with it leaving cpu, we'll take a lock. So we're correct. 2932 * If we race with it entering cpu, unaccounted time is 0. This is 2933 * indistinguishable from the read occurring a few cycles earlier. 2934 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 2935 * been accounted, so we're correct here as well. 2936 */ 2937 if (!p->on_cpu || !task_on_rq_queued(p)) 2938 return p->se.sum_exec_runtime; 2939 #endif 2940 2941 rq = task_rq_lock(p, &flags); 2942 /* 2943 * Must be ->curr _and_ ->on_rq. If dequeued, we would 2944 * project cycles that may never be accounted to this 2945 * thread, breaking clock_gettime(). 2946 */ 2947 if (task_current(rq, p) && task_on_rq_queued(p)) { 2948 update_rq_clock(rq); 2949 p->sched_class->update_curr(rq); 2950 } 2951 ns = p->se.sum_exec_runtime; 2952 task_rq_unlock(rq, p, &flags); 2953 2954 return ns; 2955 } 2956 2957 /* 2958 * This function gets called by the timer code, with HZ frequency. 2959 * We call it with interrupts disabled. 2960 */ 2961 void scheduler_tick(void) 2962 { 2963 int cpu = smp_processor_id(); 2964 struct rq *rq = cpu_rq(cpu); 2965 struct task_struct *curr = rq->curr; 2966 2967 sched_clock_tick(); 2968 2969 raw_spin_lock(&rq->lock); 2970 update_rq_clock(rq); 2971 curr->sched_class->task_tick(rq, curr, 0); 2972 update_cpu_load_active(rq); 2973 calc_global_load_tick(rq); 2974 raw_spin_unlock(&rq->lock); 2975 2976 perf_event_task_tick(); 2977 2978 #ifdef CONFIG_SMP 2979 rq->idle_balance = idle_cpu(cpu); 2980 trigger_load_balance(rq); 2981 #endif 2982 rq_last_tick_reset(rq); 2983 } 2984 2985 #ifdef CONFIG_NO_HZ_FULL 2986 /** 2987 * scheduler_tick_max_deferment 2988 * 2989 * Keep at least one tick per second when a single 2990 * active task is running because the scheduler doesn't 2991 * yet completely support full dynticks environment. 2992 * 2993 * This makes sure that uptime, CFS vruntime, load 2994 * balancing, etc... continue to move forward, even 2995 * with a very low granularity. 2996 * 2997 * Return: Maximum deferment in nanoseconds. 
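 * A worked example (sketch, assuming HZ=1000): if the last tick on this
 * runqueue fired 250 jiffies ago, then next = last_sched_tick + HZ is
 * still 750 jiffies in the future, so the tick may be deferred by at
 * most jiffies_to_nsecs(750), i.e. roughly 750ms; once a full second
 * has elapsed since the last tick, this returns 0 and the tick must run
 * right away.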
2998 */ 2999 u64 scheduler_tick_max_deferment(void) 3000 { 3001 struct rq *rq = this_rq(); 3002 unsigned long next, now = READ_ONCE(jiffies); 3003 3004 next = rq->last_sched_tick + HZ; 3005 3006 if (time_before_eq(next, now)) 3007 return 0; 3008 3009 return jiffies_to_nsecs(next - now); 3010 } 3011 #endif 3012 3013 notrace unsigned long get_parent_ip(unsigned long addr) 3014 { 3015 if (in_lock_functions(addr)) { 3016 addr = CALLER_ADDR2; 3017 if (in_lock_functions(addr)) 3018 addr = CALLER_ADDR3; 3019 } 3020 return addr; 3021 } 3022 3023 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ 3024 defined(CONFIG_PREEMPT_TRACER)) 3025 3026 void preempt_count_add(int val) 3027 { 3028 #ifdef CONFIG_DEBUG_PREEMPT 3029 /* 3030 * Underflow? 3031 */ 3032 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 3033 return; 3034 #endif 3035 __preempt_count_add(val); 3036 #ifdef CONFIG_DEBUG_PREEMPT 3037 /* 3038 * Spinlock count overflowing soon? 3039 */ 3040 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 3041 PREEMPT_MASK - 10); 3042 #endif 3043 if (preempt_count() == val) { 3044 unsigned long ip = get_parent_ip(CALLER_ADDR1); 3045 #ifdef CONFIG_DEBUG_PREEMPT 3046 current->preempt_disable_ip = ip; 3047 #endif 3048 trace_preempt_off(CALLER_ADDR0, ip); 3049 } 3050 } 3051 EXPORT_SYMBOL(preempt_count_add); 3052 NOKPROBE_SYMBOL(preempt_count_add); 3053 3054 void preempt_count_sub(int val) 3055 { 3056 #ifdef CONFIG_DEBUG_PREEMPT 3057 /* 3058 * Underflow? 3059 */ 3060 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 3061 return; 3062 /* 3063 * Is the spinlock portion underflowing? 3064 */ 3065 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 3066 !(preempt_count() & PREEMPT_MASK))) 3067 return; 3068 #endif 3069 3070 if (preempt_count() == val) 3071 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); 3072 __preempt_count_sub(val); 3073 } 3074 EXPORT_SYMBOL(preempt_count_sub); 3075 NOKPROBE_SYMBOL(preempt_count_sub); 3076 3077 #endif 3078 3079 /* 3080 * Print scheduling while atomic bug: 3081 */ 3082 static noinline void __schedule_bug(struct task_struct *prev) 3083 { 3084 if (oops_in_progress) 3085 return; 3086 3087 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 3088 prev->comm, prev->pid, preempt_count()); 3089 3090 debug_show_held_locks(prev); 3091 print_modules(); 3092 if (irqs_disabled()) 3093 print_irqtrace_events(prev); 3094 #ifdef CONFIG_DEBUG_PREEMPT 3095 if (in_atomic_preempt_off()) { 3096 pr_err("Preemption disabled at:"); 3097 print_ip_sym(current->preempt_disable_ip); 3098 pr_cont("\n"); 3099 } 3100 #endif 3101 dump_stack(); 3102 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 3103 } 3104 3105 /* 3106 * Various schedule()-time debugging checks and statistics: 3107 */ 3108 static inline void schedule_debug(struct task_struct *prev) 3109 { 3110 #ifdef CONFIG_SCHED_STACK_END_CHECK 3111 BUG_ON(task_stack_end_corrupted(prev)); 3112 #endif 3113 3114 if (unlikely(in_atomic_preempt_off())) { 3115 __schedule_bug(prev); 3116 preempt_count_set(PREEMPT_DISABLED); 3117 } 3118 rcu_sleep_check(); 3119 3120 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 3121 3122 schedstat_inc(this_rq(), sched_count); 3123 } 3124 3125 /* 3126 * Pick up the highest-prio task: 3127 */ 3128 static inline struct task_struct * 3129 pick_next_task(struct rq *rq, struct task_struct *prev) 3130 { 3131 const struct sched_class *class = &fair_sched_class; 3132 struct task_struct *p; 3133 3134 /* 3135 * Optimization: we know that if all tasks are in 3136 * the fair class we can call that function 
directly: 3137 */ 3138 if (likely(prev->sched_class == class && 3139 rq->nr_running == rq->cfs.h_nr_running)) { 3140 p = fair_sched_class.pick_next_task(rq, prev); 3141 if (unlikely(p == RETRY_TASK)) 3142 goto again; 3143 3144 /* assumes fair_sched_class->next == idle_sched_class */ 3145 if (unlikely(!p)) 3146 p = idle_sched_class.pick_next_task(rq, prev); 3147 3148 return p; 3149 } 3150 3151 again: 3152 for_each_class(class) { 3153 p = class->pick_next_task(rq, prev); 3154 if (p) { 3155 if (unlikely(p == RETRY_TASK)) 3156 goto again; 3157 return p; 3158 } 3159 } 3160 3161 BUG(); /* the idle class will always have a runnable task */ 3162 } 3163 3164 /* 3165 * __schedule() is the main scheduler function. 3166 * 3167 * The main means of driving the scheduler and thus entering this function are: 3168 * 3169 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 3170 * 3171 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 3172 * paths. For example, see arch/x86/entry_64.S. 3173 * 3174 * To drive preemption between tasks, the scheduler sets the flag in timer 3175 * interrupt handler scheduler_tick(). 3176 * 3177 * 3. Wakeups don't really cause entry into schedule(). They add a 3178 * task to the run-queue and that's it. 3179 * 3180 * Now, if the new task added to the run-queue preempts the current 3181 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 3182 * called on the nearest possible occasion: 3183 * 3184 * - If the kernel is preemptible (CONFIG_PREEMPT=y): 3185 * 3186 * - in syscall or exception context, at the next outmost 3187 * preempt_enable(). (this might be as soon as the wake_up()'s 3188 * spin_unlock()!) 3189 * 3190 * - in IRQ context, return from interrupt-handler to 3191 * preemptible context 3192 * 3193 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) 3194 * then at the next: 3195 * 3196 * - cond_resched() call 3197 * - explicit schedule() call 3198 * - return from syscall or exception to user-space 3199 * - return from interrupt-handler to user-space 3200 * 3201 * WARNING: must be called with preemption disabled! 3202 */ 3203 static void __sched notrace __schedule(bool preempt) 3204 { 3205 struct task_struct *prev, *next; 3206 unsigned long *switch_count; 3207 struct rq *rq; 3208 int cpu; 3209 3210 cpu = smp_processor_id(); 3211 rq = cpu_rq(cpu); 3212 prev = rq->curr; 3213 3214 /* 3215 * do_exit() calls schedule() with preemption disabled as an exception; 3216 * however we must fix that up, otherwise the next task will see an 3217 * inconsistent (higher) preempt count. 3218 * 3219 * It also avoids the below schedule_debug() test from complaining 3220 * about this. 3221 */ 3222 if (unlikely(prev->state == TASK_DEAD)) 3223 preempt_enable_no_resched_notrace(); 3224 3225 schedule_debug(prev); 3226 3227 if (sched_feat(HRTICK)) 3228 hrtick_clear(rq); 3229 3230 local_irq_disable(); 3231 rcu_note_context_switch(); 3232 3233 /* 3234 * Make sure that signal_pending_state()->signal_pending() below 3235 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 3236 * done by the caller to avoid the race with signal_wake_up(). 
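 * The caller side this pairs with is, roughly, the classic wait loop
 * (a sketch; real code normally uses the wait_event*() helpers, and
 * CONDITION stands for whatever the sleeper is waiting on):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (CONDITION)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * with the waker doing "CONDITION = 1; wake_up_process(p);", which is
 * exactly the ordering try_to_wake_up() relies on above.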
3237 */ 3238 smp_mb__before_spinlock(); 3239 raw_spin_lock(&rq->lock); 3240 lockdep_pin_lock(&rq->lock); 3241 3242 rq->clock_skip_update <<= 1; /* promote REQ to ACT */ 3243 3244 switch_count = &prev->nivcsw; 3245 if (!preempt && prev->state) { 3246 if (unlikely(signal_pending_state(prev->state, prev))) { 3247 prev->state = TASK_RUNNING; 3248 } else { 3249 deactivate_task(rq, prev, DEQUEUE_SLEEP); 3250 prev->on_rq = 0; 3251 3252 /* 3253 * If a worker went to sleep, notify and ask workqueue 3254 * whether it wants to wake up a task to maintain 3255 * concurrency. 3256 */ 3257 if (prev->flags & PF_WQ_WORKER) { 3258 struct task_struct *to_wakeup; 3259 3260 to_wakeup = wq_worker_sleeping(prev, cpu); 3261 if (to_wakeup) 3262 try_to_wake_up_local(to_wakeup); 3263 } 3264 } 3265 switch_count = &prev->nvcsw; 3266 } 3267 3268 if (task_on_rq_queued(prev)) 3269 update_rq_clock(rq); 3270 3271 next = pick_next_task(rq, prev); 3272 clear_tsk_need_resched(prev); 3273 clear_preempt_need_resched(); 3274 rq->clock_skip_update = 0; 3275 3276 if (likely(prev != next)) { 3277 rq->nr_switches++; 3278 rq->curr = next; 3279 ++*switch_count; 3280 3281 trace_sched_switch(preempt, prev, next); 3282 rq = context_switch(rq, prev, next); /* unlocks the rq */ 3283 cpu = cpu_of(rq); 3284 } else { 3285 lockdep_unpin_lock(&rq->lock); 3286 raw_spin_unlock_irq(&rq->lock); 3287 } 3288 3289 balance_callback(rq); 3290 } 3291 3292 static inline void sched_submit_work(struct task_struct *tsk) 3293 { 3294 if (!tsk->state || tsk_is_pi_blocked(tsk)) 3295 return; 3296 /* 3297 * If we are going to sleep and we have plugged IO queued, 3298 * make sure to submit it to avoid deadlocks. 3299 */ 3300 if (blk_needs_flush_plug(tsk)) 3301 blk_schedule_flush_plug(tsk); 3302 } 3303 3304 asmlinkage __visible void __sched schedule(void) 3305 { 3306 struct task_struct *tsk = current; 3307 3308 sched_submit_work(tsk); 3309 do { 3310 preempt_disable(); 3311 __schedule(false); 3312 sched_preempt_enable_no_resched(); 3313 } while (need_resched()); 3314 } 3315 EXPORT_SYMBOL(schedule); 3316 3317 #ifdef CONFIG_CONTEXT_TRACKING 3318 asmlinkage __visible void __sched schedule_user(void) 3319 { 3320 /* 3321 * If we come here after a random call to set_need_resched(), 3322 * or we have been woken up remotely but the IPI has not yet arrived, 3323 * we haven't yet exited the RCU idle mode. Do it here manually until 3324 * we find a better solution. 3325 * 3326 * NB: There are buggy callers of this function. Ideally we 3327 * should warn if prev_state != CONTEXT_USER, but that will trigger 3328 * too frequently to make sense yet. 3329 */ 3330 enum ctx_state prev_state = exception_enter(); 3331 schedule(); 3332 exception_exit(prev_state); 3333 } 3334 #endif 3335 3336 /** 3337 * schedule_preempt_disabled - called with preemption disabled 3338 * 3339 * Returns with preemption disabled. Note: preempt_count must be 1 3340 */ 3341 void __sched schedule_preempt_disabled(void) 3342 { 3343 sched_preempt_enable_no_resched(); 3344 schedule(); 3345 preempt_disable(); 3346 } 3347 3348 static void __sched notrace preempt_schedule_common(void) 3349 { 3350 do { 3351 preempt_disable_notrace(); 3352 __schedule(true); 3353 preempt_enable_no_resched_notrace(); 3354 3355 /* 3356 * Check again in case we missed a preemption opportunity 3357 * between schedule and now. 3358 */ 3359 } while (need_resched()); 3360 } 3361 3362 #ifdef CONFIG_PREEMPT 3363 /* 3364 * this is the entry point to schedule() from in-kernel preemption 3365 * off of preempt_enable. 
Kernel preemptions off return from interrupt 3366 * occur there and call schedule directly. 3367 */ 3368 asmlinkage __visible void __sched notrace preempt_schedule(void) 3369 { 3370 /* 3371 * If there is a non-zero preempt_count or interrupts are disabled, 3372 * we do not want to preempt the current task. Just return.. 3373 */ 3374 if (likely(!preemptible())) 3375 return; 3376 3377 preempt_schedule_common(); 3378 } 3379 NOKPROBE_SYMBOL(preempt_schedule); 3380 EXPORT_SYMBOL(preempt_schedule); 3381 3382 /** 3383 * preempt_schedule_notrace - preempt_schedule called by tracing 3384 * 3385 * The tracing infrastructure uses preempt_enable_notrace to prevent 3386 * recursion and tracing preempt enabling caused by the tracing 3387 * infrastructure itself. But as tracing can happen in areas coming 3388 * from userspace or just about to enter userspace, a preempt enable 3389 * can occur before user_exit() is called. This will cause the scheduler 3390 * to be called when the system is still in usermode. 3391 * 3392 * To prevent this, the preempt_enable_notrace will use this function 3393 * instead of preempt_schedule() to exit user context if needed before 3394 * calling the scheduler. 3395 */ 3396 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 3397 { 3398 enum ctx_state prev_ctx; 3399 3400 if (likely(!preemptible())) 3401 return; 3402 3403 do { 3404 preempt_disable_notrace(); 3405 /* 3406 * Needs preempt disabled in case user_exit() is traced 3407 * and the tracer calls preempt_enable_notrace() causing 3408 * an infinite recursion. 3409 */ 3410 prev_ctx = exception_enter(); 3411 __schedule(true); 3412 exception_exit(prev_ctx); 3413 3414 preempt_enable_no_resched_notrace(); 3415 } while (need_resched()); 3416 } 3417 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 3418 3419 #endif /* CONFIG_PREEMPT */ 3420 3421 /* 3422 * this is the entry point to schedule() from kernel preemption 3423 * off of irq context. 3424 * Note, that this is called and return with irqs disabled. This will 3425 * protect us against recursive calling from irq. 3426 */ 3427 asmlinkage __visible void __sched preempt_schedule_irq(void) 3428 { 3429 enum ctx_state prev_state; 3430 3431 /* Catch callers which need to be fixed */ 3432 BUG_ON(preempt_count() || !irqs_disabled()); 3433 3434 prev_state = exception_enter(); 3435 3436 do { 3437 preempt_disable(); 3438 local_irq_enable(); 3439 __schedule(true); 3440 local_irq_disable(); 3441 sched_preempt_enable_no_resched(); 3442 } while (need_resched()); 3443 3444 exception_exit(prev_state); 3445 } 3446 3447 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, 3448 void *key) 3449 { 3450 return try_to_wake_up(curr->private, mode, wake_flags); 3451 } 3452 EXPORT_SYMBOL(default_wake_function); 3453 3454 #ifdef CONFIG_RT_MUTEXES 3455 3456 /* 3457 * rt_mutex_setprio - set the current priority of a task 3458 * @p: task 3459 * @prio: prio value (kernel-internal form) 3460 * 3461 * This function changes the 'effective' priority of a task. It does 3462 * not touch ->normal_prio like __setscheduler(). 3463 * 3464 * Used by the rt_mutex code to implement priority inheritance 3465 * logic. Call site only calls if the priority of the task changed. 
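 * For example (a sketch in kernel-internal prio values): a SCHED_NORMAL
 * nice-0 owner runs at prio 120; when a SCHED_FIFO waiter with
 * rt_priority 90 (kernel prio 99 - 90 = 9) blocks on an rt_mutex the
 * owner holds, the rt_mutex code calls rt_mutex_setprio(owner, 9),
 * temporarily moving the owner to the RT class, and restores the
 * owner's normal priority with another call once the lock is released.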
3466 */ 3467 void rt_mutex_setprio(struct task_struct *p, int prio) 3468 { 3469 int oldprio, queued, running, enqueue_flag = ENQUEUE_RESTORE; 3470 struct rq *rq; 3471 const struct sched_class *prev_class; 3472 3473 BUG_ON(prio > MAX_PRIO); 3474 3475 rq = __task_rq_lock(p); 3476 3477 /* 3478 * Idle task boosting is a nono in general. There is one 3479 * exception, when PREEMPT_RT and NOHZ is active: 3480 * 3481 * The idle task calls get_next_timer_interrupt() and holds 3482 * the timer wheel base->lock on the CPU and another CPU wants 3483 * to access the timer (probably to cancel it). We can safely 3484 * ignore the boosting request, as the idle CPU runs this code 3485 * with interrupts disabled and will complete the lock 3486 * protected section without being interrupted. So there is no 3487 * real need to boost. 3488 */ 3489 if (unlikely(p == rq->idle)) { 3490 WARN_ON(p != rq->curr); 3491 WARN_ON(p->pi_blocked_on); 3492 goto out_unlock; 3493 } 3494 3495 trace_sched_pi_setprio(p, prio); 3496 oldprio = p->prio; 3497 prev_class = p->sched_class; 3498 queued = task_on_rq_queued(p); 3499 running = task_current(rq, p); 3500 if (queued) 3501 dequeue_task(rq, p, DEQUEUE_SAVE); 3502 if (running) 3503 put_prev_task(rq, p); 3504 3505 /* 3506 * Boosting condition are: 3507 * 1. -rt task is running and holds mutex A 3508 * --> -dl task blocks on mutex A 3509 * 3510 * 2. -dl task is running and holds mutex A 3511 * --> -dl task blocks on mutex A and could preempt the 3512 * running task 3513 */ 3514 if (dl_prio(prio)) { 3515 struct task_struct *pi_task = rt_mutex_get_top_task(p); 3516 if (!dl_prio(p->normal_prio) || 3517 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { 3518 p->dl.dl_boosted = 1; 3519 enqueue_flag |= ENQUEUE_REPLENISH; 3520 } else 3521 p->dl.dl_boosted = 0; 3522 p->sched_class = &dl_sched_class; 3523 } else if (rt_prio(prio)) { 3524 if (dl_prio(oldprio)) 3525 p->dl.dl_boosted = 0; 3526 if (oldprio < prio) 3527 enqueue_flag |= ENQUEUE_HEAD; 3528 p->sched_class = &rt_sched_class; 3529 } else { 3530 if (dl_prio(oldprio)) 3531 p->dl.dl_boosted = 0; 3532 if (rt_prio(oldprio)) 3533 p->rt.timeout = 0; 3534 p->sched_class = &fair_sched_class; 3535 } 3536 3537 p->prio = prio; 3538 3539 if (running) 3540 p->sched_class->set_curr_task(rq); 3541 if (queued) 3542 enqueue_task(rq, p, enqueue_flag); 3543 3544 check_class_changed(rq, p, prev_class, oldprio); 3545 out_unlock: 3546 preempt_disable(); /* avoid rq from going away on us */ 3547 __task_rq_unlock(rq); 3548 3549 balance_callback(rq); 3550 preempt_enable(); 3551 } 3552 #endif 3553 3554 void set_user_nice(struct task_struct *p, long nice) 3555 { 3556 int old_prio, delta, queued; 3557 unsigned long flags; 3558 struct rq *rq; 3559 3560 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 3561 return; 3562 /* 3563 * We have to be careful, if called from sys_setpriority(), 3564 * the task might be in the middle of scheduling on another CPU. 
3565 */ 3566 rq = task_rq_lock(p, &flags); 3567 /* 3568 * The RT priorities are set via sched_setscheduler(), but we still 3569 * allow the 'normal' nice value to be set - but as expected 3570 * it wont have any effect on scheduling until the task is 3571 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 3572 */ 3573 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 3574 p->static_prio = NICE_TO_PRIO(nice); 3575 goto out_unlock; 3576 } 3577 queued = task_on_rq_queued(p); 3578 if (queued) 3579 dequeue_task(rq, p, DEQUEUE_SAVE); 3580 3581 p->static_prio = NICE_TO_PRIO(nice); 3582 set_load_weight(p); 3583 old_prio = p->prio; 3584 p->prio = effective_prio(p); 3585 delta = p->prio - old_prio; 3586 3587 if (queued) { 3588 enqueue_task(rq, p, ENQUEUE_RESTORE); 3589 /* 3590 * If the task increased its priority or is running and 3591 * lowered its priority, then reschedule its CPU: 3592 */ 3593 if (delta < 0 || (delta > 0 && task_running(rq, p))) 3594 resched_curr(rq); 3595 } 3596 out_unlock: 3597 task_rq_unlock(rq, p, &flags); 3598 } 3599 EXPORT_SYMBOL(set_user_nice); 3600 3601 /* 3602 * can_nice - check if a task can reduce its nice value 3603 * @p: task 3604 * @nice: nice value 3605 */ 3606 int can_nice(const struct task_struct *p, const int nice) 3607 { 3608 /* convert nice value [19,-20] to rlimit style value [1,40] */ 3609 int nice_rlim = nice_to_rlimit(nice); 3610 3611 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 3612 capable(CAP_SYS_NICE)); 3613 } 3614 3615 #ifdef __ARCH_WANT_SYS_NICE 3616 3617 /* 3618 * sys_nice - change the priority of the current process. 3619 * @increment: priority increment 3620 * 3621 * sys_setpriority is a more generic, but much slower function that 3622 * does similar things. 3623 */ 3624 SYSCALL_DEFINE1(nice, int, increment) 3625 { 3626 long nice, retval; 3627 3628 /* 3629 * Setpriority might change our priority at the same moment. 3630 * We don't have to worry. Conceptually one call occurs first 3631 * and we have a single winner. 3632 */ 3633 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 3634 nice = task_nice(current) + increment; 3635 3636 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 3637 if (increment < 0 && !can_nice(current, nice)) 3638 return -EPERM; 3639 3640 retval = security_task_setnice(current, nice); 3641 if (retval) 3642 return retval; 3643 3644 set_user_nice(current, nice); 3645 return 0; 3646 } 3647 3648 #endif 3649 3650 /** 3651 * task_prio - return the priority value of a given task. 3652 * @p: the task in question. 3653 * 3654 * Return: The priority value as seen by users in /proc. 3655 * RT tasks are offset by -200. Normal tasks are centered 3656 * around 0, value goes from -16 to +15. 3657 */ 3658 int task_prio(const struct task_struct *p) 3659 { 3660 return p->prio - MAX_RT_PRIO; 3661 } 3662 3663 /** 3664 * idle_cpu - is a given cpu idle currently? 3665 * @cpu: the processor in question. 3666 * 3667 * Return: 1 if the CPU is currently idle. 0 otherwise. 3668 */ 3669 int idle_cpu(int cpu) 3670 { 3671 struct rq *rq = cpu_rq(cpu); 3672 3673 if (rq->curr != rq->idle) 3674 return 0; 3675 3676 if (rq->nr_running) 3677 return 0; 3678 3679 #ifdef CONFIG_SMP 3680 if (!llist_empty(&rq->wake_list)) 3681 return 0; 3682 #endif 3683 3684 return 1; 3685 } 3686 3687 /** 3688 * idle_task - return the idle task for a given cpu. 3689 * @cpu: the processor in question. 3690 * 3691 * Return: The idle task for the cpu @cpu. 
3692 */ 3693 struct task_struct *idle_task(int cpu) 3694 { 3695 return cpu_rq(cpu)->idle; 3696 } 3697 3698 /** 3699 * find_process_by_pid - find a process with a matching PID value. 3700 * @pid: the pid in question. 3701 * 3702 * The task of @pid, if found. %NULL otherwise. 3703 */ 3704 static struct task_struct *find_process_by_pid(pid_t pid) 3705 { 3706 return pid ? find_task_by_vpid(pid) : current; 3707 } 3708 3709 /* 3710 * This function initializes the sched_dl_entity of a newly becoming 3711 * SCHED_DEADLINE task. 3712 * 3713 * Only the static values are considered here, the actual runtime and the 3714 * absolute deadline will be properly calculated when the task is enqueued 3715 * for the first time with its new policy. 3716 */ 3717 static void 3718 __setparam_dl(struct task_struct *p, const struct sched_attr *attr) 3719 { 3720 struct sched_dl_entity *dl_se = &p->dl; 3721 3722 dl_se->dl_runtime = attr->sched_runtime; 3723 dl_se->dl_deadline = attr->sched_deadline; 3724 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 3725 dl_se->flags = attr->sched_flags; 3726 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 3727 3728 /* 3729 * Changing the parameters of a task is 'tricky' and we're not doing 3730 * the correct thing -- also see task_dead_dl() and switched_from_dl(). 3731 * 3732 * What we SHOULD do is delay the bandwidth release until the 0-lag 3733 * point. This would include retaining the task_struct until that time 3734 * and change dl_overflow() to not immediately decrement the current 3735 * amount. 3736 * 3737 * Instead we retain the current runtime/deadline and let the new 3738 * parameters take effect after the current reservation period lapses. 3739 * This is safe (albeit pessimistic) because the 0-lag point is always 3740 * before the current scheduling deadline. 3741 * 3742 * We can still have temporary overloads because we do not delay the 3743 * change in bandwidth until that time; so admission control is 3744 * not on the safe side. It does however guarantee tasks will never 3745 * consume more than promised. 3746 */ 3747 } 3748 3749 /* 3750 * sched_setparam() passes in -1 for its policy, to let the functions 3751 * it calls know not to change it. 3752 */ 3753 #define SETPARAM_POLICY -1 3754 3755 static void __setscheduler_params(struct task_struct *p, 3756 const struct sched_attr *attr) 3757 { 3758 int policy = attr->sched_policy; 3759 3760 if (policy == SETPARAM_POLICY) 3761 policy = p->policy; 3762 3763 p->policy = policy; 3764 3765 if (dl_policy(policy)) 3766 __setparam_dl(p, attr); 3767 else if (fair_policy(policy)) 3768 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 3769 3770 /* 3771 * __sched_setscheduler() ensures attr->sched_priority == 0 when 3772 * !rt_policy. Always setting this ensures that things like 3773 * getparam()/getattr() don't report silly values for !rt tasks. 3774 */ 3775 p->rt_priority = attr->sched_priority; 3776 p->normal_prio = normal_prio(p); 3777 set_load_weight(p); 3778 } 3779 3780 /* Actually do priority change: must hold pi & rq lock. */ 3781 static void __setscheduler(struct rq *rq, struct task_struct *p, 3782 const struct sched_attr *attr, bool keep_boost) 3783 { 3784 __setscheduler_params(p, attr); 3785 3786 /* 3787 * Keep a potential priority boosting if called from 3788 * sched_setscheduler(). 
3789 */ 3790 if (keep_boost) 3791 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p)); 3792 else 3793 p->prio = normal_prio(p); 3794 3795 if (dl_prio(p->prio)) 3796 p->sched_class = &dl_sched_class; 3797 else if (rt_prio(p->prio)) 3798 p->sched_class = &rt_sched_class; 3799 else 3800 p->sched_class = &fair_sched_class; 3801 } 3802 3803 static void 3804 __getparam_dl(struct task_struct *p, struct sched_attr *attr) 3805 { 3806 struct sched_dl_entity *dl_se = &p->dl; 3807 3808 attr->sched_priority = p->rt_priority; 3809 attr->sched_runtime = dl_se->dl_runtime; 3810 attr->sched_deadline = dl_se->dl_deadline; 3811 attr->sched_period = dl_se->dl_period; 3812 attr->sched_flags = dl_se->flags; 3813 } 3814 3815 /* 3816 * This function validates the new parameters of a -deadline task. 3817 * We ask for the deadline not being zero, and greater or equal 3818 * than the runtime, as well as the period of being zero or 3819 * greater than deadline. Furthermore, we have to be sure that 3820 * user parameters are above the internal resolution of 1us (we 3821 * check sched_runtime only since it is always the smaller one) and 3822 * below 2^63 ns (we have to check both sched_deadline and 3823 * sched_period, as the latter can be zero). 3824 */ 3825 static bool 3826 __checkparam_dl(const struct sched_attr *attr) 3827 { 3828 /* deadline != 0 */ 3829 if (attr->sched_deadline == 0) 3830 return false; 3831 3832 /* 3833 * Since we truncate DL_SCALE bits, make sure we're at least 3834 * that big. 3835 */ 3836 if (attr->sched_runtime < (1ULL << DL_SCALE)) 3837 return false; 3838 3839 /* 3840 * Since we use the MSB for wrap-around and sign issues, make 3841 * sure it's not set (mind that period can be equal to zero). 3842 */ 3843 if (attr->sched_deadline & (1ULL << 63) || 3844 attr->sched_period & (1ULL << 63)) 3845 return false; 3846 3847 /* runtime <= deadline <= period (if period != 0) */ 3848 if ((attr->sched_period != 0 && 3849 attr->sched_period < attr->sched_deadline) || 3850 attr->sched_deadline < attr->sched_runtime) 3851 return false; 3852 3853 return true; 3854 } 3855 3856 /* 3857 * check the target process has a UID that matches the current process's 3858 */ 3859 static bool check_same_owner(struct task_struct *p) 3860 { 3861 const struct cred *cred = current_cred(), *pcred; 3862 bool match; 3863 3864 rcu_read_lock(); 3865 pcred = __task_cred(p); 3866 match = (uid_eq(cred->euid, pcred->euid) || 3867 uid_eq(cred->euid, pcred->uid)); 3868 rcu_read_unlock(); 3869 return match; 3870 } 3871 3872 static bool dl_param_changed(struct task_struct *p, 3873 const struct sched_attr *attr) 3874 { 3875 struct sched_dl_entity *dl_se = &p->dl; 3876 3877 if (dl_se->dl_runtime != attr->sched_runtime || 3878 dl_se->dl_deadline != attr->sched_deadline || 3879 dl_se->dl_period != attr->sched_period || 3880 dl_se->flags != attr->sched_flags) 3881 return true; 3882 3883 return false; 3884 } 3885 3886 static int __sched_setscheduler(struct task_struct *p, 3887 const struct sched_attr *attr, 3888 bool user, bool pi) 3889 { 3890 int newprio = dl_policy(attr->sched_policy) ? 
MAX_DL_PRIO - 1 : 3891 MAX_RT_PRIO - 1 - attr->sched_priority; 3892 int retval, oldprio, oldpolicy = -1, queued, running; 3893 int new_effective_prio, policy = attr->sched_policy; 3894 unsigned long flags; 3895 const struct sched_class *prev_class; 3896 struct rq *rq; 3897 int reset_on_fork; 3898 3899 /* may grab non-irq protected spin_locks */ 3900 BUG_ON(in_interrupt()); 3901 recheck: 3902 /* double check policy once rq lock held */ 3903 if (policy < 0) { 3904 reset_on_fork = p->sched_reset_on_fork; 3905 policy = oldpolicy = p->policy; 3906 } else { 3907 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 3908 3909 if (!valid_policy(policy)) 3910 return -EINVAL; 3911 } 3912 3913 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK)) 3914 return -EINVAL; 3915 3916 /* 3917 * Valid priorities for SCHED_FIFO and SCHED_RR are 3918 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 3919 * SCHED_BATCH and SCHED_IDLE is 0. 3920 */ 3921 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || 3922 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) 3923 return -EINVAL; 3924 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 3925 (rt_policy(policy) != (attr->sched_priority != 0))) 3926 return -EINVAL; 3927 3928 /* 3929 * Allow unprivileged RT tasks to decrease priority: 3930 */ 3931 if (user && !capable(CAP_SYS_NICE)) { 3932 if (fair_policy(policy)) { 3933 if (attr->sched_nice < task_nice(p) && 3934 !can_nice(p, attr->sched_nice)) 3935 return -EPERM; 3936 } 3937 3938 if (rt_policy(policy)) { 3939 unsigned long rlim_rtprio = 3940 task_rlimit(p, RLIMIT_RTPRIO); 3941 3942 /* can't set/change the rt policy */ 3943 if (policy != p->policy && !rlim_rtprio) 3944 return -EPERM; 3945 3946 /* can't increase priority */ 3947 if (attr->sched_priority > p->rt_priority && 3948 attr->sched_priority > rlim_rtprio) 3949 return -EPERM; 3950 } 3951 3952 /* 3953 * Can't set/change SCHED_DEADLINE policy at all for now 3954 * (safest behavior); in the future we would like to allow 3955 * unprivileged DL tasks to increase their relative deadline 3956 * or reduce their runtime (both ways reducing utilization) 3957 */ 3958 if (dl_policy(policy)) 3959 return -EPERM; 3960 3961 /* 3962 * Treat SCHED_IDLE as nice 20. Only allow a switch to 3963 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 3964 */ 3965 if (idle_policy(p->policy) && !idle_policy(policy)) { 3966 if (!can_nice(p, task_nice(p))) 3967 return -EPERM; 3968 } 3969 3970 /* can't change other user's priorities */ 3971 if (!check_same_owner(p)) 3972 return -EPERM; 3973 3974 /* Normal users shall not reset the sched_reset_on_fork flag */ 3975 if (p->sched_reset_on_fork && !reset_on_fork) 3976 return -EPERM; 3977 } 3978 3979 if (user) { 3980 retval = security_task_setscheduler(p); 3981 if (retval) 3982 return retval; 3983 } 3984 3985 /* 3986 * make sure no PI-waiters arrive (or leave) while we are 3987 * changing the priority of the task: 3988 * 3989 * To be able to change p->policy safely, the appropriate 3990 * runqueue lock must be held. 3991 */ 3992 rq = task_rq_lock(p, &flags); 3993 3994 /* 3995 * Changing the policy of the stop threads its a very bad idea 3996 */ 3997 if (p == rq->stop) { 3998 task_rq_unlock(rq, p, &flags); 3999 return -EINVAL; 4000 } 4001 4002 /* 4003 * If not changing anything there's no need to proceed further, 4004 * but store a possible modification of reset_on_fork. 
4005 */ 4006 if (unlikely(policy == p->policy)) { 4007 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 4008 goto change; 4009 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 4010 goto change; 4011 if (dl_policy(policy) && dl_param_changed(p, attr)) 4012 goto change; 4013 4014 p->sched_reset_on_fork = reset_on_fork; 4015 task_rq_unlock(rq, p, &flags); 4016 return 0; 4017 } 4018 change: 4019 4020 if (user) { 4021 #ifdef CONFIG_RT_GROUP_SCHED 4022 /* 4023 * Do not allow realtime tasks into groups that have no runtime 4024 * assigned. 4025 */ 4026 if (rt_bandwidth_enabled() && rt_policy(policy) && 4027 task_group(p)->rt_bandwidth.rt_runtime == 0 && 4028 !task_group_is_autogroup(task_group(p))) { 4029 task_rq_unlock(rq, p, &flags); 4030 return -EPERM; 4031 } 4032 #endif 4033 #ifdef CONFIG_SMP 4034 if (dl_bandwidth_enabled() && dl_policy(policy)) { 4035 cpumask_t *span = rq->rd->span; 4036 4037 /* 4038 * Don't allow tasks with an affinity mask smaller than 4039 * the entire root_domain to become SCHED_DEADLINE. We 4040 * will also fail if there's no bandwidth available. 4041 */ 4042 if (!cpumask_subset(span, &p->cpus_allowed) || 4043 rq->rd->dl_bw.bw == 0) { 4044 task_rq_unlock(rq, p, &flags); 4045 return -EPERM; 4046 } 4047 } 4048 #endif 4049 } 4050 4051 /* recheck policy now with rq lock held */ 4052 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 4053 policy = oldpolicy = -1; 4054 task_rq_unlock(rq, p, &flags); 4055 goto recheck; 4056 } 4057 4058 /* 4059 * If setscheduling to SCHED_DEADLINE (or changing the parameters 4060 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 4061 * is available. 4062 */ 4063 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) { 4064 task_rq_unlock(rq, p, &flags); 4065 return -EBUSY; 4066 } 4067 4068 p->sched_reset_on_fork = reset_on_fork; 4069 oldprio = p->prio; 4070 4071 if (pi) { 4072 /* 4073 * Take priority boosted tasks into account. If the new 4074 * effective priority is unchanged, we just store the new 4075 * normal parameters and do not touch the scheduler class and 4076 * the runqueue. This will be done when the task deboost 4077 * itself. 4078 */ 4079 new_effective_prio = rt_mutex_get_effective_prio(p, newprio); 4080 if (new_effective_prio == oldprio) { 4081 __setscheduler_params(p, attr); 4082 task_rq_unlock(rq, p, &flags); 4083 return 0; 4084 } 4085 } 4086 4087 queued = task_on_rq_queued(p); 4088 running = task_current(rq, p); 4089 if (queued) 4090 dequeue_task(rq, p, DEQUEUE_SAVE); 4091 if (running) 4092 put_prev_task(rq, p); 4093 4094 prev_class = p->sched_class; 4095 __setscheduler(rq, p, attr, pi); 4096 4097 if (running) 4098 p->sched_class->set_curr_task(rq); 4099 if (queued) { 4100 int enqueue_flags = ENQUEUE_RESTORE; 4101 /* 4102 * We enqueue to tail when the priority of a task is 4103 * increased (user space view). 4104 */ 4105 if (oldprio <= p->prio) 4106 enqueue_flags |= ENQUEUE_HEAD; 4107 4108 enqueue_task(rq, p, enqueue_flags); 4109 } 4110 4111 check_class_changed(rq, p, prev_class, oldprio); 4112 preempt_disable(); /* avoid rq from going away on us */ 4113 task_rq_unlock(rq, p, &flags); 4114 4115 if (pi) 4116 rt_mutex_adjust_pi(p); 4117 4118 /* 4119 * Run balance callbacks after we've adjusted the PI chain. 
4120 */ 4121 balance_callback(rq); 4122 preempt_enable(); 4123 4124 return 0; 4125 } 4126 4127 static int _sched_setscheduler(struct task_struct *p, int policy, 4128 const struct sched_param *param, bool check) 4129 { 4130 struct sched_attr attr = { 4131 .sched_policy = policy, 4132 .sched_priority = param->sched_priority, 4133 .sched_nice = PRIO_TO_NICE(p->static_prio), 4134 }; 4135 4136 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 4137 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 4138 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 4139 policy &= ~SCHED_RESET_ON_FORK; 4140 attr.sched_policy = policy; 4141 } 4142 4143 return __sched_setscheduler(p, &attr, check, true); 4144 } 4145 /** 4146 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 4147 * @p: the task in question. 4148 * @policy: new policy. 4149 * @param: structure containing the new RT priority. 4150 * 4151 * Return: 0 on success. An error code otherwise. 4152 * 4153 * NOTE that the task may be already dead. 4154 */ 4155 int sched_setscheduler(struct task_struct *p, int policy, 4156 const struct sched_param *param) 4157 { 4158 return _sched_setscheduler(p, policy, param, true); 4159 } 4160 EXPORT_SYMBOL_GPL(sched_setscheduler); 4161 4162 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 4163 { 4164 return __sched_setscheduler(p, attr, true, true); 4165 } 4166 EXPORT_SYMBOL_GPL(sched_setattr); 4167 4168 /** 4169 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 4170 * @p: the task in question. 4171 * @policy: new policy. 4172 * @param: structure containing the new RT priority. 4173 * 4174 * Just like sched_setscheduler, only don't bother checking if the 4175 * current context has permission. For example, this is needed in 4176 * stop_machine(): we create temporary high priority worker threads, 4177 * but our caller might not have that capability. 4178 * 4179 * Return: 0 on success. An error code otherwise. 4180 */ 4181 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 4182 const struct sched_param *param) 4183 { 4184 return _sched_setscheduler(p, policy, param, false); 4185 } 4186 EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); 4187 4188 static int 4189 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 4190 { 4191 struct sched_param lparam; 4192 struct task_struct *p; 4193 int retval; 4194 4195 if (!param || pid < 0) 4196 return -EINVAL; 4197 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 4198 return -EFAULT; 4199 4200 rcu_read_lock(); 4201 retval = -ESRCH; 4202 p = find_process_by_pid(pid); 4203 if (p != NULL) 4204 retval = sched_setscheduler(p, policy, &lparam); 4205 rcu_read_unlock(); 4206 4207 return retval; 4208 } 4209 4210 /* 4211 * Mimics kernel/events/core.c perf_copy_attr(). 4212 */ 4213 static int sched_copy_attr(struct sched_attr __user *uattr, 4214 struct sched_attr *attr) 4215 { 4216 u32 size; 4217 int ret; 4218 4219 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0)) 4220 return -EFAULT; 4221 4222 /* 4223 * zero the full structure, so that a short copy will be nice. 
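 *
 * Rough sketch of the size handling that follows (sizes relative to our
 * sizeof(*attr)):
 *
 *	uattr->size == 0       -> assume SCHED_ATTR_SIZE_VER0 (old ABI)
 *	uattr->size <  sizeof  -> older user-space; the tail stays zeroed
 *	uattr->size >  sizeof  -> newer user-space; every byte we do not
 *	                          know about must be zero, else -E2BIG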
4224 */ 4225 memset(attr, 0, sizeof(*attr)); 4226 4227 ret = get_user(size, &uattr->size); 4228 if (ret) 4229 return ret; 4230 4231 if (size > PAGE_SIZE) /* silly large */ 4232 goto err_size; 4233 4234 if (!size) /* abi compat */ 4235 size = SCHED_ATTR_SIZE_VER0; 4236 4237 if (size < SCHED_ATTR_SIZE_VER0) 4238 goto err_size; 4239 4240 /* 4241 * If we're handed a bigger struct than we know of, 4242 * ensure all the unknown bits are 0 - i.e. new 4243 * user-space does not rely on any kernel feature 4244 * extensions we dont know about yet. 4245 */ 4246 if (size > sizeof(*attr)) { 4247 unsigned char __user *addr; 4248 unsigned char __user *end; 4249 unsigned char val; 4250 4251 addr = (void __user *)uattr + sizeof(*attr); 4252 end = (void __user *)uattr + size; 4253 4254 for (; addr < end; addr++) { 4255 ret = get_user(val, addr); 4256 if (ret) 4257 return ret; 4258 if (val) 4259 goto err_size; 4260 } 4261 size = sizeof(*attr); 4262 } 4263 4264 ret = copy_from_user(attr, uattr, size); 4265 if (ret) 4266 return -EFAULT; 4267 4268 /* 4269 * XXX: do we want to be lenient like existing syscalls; or do we want 4270 * to be strict and return an error on out-of-bounds values? 4271 */ 4272 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 4273 4274 return 0; 4275 4276 err_size: 4277 put_user(sizeof(*attr), &uattr->size); 4278 return -E2BIG; 4279 } 4280 4281 /** 4282 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 4283 * @pid: the pid in question. 4284 * @policy: new policy. 4285 * @param: structure containing the new RT priority. 4286 * 4287 * Return: 0 on success. An error code otherwise. 4288 */ 4289 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, 4290 struct sched_param __user *, param) 4291 { 4292 /* negative values for policy are not valid */ 4293 if (policy < 0) 4294 return -EINVAL; 4295 4296 return do_sched_setscheduler(pid, policy, param); 4297 } 4298 4299 /** 4300 * sys_sched_setparam - set/change the RT priority of a thread 4301 * @pid: the pid in question. 4302 * @param: structure containing the new RT priority. 4303 * 4304 * Return: 0 on success. An error code otherwise. 4305 */ 4306 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 4307 { 4308 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 4309 } 4310 4311 /** 4312 * sys_sched_setattr - same as above, but with extended sched_attr 4313 * @pid: the pid in question. 4314 * @uattr: structure containing the extended parameters. 4315 * @flags: for future extension. 4316 */ 4317 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 4318 unsigned int, flags) 4319 { 4320 struct sched_attr attr; 4321 struct task_struct *p; 4322 int retval; 4323 4324 if (!uattr || pid < 0 || flags) 4325 return -EINVAL; 4326 4327 retval = sched_copy_attr(uattr, &attr); 4328 if (retval) 4329 return retval; 4330 4331 if ((int)attr.sched_policy < 0) 4332 return -EINVAL; 4333 4334 rcu_read_lock(); 4335 retval = -ESRCH; 4336 p = find_process_by_pid(pid); 4337 if (p != NULL) 4338 retval = sched_setattr(p, &attr); 4339 rcu_read_unlock(); 4340 4341 return retval; 4342 } 4343 4344 /** 4345 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 4346 * @pid: the pid in question. 4347 * 4348 * Return: On success, the policy of the thread. Otherwise, a negative error 4349 * code. 
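 *
 * Minimal user-space sketch (illustrative only, using the raw syscall):
 *
 *	int policy = syscall(SYS_sched_getscheduler, pid);
 *	if (policy == -1)
 *		perror("sched_getscheduler");
 *
 * Note that SCHED_RESET_ON_FORK may be OR'ed into the returned policy
 * when the flag is set on the task, as done below.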
4350 */ 4351 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 4352 { 4353 struct task_struct *p; 4354 int retval; 4355 4356 if (pid < 0) 4357 return -EINVAL; 4358 4359 retval = -ESRCH; 4360 rcu_read_lock(); 4361 p = find_process_by_pid(pid); 4362 if (p) { 4363 retval = security_task_getscheduler(p); 4364 if (!retval) 4365 retval = p->policy 4366 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 4367 } 4368 rcu_read_unlock(); 4369 return retval; 4370 } 4371 4372 /** 4373 * sys_sched_getparam - get the RT priority of a thread 4374 * @pid: the pid in question. 4375 * @param: structure containing the RT priority. 4376 * 4377 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 4378 * code. 4379 */ 4380 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 4381 { 4382 struct sched_param lp = { .sched_priority = 0 }; 4383 struct task_struct *p; 4384 int retval; 4385 4386 if (!param || pid < 0) 4387 return -EINVAL; 4388 4389 rcu_read_lock(); 4390 p = find_process_by_pid(pid); 4391 retval = -ESRCH; 4392 if (!p) 4393 goto out_unlock; 4394 4395 retval = security_task_getscheduler(p); 4396 if (retval) 4397 goto out_unlock; 4398 4399 if (task_has_rt_policy(p)) 4400 lp.sched_priority = p->rt_priority; 4401 rcu_read_unlock(); 4402 4403 /* 4404 * This one might sleep, we cannot do it with a spinlock held ... 4405 */ 4406 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 4407 4408 return retval; 4409 4410 out_unlock: 4411 rcu_read_unlock(); 4412 return retval; 4413 } 4414 4415 static int sched_read_attr(struct sched_attr __user *uattr, 4416 struct sched_attr *attr, 4417 unsigned int usize) 4418 { 4419 int ret; 4420 4421 if (!access_ok(VERIFY_WRITE, uattr, usize)) 4422 return -EFAULT; 4423 4424 /* 4425 * If we're handed a smaller struct than we know of, 4426 * ensure all the unknown bits are 0 - i.e. old 4427 * user-space does not get uncomplete information. 4428 */ 4429 if (usize < sizeof(*attr)) { 4430 unsigned char *addr; 4431 unsigned char *end; 4432 4433 addr = (void *)attr + usize; 4434 end = (void *)attr + sizeof(*attr); 4435 4436 for (; addr < end; addr++) { 4437 if (*addr) 4438 return -EFBIG; 4439 } 4440 4441 attr->size = usize; 4442 } 4443 4444 ret = copy_to_user(uattr, attr, attr->size); 4445 if (ret) 4446 return -EFAULT; 4447 4448 return 0; 4449 } 4450 4451 /** 4452 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 4453 * @pid: the pid in question. 4454 * @uattr: structure containing the extended parameters. 4455 * @size: sizeof(attr) for fwd/bwd comp. 4456 * @flags: for future extension. 
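 *
 * Minimal user-space sketch (illustrative; assumes struct sched_attr is
 * visible to the program and uses the raw syscall, since glibc provides
 * no wrapper):
 *
 *	struct sched_attr attr;
 *
 *	if (syscall(SYS_sched_getattr, pid, &attr, sizeof(attr), 0) == -1)
 *		perror("sched_getattr");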
4457 */ 4458 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 4459 unsigned int, size, unsigned int, flags) 4460 { 4461 struct sched_attr attr = { 4462 .size = sizeof(struct sched_attr), 4463 }; 4464 struct task_struct *p; 4465 int retval; 4466 4467 if (!uattr || pid < 0 || size > PAGE_SIZE || 4468 size < SCHED_ATTR_SIZE_VER0 || flags) 4469 return -EINVAL; 4470 4471 rcu_read_lock(); 4472 p = find_process_by_pid(pid); 4473 retval = -ESRCH; 4474 if (!p) 4475 goto out_unlock; 4476 4477 retval = security_task_getscheduler(p); 4478 if (retval) 4479 goto out_unlock; 4480 4481 attr.sched_policy = p->policy; 4482 if (p->sched_reset_on_fork) 4483 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 4484 if (task_has_dl_policy(p)) 4485 __getparam_dl(p, &attr); 4486 else if (task_has_rt_policy(p)) 4487 attr.sched_priority = p->rt_priority; 4488 else 4489 attr.sched_nice = task_nice(p); 4490 4491 rcu_read_unlock(); 4492 4493 retval = sched_read_attr(uattr, &attr, size); 4494 return retval; 4495 4496 out_unlock: 4497 rcu_read_unlock(); 4498 return retval; 4499 } 4500 4501 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 4502 { 4503 cpumask_var_t cpus_allowed, new_mask; 4504 struct task_struct *p; 4505 int retval; 4506 4507 rcu_read_lock(); 4508 4509 p = find_process_by_pid(pid); 4510 if (!p) { 4511 rcu_read_unlock(); 4512 return -ESRCH; 4513 } 4514 4515 /* Prevent p going away */ 4516 get_task_struct(p); 4517 rcu_read_unlock(); 4518 4519 if (p->flags & PF_NO_SETAFFINITY) { 4520 retval = -EINVAL; 4521 goto out_put_task; 4522 } 4523 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 4524 retval = -ENOMEM; 4525 goto out_put_task; 4526 } 4527 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 4528 retval = -ENOMEM; 4529 goto out_free_cpus_allowed; 4530 } 4531 retval = -EPERM; 4532 if (!check_same_owner(p)) { 4533 rcu_read_lock(); 4534 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 4535 rcu_read_unlock(); 4536 goto out_free_new_mask; 4537 } 4538 rcu_read_unlock(); 4539 } 4540 4541 retval = security_task_setscheduler(p); 4542 if (retval) 4543 goto out_free_new_mask; 4544 4545 4546 cpuset_cpus_allowed(p, cpus_allowed); 4547 cpumask_and(new_mask, in_mask, cpus_allowed); 4548 4549 /* 4550 * Since bandwidth control happens on root_domain basis, 4551 * if admission test is enabled, we only admit -deadline 4552 * tasks allowed to run on all the CPUs in the task's 4553 * root_domain. 4554 */ 4555 #ifdef CONFIG_SMP 4556 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 4557 rcu_read_lock(); 4558 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { 4559 retval = -EBUSY; 4560 rcu_read_unlock(); 4561 goto out_free_new_mask; 4562 } 4563 rcu_read_unlock(); 4564 } 4565 #endif 4566 again: 4567 retval = __set_cpus_allowed_ptr(p, new_mask, true); 4568 4569 if (!retval) { 4570 cpuset_cpus_allowed(p, cpus_allowed); 4571 if (!cpumask_subset(new_mask, cpus_allowed)) { 4572 /* 4573 * We must have raced with a concurrent cpuset 4574 * update. 
Just reset the cpus_allowed to the 4575 * cpuset's cpus_allowed 4576 */ 4577 cpumask_copy(new_mask, cpus_allowed); 4578 goto again; 4579 } 4580 } 4581 out_free_new_mask: 4582 free_cpumask_var(new_mask); 4583 out_free_cpus_allowed: 4584 free_cpumask_var(cpus_allowed); 4585 out_put_task: 4586 put_task_struct(p); 4587 return retval; 4588 } 4589 4590 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 4591 struct cpumask *new_mask) 4592 { 4593 if (len < cpumask_size()) 4594 cpumask_clear(new_mask); 4595 else if (len > cpumask_size()) 4596 len = cpumask_size(); 4597 4598 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 4599 } 4600 4601 /** 4602 * sys_sched_setaffinity - set the cpu affinity of a process 4603 * @pid: pid of the process 4604 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4605 * @user_mask_ptr: user-space pointer to the new cpu mask 4606 * 4607 * Return: 0 on success. An error code otherwise. 4608 */ 4609 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 4610 unsigned long __user *, user_mask_ptr) 4611 { 4612 cpumask_var_t new_mask; 4613 int retval; 4614 4615 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 4616 return -ENOMEM; 4617 4618 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 4619 if (retval == 0) 4620 retval = sched_setaffinity(pid, new_mask); 4621 free_cpumask_var(new_mask); 4622 return retval; 4623 } 4624 4625 long sched_getaffinity(pid_t pid, struct cpumask *mask) 4626 { 4627 struct task_struct *p; 4628 unsigned long flags; 4629 int retval; 4630 4631 rcu_read_lock(); 4632 4633 retval = -ESRCH; 4634 p = find_process_by_pid(pid); 4635 if (!p) 4636 goto out_unlock; 4637 4638 retval = security_task_getscheduler(p); 4639 if (retval) 4640 goto out_unlock; 4641 4642 raw_spin_lock_irqsave(&p->pi_lock, flags); 4643 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); 4644 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4645 4646 out_unlock: 4647 rcu_read_unlock(); 4648 4649 return retval; 4650 } 4651 4652 /** 4653 * sys_sched_getaffinity - get the cpu affinity of a process 4654 * @pid: pid of the process 4655 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4656 * @user_mask_ptr: user-space pointer to hold the current cpu mask 4657 * 4658 * Return: 0 on success. An error code otherwise. 4659 */ 4660 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 4661 unsigned long __user *, user_mask_ptr) 4662 { 4663 int ret; 4664 cpumask_var_t mask; 4665 4666 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 4667 return -EINVAL; 4668 if (len & (sizeof(unsigned long)-1)) 4669 return -EINVAL; 4670 4671 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 4672 return -ENOMEM; 4673 4674 ret = sched_getaffinity(pid, mask); 4675 if (ret == 0) { 4676 size_t retlen = min_t(size_t, len, cpumask_size()); 4677 4678 if (copy_to_user(user_mask_ptr, mask, retlen)) 4679 ret = -EFAULT; 4680 else 4681 ret = retlen; 4682 } 4683 free_cpumask_var(mask); 4684 4685 return ret; 4686 } 4687 4688 /** 4689 * sys_sched_yield - yield the current processor to other threads. 4690 * 4691 * This function yields the current CPU to other tasks. If there are no 4692 * other threads running on this CPU then this function will return. 4693 * 4694 * Return: 0. 
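 *
 * User space reaches this via sched_yield(2) (glibc: sched_yield()).
 * It is chiefly useful for SCHED_FIFO/SCHED_RR tasks handing the CPU to
 * another runnable task of the same priority; for normal tasks, see the
 * comment above yield() further down in this file.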
4695 */ 4696 SYSCALL_DEFINE0(sched_yield) 4697 { 4698 struct rq *rq = this_rq_lock(); 4699 4700 schedstat_inc(rq, yld_count); 4701 current->sched_class->yield_task(rq); 4702 4703 /* 4704 * Since we are going to call schedule() anyway, there's 4705 * no need to preempt or enable interrupts: 4706 */ 4707 __release(rq->lock); 4708 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 4709 do_raw_spin_unlock(&rq->lock); 4710 sched_preempt_enable_no_resched(); 4711 4712 schedule(); 4713 4714 return 0; 4715 } 4716 4717 int __sched _cond_resched(void) 4718 { 4719 if (should_resched(0)) { 4720 preempt_schedule_common(); 4721 return 1; 4722 } 4723 return 0; 4724 } 4725 EXPORT_SYMBOL(_cond_resched); 4726 4727 /* 4728 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 4729 * call schedule, and on return reacquire the lock. 4730 * 4731 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level 4732 * operations here to prevent schedule() from being called twice (once via 4733 * spin_unlock(), once by hand). 4734 */ 4735 int __cond_resched_lock(spinlock_t *lock) 4736 { 4737 int resched = should_resched(PREEMPT_LOCK_OFFSET); 4738 int ret = 0; 4739 4740 lockdep_assert_held(lock); 4741 4742 if (spin_needbreak(lock) || resched) { 4743 spin_unlock(lock); 4744 if (resched) 4745 preempt_schedule_common(); 4746 else 4747 cpu_relax(); 4748 ret = 1; 4749 spin_lock(lock); 4750 } 4751 return ret; 4752 } 4753 EXPORT_SYMBOL(__cond_resched_lock); 4754 4755 int __sched __cond_resched_softirq(void) 4756 { 4757 BUG_ON(!in_softirq()); 4758 4759 if (should_resched(SOFTIRQ_DISABLE_OFFSET)) { 4760 local_bh_enable(); 4761 preempt_schedule_common(); 4762 local_bh_disable(); 4763 return 1; 4764 } 4765 return 0; 4766 } 4767 EXPORT_SYMBOL(__cond_resched_softirq); 4768 4769 /** 4770 * yield - yield the current processor to other threads. 4771 * 4772 * Do not ever use this function, there's a 99% chance you're doing it wrong. 4773 * 4774 * The scheduler is at all times free to pick the calling task as the most 4775 * eligible task to run, if removing the yield() call from your code breaks 4776 * it, its already broken. 4777 * 4778 * Typical broken usage is: 4779 * 4780 * while (!event) 4781 * yield(); 4782 * 4783 * where one assumes that yield() will let 'the other' process run that will 4784 * make event true. If the current task is a SCHED_FIFO task that will never 4785 * happen. Never use yield() as a progress guarantee!! 4786 * 4787 * If you want to use yield() to wait for something, use wait_event(). 4788 * If you want to use yield() to be 'nice' for others, use cond_resched(). 4789 * If you still want to use yield(), do not! 4790 */ 4791 void __sched yield(void) 4792 { 4793 set_current_state(TASK_RUNNING); 4794 sys_sched_yield(); 4795 } 4796 EXPORT_SYMBOL(yield); 4797 4798 /** 4799 * yield_to - yield the current processor to another thread in 4800 * your thread group, or accelerate that thread toward the 4801 * processor it's on. 4802 * @p: target task 4803 * @preempt: whether task preemption is allowed or not 4804 * 4805 * It's the caller's job to ensure that the target task struct 4806 * can't go away on us before we can do any checks. 4807 * 4808 * Return: 4809 * true (>0) if we indeed boosted the target task. 4810 * false (0) if we failed to boost the target. 4811 * -ESRCH if there's no task to yield to. 
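 *
 * A typical in-kernel user is virtualization code (for example KVM's
 * vcpu-on-spin handling), which uses this to nudge the scheduler toward
 * the thread believed to hold a contended lock; it is a hint, not a
 * guarantee.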
4812 */ 4813 int __sched yield_to(struct task_struct *p, bool preempt) 4814 { 4815 struct task_struct *curr = current; 4816 struct rq *rq, *p_rq; 4817 unsigned long flags; 4818 int yielded = 0; 4819 4820 local_irq_save(flags); 4821 rq = this_rq(); 4822 4823 again: 4824 p_rq = task_rq(p); 4825 /* 4826 * If we're the only runnable task on the rq and target rq also 4827 * has only one task, there's absolutely no point in yielding. 4828 */ 4829 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 4830 yielded = -ESRCH; 4831 goto out_irq; 4832 } 4833 4834 double_rq_lock(rq, p_rq); 4835 if (task_rq(p) != p_rq) { 4836 double_rq_unlock(rq, p_rq); 4837 goto again; 4838 } 4839 4840 if (!curr->sched_class->yield_to_task) 4841 goto out_unlock; 4842 4843 if (curr->sched_class != p->sched_class) 4844 goto out_unlock; 4845 4846 if (task_running(p_rq, p) || p->state) 4847 goto out_unlock; 4848 4849 yielded = curr->sched_class->yield_to_task(rq, p, preempt); 4850 if (yielded) { 4851 schedstat_inc(rq, yld_count); 4852 /* 4853 * Make p's CPU reschedule; pick_next_entity takes care of 4854 * fairness. 4855 */ 4856 if (preempt && rq != p_rq) 4857 resched_curr(p_rq); 4858 } 4859 4860 out_unlock: 4861 double_rq_unlock(rq, p_rq); 4862 out_irq: 4863 local_irq_restore(flags); 4864 4865 if (yielded > 0) 4866 schedule(); 4867 4868 return yielded; 4869 } 4870 EXPORT_SYMBOL_GPL(yield_to); 4871 4872 /* 4873 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 4874 * that process accounting knows that this is a task in IO wait state. 4875 */ 4876 long __sched io_schedule_timeout(long timeout) 4877 { 4878 int old_iowait = current->in_iowait; 4879 struct rq *rq; 4880 long ret; 4881 4882 current->in_iowait = 1; 4883 blk_schedule_flush_plug(current); 4884 4885 delayacct_blkio_start(); 4886 rq = raw_rq(); 4887 atomic_inc(&rq->nr_iowait); 4888 ret = schedule_timeout(timeout); 4889 current->in_iowait = old_iowait; 4890 atomic_dec(&rq->nr_iowait); 4891 delayacct_blkio_end(); 4892 4893 return ret; 4894 } 4895 EXPORT_SYMBOL(io_schedule_timeout); 4896 4897 /** 4898 * sys_sched_get_priority_max - return maximum RT priority. 4899 * @policy: scheduling class. 4900 * 4901 * Return: On success, this syscall returns the maximum 4902 * rt_priority that can be used by a given scheduling class. 4903 * On failure, a negative error code is returned. 4904 */ 4905 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 4906 { 4907 int ret = -EINVAL; 4908 4909 switch (policy) { 4910 case SCHED_FIFO: 4911 case SCHED_RR: 4912 ret = MAX_USER_RT_PRIO-1; 4913 break; 4914 case SCHED_DEADLINE: 4915 case SCHED_NORMAL: 4916 case SCHED_BATCH: 4917 case SCHED_IDLE: 4918 ret = 0; 4919 break; 4920 } 4921 return ret; 4922 } 4923 4924 /** 4925 * sys_sched_get_priority_min - return minimum RT priority. 4926 * @policy: scheduling class. 4927 * 4928 * Return: On success, this syscall returns the minimum 4929 * rt_priority that can be used by a given scheduling class. 4930 * On failure, a negative error code is returned. 4931 */ 4932 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 4933 { 4934 int ret = -EINVAL; 4935 4936 switch (policy) { 4937 case SCHED_FIFO: 4938 case SCHED_RR: 4939 ret = 1; 4940 break; 4941 case SCHED_DEADLINE: 4942 case SCHED_NORMAL: 4943 case SCHED_BATCH: 4944 case SCHED_IDLE: 4945 ret = 0; 4946 } 4947 return ret; 4948 } 4949 4950 /** 4951 * sys_sched_rr_get_interval - return the default timeslice of a process. 4952 * @pid: pid of the process. 4953 * @interval: userspace pointer to the timeslice value. 
4954 * 4955 * this syscall writes the default timeslice value of a given process 4956 * into the user-space timespec buffer. A value of '0' means infinity. 4957 * 4958 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 4959 * an error code. 4960 */ 4961 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 4962 struct timespec __user *, interval) 4963 { 4964 struct task_struct *p; 4965 unsigned int time_slice; 4966 unsigned long flags; 4967 struct rq *rq; 4968 int retval; 4969 struct timespec t; 4970 4971 if (pid < 0) 4972 return -EINVAL; 4973 4974 retval = -ESRCH; 4975 rcu_read_lock(); 4976 p = find_process_by_pid(pid); 4977 if (!p) 4978 goto out_unlock; 4979 4980 retval = security_task_getscheduler(p); 4981 if (retval) 4982 goto out_unlock; 4983 4984 rq = task_rq_lock(p, &flags); 4985 time_slice = 0; 4986 if (p->sched_class->get_rr_interval) 4987 time_slice = p->sched_class->get_rr_interval(rq, p); 4988 task_rq_unlock(rq, p, &flags); 4989 4990 rcu_read_unlock(); 4991 jiffies_to_timespec(time_slice, &t); 4992 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 4993 return retval; 4994 4995 out_unlock: 4996 rcu_read_unlock(); 4997 return retval; 4998 } 4999 5000 static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; 5001 5002 void sched_show_task(struct task_struct *p) 5003 { 5004 unsigned long free = 0; 5005 int ppid; 5006 unsigned long state = p->state; 5007 5008 if (state) 5009 state = __ffs(state) + 1; 5010 printk(KERN_INFO "%-15.15s %c", p->comm, 5011 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); 5012 #if BITS_PER_LONG == 32 5013 if (state == TASK_RUNNING) 5014 printk(KERN_CONT " running "); 5015 else 5016 printk(KERN_CONT " %08lx ", thread_saved_pc(p)); 5017 #else 5018 if (state == TASK_RUNNING) 5019 printk(KERN_CONT " running task "); 5020 else 5021 printk(KERN_CONT " %016lx ", thread_saved_pc(p)); 5022 #endif 5023 #ifdef CONFIG_DEBUG_STACK_USAGE 5024 free = stack_not_used(p); 5025 #endif 5026 ppid = 0; 5027 rcu_read_lock(); 5028 if (pid_alive(p)) 5029 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 5030 rcu_read_unlock(); 5031 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 5032 task_pid_nr(p), ppid, 5033 (unsigned long)task_thread_info(p)->flags); 5034 5035 print_worker_info(KERN_INFO, p); 5036 show_stack(p, NULL); 5037 } 5038 5039 void show_state_filter(unsigned long state_filter) 5040 { 5041 struct task_struct *g, *p; 5042 5043 #if BITS_PER_LONG == 32 5044 printk(KERN_INFO 5045 " task PC stack pid father\n"); 5046 #else 5047 printk(KERN_INFO 5048 " task PC stack pid father\n"); 5049 #endif 5050 rcu_read_lock(); 5051 for_each_process_thread(g, p) { 5052 /* 5053 * reset the NMI-timeout, listing all files on a slow 5054 * console might take a lot of time: 5055 */ 5056 touch_nmi_watchdog(); 5057 if (!state_filter || (p->state & state_filter)) 5058 sched_show_task(p); 5059 } 5060 5061 touch_all_softlockup_watchdogs(); 5062 5063 #ifdef CONFIG_SCHED_DEBUG 5064 sysrq_sched_debug_show(); 5065 #endif 5066 rcu_read_unlock(); 5067 /* 5068 * Only show locks if all tasks are dumped: 5069 */ 5070 if (!state_filter) 5071 debug_show_all_locks(); 5072 } 5073 5074 void init_idle_bootup_task(struct task_struct *idle) 5075 { 5076 idle->sched_class = &idle_sched_class; 5077 } 5078 5079 /** 5080 * init_idle - set up an idle thread for a given CPU 5081 * @idle: task in question 5082 * @cpu: cpu the idle task belongs to 5083 * 5084 * NOTE: this function does not set the idle thread's NEED_RESCHED 5085 * flag, to make booting more robust. 
5086 */ 5087 void init_idle(struct task_struct *idle, int cpu) 5088 { 5089 struct rq *rq = cpu_rq(cpu); 5090 unsigned long flags; 5091 5092 raw_spin_lock_irqsave(&idle->pi_lock, flags); 5093 raw_spin_lock(&rq->lock); 5094 5095 __sched_fork(0, idle); 5096 idle->state = TASK_RUNNING; 5097 idle->se.exec_start = sched_clock(); 5098 5099 #ifdef CONFIG_SMP 5100 /* 5101 * Its possible that init_idle() gets called multiple times on a task, 5102 * in that case do_set_cpus_allowed() will not do the right thing. 5103 * 5104 * And since this is boot we can forgo the serialization. 5105 */ 5106 set_cpus_allowed_common(idle, cpumask_of(cpu)); 5107 #endif 5108 /* 5109 * We're having a chicken and egg problem, even though we are 5110 * holding rq->lock, the cpu isn't yet set to this cpu so the 5111 * lockdep check in task_group() will fail. 5112 * 5113 * Similar case to sched_fork(). / Alternatively we could 5114 * use task_rq_lock() here and obtain the other rq->lock. 5115 * 5116 * Silence PROVE_RCU 5117 */ 5118 rcu_read_lock(); 5119 __set_task_cpu(idle, cpu); 5120 rcu_read_unlock(); 5121 5122 rq->curr = rq->idle = idle; 5123 idle->on_rq = TASK_ON_RQ_QUEUED; 5124 #ifdef CONFIG_SMP 5125 idle->on_cpu = 1; 5126 #endif 5127 raw_spin_unlock(&rq->lock); 5128 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 5129 5130 /* Set the preempt count _outside_ the spinlocks! */ 5131 init_idle_preempt_count(idle, cpu); 5132 5133 /* 5134 * The idle tasks have their own, simple scheduling class: 5135 */ 5136 idle->sched_class = &idle_sched_class; 5137 ftrace_graph_init_idle_task(idle, cpu); 5138 vtime_init_idle(idle, cpu); 5139 #ifdef CONFIG_SMP 5140 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 5141 #endif 5142 } 5143 5144 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 5145 const struct cpumask *trial) 5146 { 5147 int ret = 1, trial_cpus; 5148 struct dl_bw *cur_dl_b; 5149 unsigned long flags; 5150 5151 if (!cpumask_weight(cur)) 5152 return ret; 5153 5154 rcu_read_lock_sched(); 5155 cur_dl_b = dl_bw_of(cpumask_any(cur)); 5156 trial_cpus = cpumask_weight(trial); 5157 5158 raw_spin_lock_irqsave(&cur_dl_b->lock, flags); 5159 if (cur_dl_b->bw != -1 && 5160 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw) 5161 ret = 0; 5162 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); 5163 rcu_read_unlock_sched(); 5164 5165 return ret; 5166 } 5167 5168 int task_can_attach(struct task_struct *p, 5169 const struct cpumask *cs_cpus_allowed) 5170 { 5171 int ret = 0; 5172 5173 /* 5174 * Kthreads which disallow setaffinity shouldn't be moved 5175 * to a new cpuset; we don't want to change their cpu 5176 * affinity and isolating such threads by their set of 5177 * allowed nodes is unnecessary. Thus, cpusets are not 5178 * applicable for such threads. This prevents checking for 5179 * success of set_cpus_allowed_ptr() on all attached tasks 5180 * before cpus_allowed may be changed. 
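 *
 * (Per-cpu kthreads bound with kthread_bind(), e.g. workqueue workers,
 * are the typical PF_NO_SETAFFINITY tasks.)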
5181 */ 5182 if (p->flags & PF_NO_SETAFFINITY) { 5183 ret = -EINVAL; 5184 goto out; 5185 } 5186 5187 #ifdef CONFIG_SMP 5188 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 5189 cs_cpus_allowed)) { 5190 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask, 5191 cs_cpus_allowed); 5192 struct dl_bw *dl_b; 5193 bool overflow; 5194 int cpus; 5195 unsigned long flags; 5196 5197 rcu_read_lock_sched(); 5198 dl_b = dl_bw_of(dest_cpu); 5199 raw_spin_lock_irqsave(&dl_b->lock, flags); 5200 cpus = dl_bw_cpus(dest_cpu); 5201 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw); 5202 if (overflow) 5203 ret = -EBUSY; 5204 else { 5205 /* 5206 * We reserve space for this task in the destination 5207 * root_domain, as we can't fail after this point. 5208 * We will free resources in the source root_domain 5209 * later on (see set_cpus_allowed_dl()). 5210 */ 5211 __dl_add(dl_b, p->dl.dl_bw); 5212 } 5213 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 5214 rcu_read_unlock_sched(); 5215 5216 } 5217 #endif 5218 out: 5219 return ret; 5220 } 5221 5222 #ifdef CONFIG_SMP 5223 5224 #ifdef CONFIG_NUMA_BALANCING 5225 /* Migrate current task p to target_cpu */ 5226 int migrate_task_to(struct task_struct *p, int target_cpu) 5227 { 5228 struct migration_arg arg = { p, target_cpu }; 5229 int curr_cpu = task_cpu(p); 5230 5231 if (curr_cpu == target_cpu) 5232 return 0; 5233 5234 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) 5235 return -EINVAL; 5236 5237 /* TODO: This is not properly updating schedstats */ 5238 5239 trace_sched_move_numa(p, curr_cpu, target_cpu); 5240 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 5241 } 5242 5243 /* 5244 * Requeue a task on a given node and accurately track the number of NUMA 5245 * tasks on the runqueues 5246 */ 5247 void sched_setnuma(struct task_struct *p, int nid) 5248 { 5249 struct rq *rq; 5250 unsigned long flags; 5251 bool queued, running; 5252 5253 rq = task_rq_lock(p, &flags); 5254 queued = task_on_rq_queued(p); 5255 running = task_current(rq, p); 5256 5257 if (queued) 5258 dequeue_task(rq, p, DEQUEUE_SAVE); 5259 if (running) 5260 put_prev_task(rq, p); 5261 5262 p->numa_preferred_nid = nid; 5263 5264 if (running) 5265 p->sched_class->set_curr_task(rq); 5266 if (queued) 5267 enqueue_task(rq, p, ENQUEUE_RESTORE); 5268 task_rq_unlock(rq, p, &flags); 5269 } 5270 #endif /* CONFIG_NUMA_BALANCING */ 5271 5272 #ifdef CONFIG_HOTPLUG_CPU 5273 /* 5274 * Ensures that the idle task is using init_mm right before its cpu goes 5275 * offline. 5276 */ 5277 void idle_task_exit(void) 5278 { 5279 struct mm_struct *mm = current->active_mm; 5280 5281 BUG_ON(cpu_online(smp_processor_id())); 5282 5283 if (mm != &init_mm) { 5284 switch_mm(mm, &init_mm, current); 5285 finish_arch_post_lock_switch(); 5286 } 5287 mmdrop(mm); 5288 } 5289 5290 /* 5291 * Since this CPU is going 'away' for a while, fold any nr_active delta 5292 * we might have. Assumes we're called after migrate_tasks() so that the 5293 * nr_active count is stable. 5294 * 5295 * Also see the comment "Global load-average calculations". 
5296 */ 5297 static void calc_load_migrate(struct rq *rq) 5298 { 5299 long delta = calc_load_fold_active(rq); 5300 if (delta) 5301 atomic_long_add(delta, &calc_load_tasks); 5302 } 5303 5304 static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) 5305 { 5306 } 5307 5308 static const struct sched_class fake_sched_class = { 5309 .put_prev_task = put_prev_task_fake, 5310 }; 5311 5312 static struct task_struct fake_task = { 5313 /* 5314 * Avoid pull_{rt,dl}_task() 5315 */ 5316 .prio = MAX_PRIO + 1, 5317 .sched_class = &fake_sched_class, 5318 }; 5319 5320 /* 5321 * Migrate all tasks from the rq, sleeping tasks will be migrated by 5322 * try_to_wake_up()->select_task_rq(). 5323 * 5324 * Called with rq->lock held even though we'er in stop_machine() and 5325 * there's no concurrency possible, we hold the required locks anyway 5326 * because of lock validation efforts. 5327 */ 5328 static void migrate_tasks(struct rq *dead_rq) 5329 { 5330 struct rq *rq = dead_rq; 5331 struct task_struct *next, *stop = rq->stop; 5332 int dest_cpu; 5333 5334 /* 5335 * Fudge the rq selection such that the below task selection loop 5336 * doesn't get stuck on the currently eligible stop task. 5337 * 5338 * We're currently inside stop_machine() and the rq is either stuck 5339 * in the stop_machine_cpu_stop() loop, or we're executing this code, 5340 * either way we should never end up calling schedule() until we're 5341 * done here. 5342 */ 5343 rq->stop = NULL; 5344 5345 /* 5346 * put_prev_task() and pick_next_task() sched 5347 * class method both need to have an up-to-date 5348 * value of rq->clock[_task] 5349 */ 5350 update_rq_clock(rq); 5351 5352 for (;;) { 5353 /* 5354 * There's this thread running, bail when that's the only 5355 * remaining thread. 5356 */ 5357 if (rq->nr_running == 1) 5358 break; 5359 5360 /* 5361 * pick_next_task assumes pinned rq->lock. 5362 */ 5363 lockdep_pin_lock(&rq->lock); 5364 next = pick_next_task(rq, &fake_task); 5365 BUG_ON(!next); 5366 next->sched_class->put_prev_task(rq, next); 5367 5368 /* 5369 * Rules for changing task_struct::cpus_allowed are holding 5370 * both pi_lock and rq->lock, such that holding either 5371 * stabilizes the mask. 5372 * 5373 * Drop rq->lock is not quite as disastrous as it usually is 5374 * because !cpu_active at this point, which means load-balance 5375 * will not interfere. Also, stop-machine. 5376 */ 5377 lockdep_unpin_lock(&rq->lock); 5378 raw_spin_unlock(&rq->lock); 5379 raw_spin_lock(&next->pi_lock); 5380 raw_spin_lock(&rq->lock); 5381 5382 /* 5383 * Since we're inside stop-machine, _nothing_ should have 5384 * changed the task, WARN if weird stuff happened, because in 5385 * that case the above rq->lock drop is a fail too. 5386 */ 5387 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { 5388 raw_spin_unlock(&next->pi_lock); 5389 continue; 5390 } 5391 5392 /* Find suitable destination for @next, with force if needed. 
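 *
 * (select_fallback_rq() may end up breaking the task's affinity as a
 * last resort if none of its allowed CPUs remain online.)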
*/ 5393 dest_cpu = select_fallback_rq(dead_rq->cpu, next); 5394 5395 rq = __migrate_task(rq, next, dest_cpu); 5396 if (rq != dead_rq) { 5397 raw_spin_unlock(&rq->lock); 5398 rq = dead_rq; 5399 raw_spin_lock(&rq->lock); 5400 } 5401 raw_spin_unlock(&next->pi_lock); 5402 } 5403 5404 rq->stop = stop; 5405 } 5406 #endif /* CONFIG_HOTPLUG_CPU */ 5407 5408 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 5409 5410 static struct ctl_table sd_ctl_dir[] = { 5411 { 5412 .procname = "sched_domain", 5413 .mode = 0555, 5414 }, 5415 {} 5416 }; 5417 5418 static struct ctl_table sd_ctl_root[] = { 5419 { 5420 .procname = "kernel", 5421 .mode = 0555, 5422 .child = sd_ctl_dir, 5423 }, 5424 {} 5425 }; 5426 5427 static struct ctl_table *sd_alloc_ctl_entry(int n) 5428 { 5429 struct ctl_table *entry = 5430 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); 5431 5432 return entry; 5433 } 5434 5435 static void sd_free_ctl_entry(struct ctl_table **tablep) 5436 { 5437 struct ctl_table *entry; 5438 5439 /* 5440 * In the intermediate directories, both the child directory and 5441 * procname are dynamically allocated and could fail but the mode 5442 * will always be set. In the lowest directory the names are 5443 * static strings and all have proc handlers. 5444 */ 5445 for (entry = *tablep; entry->mode; entry++) { 5446 if (entry->child) 5447 sd_free_ctl_entry(&entry->child); 5448 if (entry->proc_handler == NULL) 5449 kfree(entry->procname); 5450 } 5451 5452 kfree(*tablep); 5453 *tablep = NULL; 5454 } 5455 5456 static int min_load_idx = 0; 5457 static int max_load_idx = CPU_LOAD_IDX_MAX-1; 5458 5459 static void 5460 set_table_entry(struct ctl_table *entry, 5461 const char *procname, void *data, int maxlen, 5462 umode_t mode, proc_handler *proc_handler, 5463 bool load_idx) 5464 { 5465 entry->procname = procname; 5466 entry->data = data; 5467 entry->maxlen = maxlen; 5468 entry->mode = mode; 5469 entry->proc_handler = proc_handler; 5470 5471 if (load_idx) { 5472 entry->extra1 = &min_load_idx; 5473 entry->extra2 = &max_load_idx; 5474 } 5475 } 5476 5477 static struct ctl_table * 5478 sd_alloc_ctl_domain_table(struct sched_domain *sd) 5479 { 5480 struct ctl_table *table = sd_alloc_ctl_entry(14); 5481 5482 if (table == NULL) 5483 return NULL; 5484 5485 set_table_entry(&table[0], "min_interval", &sd->min_interval, 5486 sizeof(long), 0644, proc_doulongvec_minmax, false); 5487 set_table_entry(&table[1], "max_interval", &sd->max_interval, 5488 sizeof(long), 0644, proc_doulongvec_minmax, false); 5489 set_table_entry(&table[2], "busy_idx", &sd->busy_idx, 5490 sizeof(int), 0644, proc_dointvec_minmax, true); 5491 set_table_entry(&table[3], "idle_idx", &sd->idle_idx, 5492 sizeof(int), 0644, proc_dointvec_minmax, true); 5493 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, 5494 sizeof(int), 0644, proc_dointvec_minmax, true); 5495 set_table_entry(&table[5], "wake_idx", &sd->wake_idx, 5496 sizeof(int), 0644, proc_dointvec_minmax, true); 5497 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, 5498 sizeof(int), 0644, proc_dointvec_minmax, true); 5499 set_table_entry(&table[7], "busy_factor", &sd->busy_factor, 5500 sizeof(int), 0644, proc_dointvec_minmax, false); 5501 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, 5502 sizeof(int), 0644, proc_dointvec_minmax, false); 5503 set_table_entry(&table[9], "cache_nice_tries", 5504 &sd->cache_nice_tries, 5505 sizeof(int), 0644, proc_dointvec_minmax, false); 5506 set_table_entry(&table[10], "flags", &sd->flags, 5507 sizeof(int), 0644, 
proc_dointvec_minmax, false); 5508 set_table_entry(&table[11], "max_newidle_lb_cost", 5509 &sd->max_newidle_lb_cost, 5510 sizeof(long), 0644, proc_doulongvec_minmax, false); 5511 set_table_entry(&table[12], "name", sd->name, 5512 CORENAME_MAX_SIZE, 0444, proc_dostring, false); 5513 /* &table[13] is terminator */ 5514 5515 return table; 5516 } 5517 5518 static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu) 5519 { 5520 struct ctl_table *entry, *table; 5521 struct sched_domain *sd; 5522 int domain_num = 0, i; 5523 char buf[32]; 5524 5525 for_each_domain(cpu, sd) 5526 domain_num++; 5527 entry = table = sd_alloc_ctl_entry(domain_num + 1); 5528 if (table == NULL) 5529 return NULL; 5530 5531 i = 0; 5532 for_each_domain(cpu, sd) { 5533 snprintf(buf, 32, "domain%d", i); 5534 entry->procname = kstrdup(buf, GFP_KERNEL); 5535 entry->mode = 0555; 5536 entry->child = sd_alloc_ctl_domain_table(sd); 5537 entry++; 5538 i++; 5539 } 5540 return table; 5541 } 5542 5543 static struct ctl_table_header *sd_sysctl_header; 5544 static void register_sched_domain_sysctl(void) 5545 { 5546 int i, cpu_num = num_possible_cpus(); 5547 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); 5548 char buf[32]; 5549 5550 WARN_ON(sd_ctl_dir[0].child); 5551 sd_ctl_dir[0].child = entry; 5552 5553 if (entry == NULL) 5554 return; 5555 5556 for_each_possible_cpu(i) { 5557 snprintf(buf, 32, "cpu%d", i); 5558 entry->procname = kstrdup(buf, GFP_KERNEL); 5559 entry->mode = 0555; 5560 entry->child = sd_alloc_ctl_cpu_table(i); 5561 entry++; 5562 } 5563 5564 WARN_ON(sd_sysctl_header); 5565 sd_sysctl_header = register_sysctl_table(sd_ctl_root); 5566 } 5567 5568 /* may be called multiple times per register */ 5569 static void unregister_sched_domain_sysctl(void) 5570 { 5571 unregister_sysctl_table(sd_sysctl_header); 5572 sd_sysctl_header = NULL; 5573 if (sd_ctl_dir[0].child) 5574 sd_free_ctl_entry(&sd_ctl_dir[0].child); 5575 } 5576 #else 5577 static void register_sched_domain_sysctl(void) 5578 { 5579 } 5580 static void unregister_sched_domain_sysctl(void) 5581 { 5582 } 5583 #endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */ 5584 5585 static void set_rq_online(struct rq *rq) 5586 { 5587 if (!rq->online) { 5588 const struct sched_class *class; 5589 5590 cpumask_set_cpu(rq->cpu, rq->rd->online); 5591 rq->online = 1; 5592 5593 for_each_class(class) { 5594 if (class->rq_online) 5595 class->rq_online(rq); 5596 } 5597 } 5598 } 5599 5600 static void set_rq_offline(struct rq *rq) 5601 { 5602 if (rq->online) { 5603 const struct sched_class *class; 5604 5605 for_each_class(class) { 5606 if (class->rq_offline) 5607 class->rq_offline(rq); 5608 } 5609 5610 cpumask_clear_cpu(rq->cpu, rq->rd->online); 5611 rq->online = 0; 5612 } 5613 } 5614 5615 /* 5616 * migration_call - callback that gets triggered when a CPU is added. 5617 * Here we can start up the necessary migration thread for the new CPU. 
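 *
 * Summary of the hotplug transitions handled below:
 *	CPU_UP_PREPARE	reset the CPU's calc_load_update window
 *	CPU_ONLINE	attach the CPU to its root-domain (set_rq_online())
 *	CPU_DYING	detach it and migrate every task away (migrate_tasks())
 *	CPU_DEAD	fold the CPU's load-average contribution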
5618 */ 5619 static int 5620 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) 5621 { 5622 int cpu = (long)hcpu; 5623 unsigned long flags; 5624 struct rq *rq = cpu_rq(cpu); 5625 5626 switch (action & ~CPU_TASKS_FROZEN) { 5627 5628 case CPU_UP_PREPARE: 5629 rq->calc_load_update = calc_load_update; 5630 break; 5631 5632 case CPU_ONLINE: 5633 /* Update our root-domain */ 5634 raw_spin_lock_irqsave(&rq->lock, flags); 5635 if (rq->rd) { 5636 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5637 5638 set_rq_online(rq); 5639 } 5640 raw_spin_unlock_irqrestore(&rq->lock, flags); 5641 break; 5642 5643 #ifdef CONFIG_HOTPLUG_CPU 5644 case CPU_DYING: 5645 sched_ttwu_pending(); 5646 /* Update our root-domain */ 5647 raw_spin_lock_irqsave(&rq->lock, flags); 5648 if (rq->rd) { 5649 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5650 set_rq_offline(rq); 5651 } 5652 migrate_tasks(rq); 5653 BUG_ON(rq->nr_running != 1); /* the migration thread */ 5654 raw_spin_unlock_irqrestore(&rq->lock, flags); 5655 break; 5656 5657 case CPU_DEAD: 5658 calc_load_migrate(rq); 5659 break; 5660 #endif 5661 } 5662 5663 update_max_interval(); 5664 5665 return NOTIFY_OK; 5666 } 5667 5668 /* 5669 * Register at high priority so that task migration (migrate_all_tasks) 5670 * happens before everything else. This has to be lower priority than 5671 * the notifier in the perf_event subsystem, though. 5672 */ 5673 static struct notifier_block migration_notifier = { 5674 .notifier_call = migration_call, 5675 .priority = CPU_PRI_MIGRATION, 5676 }; 5677 5678 static void set_cpu_rq_start_time(void) 5679 { 5680 int cpu = smp_processor_id(); 5681 struct rq *rq = cpu_rq(cpu); 5682 rq->age_stamp = sched_clock_cpu(cpu); 5683 } 5684 5685 static int sched_cpu_active(struct notifier_block *nfb, 5686 unsigned long action, void *hcpu) 5687 { 5688 int cpu = (long)hcpu; 5689 5690 switch (action & ~CPU_TASKS_FROZEN) { 5691 case CPU_STARTING: 5692 set_cpu_rq_start_time(); 5693 return NOTIFY_OK; 5694 5695 case CPU_ONLINE: 5696 /* 5697 * At this point a starting CPU has marked itself as online via 5698 * set_cpu_online(). But it might not yet have marked itself 5699 * as active, which is essential from here on. 
5700 */ 5701 set_cpu_active(cpu, true); 5702 stop_machine_unpark(cpu); 5703 return NOTIFY_OK; 5704 5705 case CPU_DOWN_FAILED: 5706 set_cpu_active(cpu, true); 5707 return NOTIFY_OK; 5708 5709 default: 5710 return NOTIFY_DONE; 5711 } 5712 } 5713 5714 static int sched_cpu_inactive(struct notifier_block *nfb, 5715 unsigned long action, void *hcpu) 5716 { 5717 switch (action & ~CPU_TASKS_FROZEN) { 5718 case CPU_DOWN_PREPARE: 5719 set_cpu_active((long)hcpu, false); 5720 return NOTIFY_OK; 5721 default: 5722 return NOTIFY_DONE; 5723 } 5724 } 5725 5726 static int __init migration_init(void) 5727 { 5728 void *cpu = (void *)(long)smp_processor_id(); 5729 int err; 5730 5731 /* Initialize migration for the boot CPU */ 5732 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); 5733 BUG_ON(err == NOTIFY_BAD); 5734 migration_call(&migration_notifier, CPU_ONLINE, cpu); 5735 register_cpu_notifier(&migration_notifier); 5736 5737 /* Register cpu active notifiers */ 5738 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); 5739 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); 5740 5741 return 0; 5742 } 5743 early_initcall(migration_init); 5744 5745 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ 5746 5747 #ifdef CONFIG_SCHED_DEBUG 5748 5749 static __read_mostly int sched_debug_enabled; 5750 5751 static int __init sched_debug_setup(char *str) 5752 { 5753 sched_debug_enabled = 1; 5754 5755 return 0; 5756 } 5757 early_param("sched_debug", sched_debug_setup); 5758 5759 static inline bool sched_debug(void) 5760 { 5761 return sched_debug_enabled; 5762 } 5763 5764 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 5765 struct cpumask *groupmask) 5766 { 5767 struct sched_group *group = sd->groups; 5768 5769 cpumask_clear(groupmask); 5770 5771 printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 5772 5773 if (!(sd->flags & SD_LOAD_BALANCE)) { 5774 printk("does not load-balance\n"); 5775 if (sd->parent) 5776 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" 5777 " has parent"); 5778 return -1; 5779 } 5780 5781 printk(KERN_CONT "span %*pbl level %s\n", 5782 cpumask_pr_args(sched_domain_span(sd)), sd->name); 5783 5784 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 5785 printk(KERN_ERR "ERROR: domain->span does not contain " 5786 "CPU%d\n", cpu); 5787 } 5788 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { 5789 printk(KERN_ERR "ERROR: domain->groups does not contain" 5790 " CPU%d\n", cpu); 5791 } 5792 5793 printk(KERN_DEBUG "%*s groups:", level + 1, ""); 5794 do { 5795 if (!group) { 5796 printk("\n"); 5797 printk(KERN_ERR "ERROR: group is NULL\n"); 5798 break; 5799 } 5800 5801 if (!cpumask_weight(sched_group_cpus(group))) { 5802 printk(KERN_CONT "\n"); 5803 printk(KERN_ERR "ERROR: empty group\n"); 5804 break; 5805 } 5806 5807 if (!(sd->flags & SD_OVERLAP) && 5808 cpumask_intersects(groupmask, sched_group_cpus(group))) { 5809 printk(KERN_CONT "\n"); 5810 printk(KERN_ERR "ERROR: repeated CPUs\n"); 5811 break; 5812 } 5813 5814 cpumask_or(groupmask, groupmask, sched_group_cpus(group)); 5815 5816 printk(KERN_CONT " %*pbl", 5817 cpumask_pr_args(sched_group_cpus(group))); 5818 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { 5819 printk(KERN_CONT " (cpu_capacity = %d)", 5820 group->sgc->capacity); 5821 } 5822 5823 group = group->next; 5824 } while (group != sd->groups); 5825 printk(KERN_CONT "\n"); 5826 5827 if (!cpumask_equal(sched_domain_span(sd), groupmask)) 5828 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 5829 5830 if 
(sd->parent && 5831 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 5832 printk(KERN_ERR "ERROR: parent span is not a superset " 5833 "of domain->span\n"); 5834 return 0; 5835 } 5836 5837 static void sched_domain_debug(struct sched_domain *sd, int cpu) 5838 { 5839 int level = 0; 5840 5841 if (!sched_debug_enabled) 5842 return; 5843 5844 if (!sd) { 5845 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 5846 return; 5847 } 5848 5849 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); 5850 5851 for (;;) { 5852 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) 5853 break; 5854 level++; 5855 sd = sd->parent; 5856 if (!sd) 5857 break; 5858 } 5859 } 5860 #else /* !CONFIG_SCHED_DEBUG */ 5861 # define sched_domain_debug(sd, cpu) do { } while (0) 5862 static inline bool sched_debug(void) 5863 { 5864 return false; 5865 } 5866 #endif /* CONFIG_SCHED_DEBUG */ 5867 5868 static int sd_degenerate(struct sched_domain *sd) 5869 { 5870 if (cpumask_weight(sched_domain_span(sd)) == 1) 5871 return 1; 5872 5873 /* Following flags need at least 2 groups */ 5874 if (sd->flags & (SD_LOAD_BALANCE | 5875 SD_BALANCE_NEWIDLE | 5876 SD_BALANCE_FORK | 5877 SD_BALANCE_EXEC | 5878 SD_SHARE_CPUCAPACITY | 5879 SD_SHARE_PKG_RESOURCES | 5880 SD_SHARE_POWERDOMAIN)) { 5881 if (sd->groups != sd->groups->next) 5882 return 0; 5883 } 5884 5885 /* Following flags don't use groups */ 5886 if (sd->flags & (SD_WAKE_AFFINE)) 5887 return 0; 5888 5889 return 1; 5890 } 5891 5892 static int 5893 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) 5894 { 5895 unsigned long cflags = sd->flags, pflags = parent->flags; 5896 5897 if (sd_degenerate(parent)) 5898 return 1; 5899 5900 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) 5901 return 0; 5902 5903 /* Flags needing groups don't count if only 1 group in parent */ 5904 if (parent->groups == parent->groups->next) { 5905 pflags &= ~(SD_LOAD_BALANCE | 5906 SD_BALANCE_NEWIDLE | 5907 SD_BALANCE_FORK | 5908 SD_BALANCE_EXEC | 5909 SD_SHARE_CPUCAPACITY | 5910 SD_SHARE_PKG_RESOURCES | 5911 SD_PREFER_SIBLING | 5912 SD_SHARE_POWERDOMAIN); 5913 if (nr_node_ids == 1) 5914 pflags &= ~SD_SERIALIZE; 5915 } 5916 if (~cflags & pflags) 5917 return 0; 5918 5919 return 1; 5920 } 5921 5922 static void free_rootdomain(struct rcu_head *rcu) 5923 { 5924 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); 5925 5926 cpupri_cleanup(&rd->cpupri); 5927 cpudl_cleanup(&rd->cpudl); 5928 free_cpumask_var(rd->dlo_mask); 5929 free_cpumask_var(rd->rto_mask); 5930 free_cpumask_var(rd->online); 5931 free_cpumask_var(rd->span); 5932 kfree(rd); 5933 } 5934 5935 static void rq_attach_root(struct rq *rq, struct root_domain *rd) 5936 { 5937 struct root_domain *old_rd = NULL; 5938 unsigned long flags; 5939 5940 raw_spin_lock_irqsave(&rq->lock, flags); 5941 5942 if (rq->rd) { 5943 old_rd = rq->rd; 5944 5945 if (cpumask_test_cpu(rq->cpu, old_rd->online)) 5946 set_rq_offline(rq); 5947 5948 cpumask_clear_cpu(rq->cpu, old_rd->span); 5949 5950 /* 5951 * If we dont want to free the old_rd yet then 5952 * set old_rd to NULL to skip the freeing later 5953 * in this function: 5954 */ 5955 if (!atomic_dec_and_test(&old_rd->refcount)) 5956 old_rd = NULL; 5957 } 5958 5959 atomic_inc(&rd->refcount); 5960 rq->rd = rd; 5961 5962 cpumask_set_cpu(rq->cpu, rd->span); 5963 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 5964 set_rq_online(rq); 5965 5966 raw_spin_unlock_irqrestore(&rq->lock, flags); 5967 5968 if (old_rd) 5969 
call_rcu_sched(&old_rd->rcu, free_rootdomain); 5970 } 5971 5972 static int init_rootdomain(struct root_domain *rd) 5973 { 5974 memset(rd, 0, sizeof(*rd)); 5975 5976 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) 5977 goto out; 5978 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) 5979 goto free_span; 5980 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 5981 goto free_online; 5982 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 5983 goto free_dlo_mask; 5984 5985 init_dl_bw(&rd->dl_bw); 5986 if (cpudl_init(&rd->cpudl) != 0) 5987 goto free_dlo_mask; 5988 5989 if (cpupri_init(&rd->cpupri) != 0) 5990 goto free_rto_mask; 5991 return 0; 5992 5993 free_rto_mask: 5994 free_cpumask_var(rd->rto_mask); 5995 free_dlo_mask: 5996 free_cpumask_var(rd->dlo_mask); 5997 free_online: 5998 free_cpumask_var(rd->online); 5999 free_span: 6000 free_cpumask_var(rd->span); 6001 out: 6002 return -ENOMEM; 6003 } 6004 6005 /* 6006 * By default the system creates a single root-domain with all cpus as 6007 * members (mimicking the global state we have today). 6008 */ 6009 struct root_domain def_root_domain; 6010 6011 static void init_defrootdomain(void) 6012 { 6013 init_rootdomain(&def_root_domain); 6014 6015 atomic_set(&def_root_domain.refcount, 1); 6016 } 6017 6018 static struct root_domain *alloc_rootdomain(void) 6019 { 6020 struct root_domain *rd; 6021 6022 rd = kmalloc(sizeof(*rd), GFP_KERNEL); 6023 if (!rd) 6024 return NULL; 6025 6026 if (init_rootdomain(rd) != 0) { 6027 kfree(rd); 6028 return NULL; 6029 } 6030 6031 return rd; 6032 } 6033 6034 static void free_sched_groups(struct sched_group *sg, int free_sgc) 6035 { 6036 struct sched_group *tmp, *first; 6037 6038 if (!sg) 6039 return; 6040 6041 first = sg; 6042 do { 6043 tmp = sg->next; 6044 6045 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) 6046 kfree(sg->sgc); 6047 6048 kfree(sg); 6049 sg = tmp; 6050 } while (sg != first); 6051 } 6052 6053 static void free_sched_domain(struct rcu_head *rcu) 6054 { 6055 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); 6056 6057 /* 6058 * If its an overlapping domain it has private groups, iterate and 6059 * nuke them all. 6060 */ 6061 if (sd->flags & SD_OVERLAP) { 6062 free_sched_groups(sd->groups, 1); 6063 } else if (atomic_dec_and_test(&sd->groups->ref)) { 6064 kfree(sd->groups->sgc); 6065 kfree(sd->groups); 6066 } 6067 kfree(sd); 6068 } 6069 6070 static void destroy_sched_domain(struct sched_domain *sd, int cpu) 6071 { 6072 call_rcu(&sd->rcu, free_sched_domain); 6073 } 6074 6075 static void destroy_sched_domains(struct sched_domain *sd, int cpu) 6076 { 6077 for (; sd; sd = sd->parent) 6078 destroy_sched_domain(sd, cpu); 6079 } 6080 6081 /* 6082 * Keep a special pointer to the highest sched_domain that has 6083 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this 6084 * allows us to avoid some pointer chasing select_idle_sibling(). 6085 * 6086 * Also keep a unique ID per domain (we use the first cpu number in 6087 * the cpumask of the domain), this allows us to quickly tell if 6088 * two cpus are in the same cache domain, see cpus_share_cache(). 
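 *
 * That test is then essentially a per-cpu compare, along the lines of:
 *
 *	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);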
6089 */ 6090 DEFINE_PER_CPU(struct sched_domain *, sd_llc); 6091 DEFINE_PER_CPU(int, sd_llc_size); 6092 DEFINE_PER_CPU(int, sd_llc_id); 6093 DEFINE_PER_CPU(struct sched_domain *, sd_numa); 6094 DEFINE_PER_CPU(struct sched_domain *, sd_busy); 6095 DEFINE_PER_CPU(struct sched_domain *, sd_asym); 6096 6097 static void update_top_cache_domain(int cpu) 6098 { 6099 struct sched_domain *sd; 6100 struct sched_domain *busy_sd = NULL; 6101 int id = cpu; 6102 int size = 1; 6103 6104 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); 6105 if (sd) { 6106 id = cpumask_first(sched_domain_span(sd)); 6107 size = cpumask_weight(sched_domain_span(sd)); 6108 busy_sd = sd->parent; /* sd_busy */ 6109 } 6110 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd); 6111 6112 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 6113 per_cpu(sd_llc_size, cpu) = size; 6114 per_cpu(sd_llc_id, cpu) = id; 6115 6116 sd = lowest_flag_domain(cpu, SD_NUMA); 6117 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); 6118 6119 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); 6120 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); 6121 } 6122 6123 /* 6124 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must 6125 * hold the hotplug lock. 6126 */ 6127 static void 6128 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) 6129 { 6130 struct rq *rq = cpu_rq(cpu); 6131 struct sched_domain *tmp; 6132 6133 /* Remove the sched domains which do not contribute to scheduling. */ 6134 for (tmp = sd; tmp; ) { 6135 struct sched_domain *parent = tmp->parent; 6136 if (!parent) 6137 break; 6138 6139 if (sd_parent_degenerate(tmp, parent)) { 6140 tmp->parent = parent->parent; 6141 if (parent->parent) 6142 parent->parent->child = tmp; 6143 /* 6144 * Transfer SD_PREFER_SIBLING down in case of a 6145 * degenerate parent; the spans match for this 6146 * so the property transfers. 6147 */ 6148 if (parent->flags & SD_PREFER_SIBLING) 6149 tmp->flags |= SD_PREFER_SIBLING; 6150 destroy_sched_domain(parent, cpu); 6151 } else 6152 tmp = tmp->parent; 6153 } 6154 6155 if (sd && sd_degenerate(sd)) { 6156 tmp = sd; 6157 sd = sd->parent; 6158 destroy_sched_domain(tmp, cpu); 6159 if (sd) 6160 sd->child = NULL; 6161 } 6162 6163 sched_domain_debug(sd, cpu); 6164 6165 rq_attach_root(rq, rd); 6166 tmp = rq->sd; 6167 rcu_assign_pointer(rq->sd, sd); 6168 destroy_sched_domains(tmp, cpu); 6169 6170 update_top_cache_domain(cpu); 6171 } 6172 6173 /* Setup the mask of cpus configured for isolated domains */ 6174 static int __init isolated_cpu_setup(char *str) 6175 { 6176 alloc_bootmem_cpumask_var(&cpu_isolated_map); 6177 cpulist_parse(str, cpu_isolated_map); 6178 return 1; 6179 } 6180 6181 __setup("isolcpus=", isolated_cpu_setup); 6182 6183 struct s_data { 6184 struct sched_domain ** __percpu sd; 6185 struct root_domain *rd; 6186 }; 6187 6188 enum s_alloc { 6189 sa_rootdomain, 6190 sa_sd, 6191 sa_sd_storage, 6192 sa_none, 6193 }; 6194 6195 /* 6196 * Build an iteration mask that can exclude certain CPUs from the upwards 6197 * domain traversal. 6198 * 6199 * Asymmetric node setups can result in situations where the domain tree is of 6200 * unequal depth, make sure to skip domains that already cover the entire 6201 * range. 6202 * 6203 * In that case build_sched_domains() will have terminated the iteration early 6204 * and our sibling sd spans will be empty. Domains should always include the 6205 * cpu they're built on, so check that. 
6206 * 6207 */ 6208 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) 6209 { 6210 const struct cpumask *span = sched_domain_span(sd); 6211 struct sd_data *sdd = sd->private; 6212 struct sched_domain *sibling; 6213 int i; 6214 6215 for_each_cpu(i, span) { 6216 sibling = *per_cpu_ptr(sdd->sd, i); 6217 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 6218 continue; 6219 6220 cpumask_set_cpu(i, sched_group_mask(sg)); 6221 } 6222 } 6223 6224 /* 6225 * Return the canonical balance cpu for this group, this is the first cpu 6226 * of this group that's also in the iteration mask. 6227 */ 6228 int group_balance_cpu(struct sched_group *sg) 6229 { 6230 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); 6231 } 6232 6233 static int 6234 build_overlap_sched_groups(struct sched_domain *sd, int cpu) 6235 { 6236 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; 6237 const struct cpumask *span = sched_domain_span(sd); 6238 struct cpumask *covered = sched_domains_tmpmask; 6239 struct sd_data *sdd = sd->private; 6240 struct sched_domain *sibling; 6241 int i; 6242 6243 cpumask_clear(covered); 6244 6245 for_each_cpu(i, span) { 6246 struct cpumask *sg_span; 6247 6248 if (cpumask_test_cpu(i, covered)) 6249 continue; 6250 6251 sibling = *per_cpu_ptr(sdd->sd, i); 6252 6253 /* See the comment near build_group_mask(). */ 6254 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 6255 continue; 6256 6257 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6258 GFP_KERNEL, cpu_to_node(cpu)); 6259 6260 if (!sg) 6261 goto fail; 6262 6263 sg_span = sched_group_cpus(sg); 6264 if (sibling->child) 6265 cpumask_copy(sg_span, sched_domain_span(sibling->child)); 6266 else 6267 cpumask_set_cpu(i, sg_span); 6268 6269 cpumask_or(covered, covered, sg_span); 6270 6271 sg->sgc = *per_cpu_ptr(sdd->sgc, i); 6272 if (atomic_inc_return(&sg->sgc->ref) == 1) 6273 build_group_mask(sd, sg); 6274 6275 /* 6276 * Initialize sgc->capacity such that even if we mess up the 6277 * domains and no possible iteration will get us here, we won't 6278 * die on a /0 trap. 6279 */ 6280 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); 6281 6282 /* 6283 * Make sure the first group of this domain contains the 6284 * canonical balance cpu. Otherwise the sched_domain iteration 6285 * breaks. See update_sg_lb_stats(). 6286 */ 6287 if ((!groups && cpumask_test_cpu(cpu, sg_span)) || 6288 group_balance_cpu(sg) == cpu) 6289 groups = sg; 6290 6291 if (!first) 6292 first = sg; 6293 if (last) 6294 last->next = sg; 6295 last = sg; 6296 last->next = first; 6297 } 6298 sd->groups = groups; 6299 6300 return 0; 6301 6302 fail: 6303 free_sched_groups(first, 0); 6304 6305 return -ENOMEM; 6306 } 6307 6308 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) 6309 { 6310 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 6311 struct sched_domain *child = sd->child; 6312 6313 if (child) 6314 cpu = cpumask_first(sched_domain_span(child)); 6315 6316 if (sg) { 6317 *sg = *per_cpu_ptr(sdd->sg, cpu); 6318 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); 6319 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */ 6320 } 6321 6322 return cpu; 6323 } 6324 6325 /* 6326 * build_sched_groups will build a circular linked list of the groups 6327 * covered by the given span, and will set each group's ->cpumask correctly, 6328 * and ->cpu_capacity to 0. 
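 *
 * As an illustration of the result: for an MC domain spanning cpus 0-3 whose
 * SMT child domains span {0,1} and {2,3}, the code below links two groups
 * into the circle
 *
 *	[0,1] -> [2,3] -> [0,1] -> ...
 *
 * with each group's cpumask taken from the corresponding child domain's span.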
6329 * 6330 * Assumes the sched_domain tree is fully constructed 6331 */ 6332 static int 6333 build_sched_groups(struct sched_domain *sd, int cpu) 6334 { 6335 struct sched_group *first = NULL, *last = NULL; 6336 struct sd_data *sdd = sd->private; 6337 const struct cpumask *span = sched_domain_span(sd); 6338 struct cpumask *covered; 6339 int i; 6340 6341 get_group(cpu, sdd, &sd->groups); 6342 atomic_inc(&sd->groups->ref); 6343 6344 if (cpu != cpumask_first(span)) 6345 return 0; 6346 6347 lockdep_assert_held(&sched_domains_mutex); 6348 covered = sched_domains_tmpmask; 6349 6350 cpumask_clear(covered); 6351 6352 for_each_cpu(i, span) { 6353 struct sched_group *sg; 6354 int group, j; 6355 6356 if (cpumask_test_cpu(i, covered)) 6357 continue; 6358 6359 group = get_group(i, sdd, &sg); 6360 cpumask_setall(sched_group_mask(sg)); 6361 6362 for_each_cpu(j, span) { 6363 if (get_group(j, sdd, NULL) != group) 6364 continue; 6365 6366 cpumask_set_cpu(j, covered); 6367 cpumask_set_cpu(j, sched_group_cpus(sg)); 6368 } 6369 6370 if (!first) 6371 first = sg; 6372 if (last) 6373 last->next = sg; 6374 last = sg; 6375 } 6376 last->next = first; 6377 6378 return 0; 6379 } 6380 6381 /* 6382 * Initialize sched groups cpu_capacity. 6383 * 6384 * cpu_capacity indicates the capacity of sched group, which is used while 6385 * distributing the load between different sched groups in a sched domain. 6386 * Typically cpu_capacity for all the groups in a sched domain will be same 6387 * unless there are asymmetries in the topology. If there are asymmetries, 6388 * group having more cpu_capacity will pickup more load compared to the 6389 * group having less cpu_capacity. 6390 */ 6391 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) 6392 { 6393 struct sched_group *sg = sd->groups; 6394 6395 WARN_ON(!sg); 6396 6397 do { 6398 sg->group_weight = cpumask_weight(sched_group_cpus(sg)); 6399 sg = sg->next; 6400 } while (sg != sd->groups); 6401 6402 if (cpu != group_balance_cpu(sg)) 6403 return; 6404 6405 update_group_capacity(sd, cpu); 6406 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight); 6407 } 6408 6409 /* 6410 * Initializers for schedule domains 6411 * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 6412 */ 6413 6414 static int default_relax_domain_level = -1; 6415 int sched_domain_level_max; 6416 6417 static int __init setup_relax_domain_level(char *str) 6418 { 6419 if (kstrtoint(str, 0, &default_relax_domain_level)) 6420 pr_warn("Unable to set relax_domain_level\n"); 6421 6422 return 1; 6423 } 6424 __setup("relax_domain_level=", setup_relax_domain_level); 6425 6426 static void set_domain_attribute(struct sched_domain *sd, 6427 struct sched_domain_attr *attr) 6428 { 6429 int request; 6430 6431 if (!attr || attr->relax_domain_level < 0) { 6432 if (default_relax_domain_level < 0) 6433 return; 6434 else 6435 request = default_relax_domain_level; 6436 } else 6437 request = attr->relax_domain_level; 6438 if (request < sd->level) { 6439 /* turn off idle balance on this domain */ 6440 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 6441 } else { 6442 /* turn on idle balance on this domain */ 6443 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 6444 } 6445 } 6446 6447 static void __sdt_free(const struct cpumask *cpu_map); 6448 static int __sdt_alloc(const struct cpumask *cpu_map); 6449 6450 static void __free_domain_allocs(struct s_data *d, enum s_alloc what, 6451 const struct cpumask *cpu_map) 6452 { 6453 switch (what) { 6454 case sa_rootdomain: 6455 if 
(!atomic_read(&d->rd->refcount)) 6456 free_rootdomain(&d->rd->rcu); /* fall through */ 6457 case sa_sd: 6458 free_percpu(d->sd); /* fall through */ 6459 case sa_sd_storage: 6460 __sdt_free(cpu_map); /* fall through */ 6461 case sa_none: 6462 break; 6463 } 6464 } 6465 6466 static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, 6467 const struct cpumask *cpu_map) 6468 { 6469 memset(d, 0, sizeof(*d)); 6470 6471 if (__sdt_alloc(cpu_map)) 6472 return sa_sd_storage; 6473 d->sd = alloc_percpu(struct sched_domain *); 6474 if (!d->sd) 6475 return sa_sd_storage; 6476 d->rd = alloc_rootdomain(); 6477 if (!d->rd) 6478 return sa_sd; 6479 return sa_rootdomain; 6480 } 6481 6482 /* 6483 * NULL the sd_data elements we've used to build the sched_domain and 6484 * sched_group structure so that the subsequent __free_domain_allocs() 6485 * will not free the data we're using. 6486 */ 6487 static void claim_allocations(int cpu, struct sched_domain *sd) 6488 { 6489 struct sd_data *sdd = sd->private; 6490 6491 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 6492 *per_cpu_ptr(sdd->sd, cpu) = NULL; 6493 6494 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 6495 *per_cpu_ptr(sdd->sg, cpu) = NULL; 6496 6497 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 6498 *per_cpu_ptr(sdd->sgc, cpu) = NULL; 6499 } 6500 6501 #ifdef CONFIG_NUMA 6502 static int sched_domains_numa_levels; 6503 enum numa_topology_type sched_numa_topology_type; 6504 static int *sched_domains_numa_distance; 6505 int sched_max_numa_distance; 6506 static struct cpumask ***sched_domains_numa_masks; 6507 static int sched_domains_curr_level; 6508 #endif 6509 6510 /* 6511 * SD_flags allowed in topology descriptions. 6512 * 6513 * SD_SHARE_CPUCAPACITY - describes SMT topologies 6514 * SD_SHARE_PKG_RESOURCES - describes shared caches 6515 * SD_NUMA - describes NUMA topologies 6516 * SD_SHARE_POWERDOMAIN - describes shared power domain 6517 * 6518 * Odd one out: 6519 * SD_ASYM_PACKING - describes SMT quirks 6520 */ 6521 #define TOPOLOGY_SD_FLAGS \ 6522 (SD_SHARE_CPUCAPACITY | \ 6523 SD_SHARE_PKG_RESOURCES | \ 6524 SD_NUMA | \ 6525 SD_ASYM_PACKING | \ 6526 SD_SHARE_POWERDOMAIN) 6527 6528 static struct sched_domain * 6529 sd_init(struct sched_domain_topology_level *tl, int cpu) 6530 { 6531 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); 6532 int sd_weight, sd_flags = 0; 6533 6534 #ifdef CONFIG_NUMA 6535 /* 6536 * Ugly hack to pass state to sd_numa_mask()... 
6537 */ 6538 sched_domains_curr_level = tl->numa_level; 6539 #endif 6540 6541 sd_weight = cpumask_weight(tl->mask(cpu)); 6542 6543 if (tl->sd_flags) 6544 sd_flags = (*tl->sd_flags)(); 6545 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, 6546 "wrong sd_flags in topology description\n")) 6547 sd_flags &= ~TOPOLOGY_SD_FLAGS; 6548 6549 *sd = (struct sched_domain){ 6550 .min_interval = sd_weight, 6551 .max_interval = 2*sd_weight, 6552 .busy_factor = 32, 6553 .imbalance_pct = 125, 6554 6555 .cache_nice_tries = 0, 6556 .busy_idx = 0, 6557 .idle_idx = 0, 6558 .newidle_idx = 0, 6559 .wake_idx = 0, 6560 .forkexec_idx = 0, 6561 6562 .flags = 1*SD_LOAD_BALANCE 6563 | 1*SD_BALANCE_NEWIDLE 6564 | 1*SD_BALANCE_EXEC 6565 | 1*SD_BALANCE_FORK 6566 | 0*SD_BALANCE_WAKE 6567 | 1*SD_WAKE_AFFINE 6568 | 0*SD_SHARE_CPUCAPACITY 6569 | 0*SD_SHARE_PKG_RESOURCES 6570 | 0*SD_SERIALIZE 6571 | 0*SD_PREFER_SIBLING 6572 | 0*SD_NUMA 6573 | sd_flags 6574 , 6575 6576 .last_balance = jiffies, 6577 .balance_interval = sd_weight, 6578 .smt_gain = 0, 6579 .max_newidle_lb_cost = 0, 6580 .next_decay_max_lb_cost = jiffies, 6581 #ifdef CONFIG_SCHED_DEBUG 6582 .name = tl->name, 6583 #endif 6584 }; 6585 6586 /* 6587 * Convert topological properties into behaviour. 6588 */ 6589 6590 if (sd->flags & SD_SHARE_CPUCAPACITY) { 6591 sd->flags |= SD_PREFER_SIBLING; 6592 sd->imbalance_pct = 110; 6593 sd->smt_gain = 1178; /* ~15% */ 6594 6595 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { 6596 sd->imbalance_pct = 117; 6597 sd->cache_nice_tries = 1; 6598 sd->busy_idx = 2; 6599 6600 #ifdef CONFIG_NUMA 6601 } else if (sd->flags & SD_NUMA) { 6602 sd->cache_nice_tries = 2; 6603 sd->busy_idx = 3; 6604 sd->idle_idx = 2; 6605 6606 sd->flags |= SD_SERIALIZE; 6607 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { 6608 sd->flags &= ~(SD_BALANCE_EXEC | 6609 SD_BALANCE_FORK | 6610 SD_WAKE_AFFINE); 6611 } 6612 6613 #endif 6614 } else { 6615 sd->flags |= SD_PREFER_SIBLING; 6616 sd->cache_nice_tries = 1; 6617 sd->busy_idx = 2; 6618 sd->idle_idx = 1; 6619 } 6620 6621 sd->private = &tl->data; 6622 6623 return sd; 6624 } 6625 6626 /* 6627 * Topology list, bottom-up. 
6628 */ 6629 static struct sched_domain_topology_level default_topology[] = { 6630 #ifdef CONFIG_SCHED_SMT 6631 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 6632 #endif 6633 #ifdef CONFIG_SCHED_MC 6634 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 6635 #endif 6636 { cpu_cpu_mask, SD_INIT_NAME(DIE) }, 6637 { NULL, }, 6638 }; 6639 6640 static struct sched_domain_topology_level *sched_domain_topology = 6641 default_topology; 6642 6643 #define for_each_sd_topology(tl) \ 6644 for (tl = sched_domain_topology; tl->mask; tl++) 6645 6646 void set_sched_topology(struct sched_domain_topology_level *tl) 6647 { 6648 sched_domain_topology = tl; 6649 } 6650 6651 #ifdef CONFIG_NUMA 6652 6653 static const struct cpumask *sd_numa_mask(int cpu) 6654 { 6655 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 6656 } 6657 6658 static void sched_numa_warn(const char *str) 6659 { 6660 static int done = false; 6661 int i,j; 6662 6663 if (done) 6664 return; 6665 6666 done = true; 6667 6668 printk(KERN_WARNING "ERROR: %s\n\n", str); 6669 6670 for (i = 0; i < nr_node_ids; i++) { 6671 printk(KERN_WARNING " "); 6672 for (j = 0; j < nr_node_ids; j++) 6673 printk(KERN_CONT "%02d ", node_distance(i,j)); 6674 printk(KERN_CONT "\n"); 6675 } 6676 printk(KERN_WARNING "\n"); 6677 } 6678 6679 bool find_numa_distance(int distance) 6680 { 6681 int i; 6682 6683 if (distance == node_distance(0, 0)) 6684 return true; 6685 6686 for (i = 0; i < sched_domains_numa_levels; i++) { 6687 if (sched_domains_numa_distance[i] == distance) 6688 return true; 6689 } 6690 6691 return false; 6692 } 6693 6694 /* 6695 * A system can have three types of NUMA topology: 6696 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system 6697 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes 6698 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane 6699 * 6700 * The difference between a glueless mesh topology and a backplane 6701 * topology lies in whether communication between not directly 6702 * connected nodes goes through intermediary nodes (where programs 6703 * could run), or through backplane controllers. This affects 6704 * placement of programs. 6705 * 6706 * The type of topology can be discerned with the following tests: 6707 * - If the maximum distance between any nodes is 1 hop, the system 6708 * is directly connected. 6709 * - If for two nodes A and B, located N > 1 hops away from each other, 6710 * there is an intermediary node C, which is < N hops away from both 6711 * nodes A and B, the system is a glueless mesh. 6712 */ 6713 static void init_numa_topology_type(void) 6714 { 6715 int a, b, c, n; 6716 6717 n = sched_max_numa_distance; 6718 6719 if (sched_domains_numa_levels <= 1) { 6720 sched_numa_topology_type = NUMA_DIRECT; 6721 return; 6722 } 6723 6724 for_each_online_node(a) { 6725 for_each_online_node(b) { 6726 /* Find two nodes furthest removed from each other. */ 6727 if (node_distance(a, b) < n) 6728 continue; 6729 6730 /* Is there an intermediary node between a and b? 
*/ 6731 for_each_online_node(c) { 6732 if (node_distance(a, c) < n && 6733 node_distance(b, c) < n) { 6734 sched_numa_topology_type = 6735 NUMA_GLUELESS_MESH; 6736 return; 6737 } 6738 } 6739 6740 sched_numa_topology_type = NUMA_BACKPLANE; 6741 return; 6742 } 6743 } 6744 } 6745 6746 static void sched_init_numa(void) 6747 { 6748 int next_distance, curr_distance = node_distance(0, 0); 6749 struct sched_domain_topology_level *tl; 6750 int level = 0; 6751 int i, j, k; 6752 6753 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); 6754 if (!sched_domains_numa_distance) 6755 return; 6756 6757 /* 6758 * O(nr_nodes^2) deduplicating selection sort -- in order to find the 6759 * unique distances in the node_distance() table. 6760 * 6761 * Assumes node_distance(0,j) includes all distances in 6762 * node_distance(i,j) in order to avoid cubic time. 6763 */ 6764 next_distance = curr_distance; 6765 for (i = 0; i < nr_node_ids; i++) { 6766 for (j = 0; j < nr_node_ids; j++) { 6767 for (k = 0; k < nr_node_ids; k++) { 6768 int distance = node_distance(i, k); 6769 6770 if (distance > curr_distance && 6771 (distance < next_distance || 6772 next_distance == curr_distance)) 6773 next_distance = distance; 6774 6775 /* 6776 * While not a strong assumption it would be nice to know 6777 * about cases where if node A is connected to B, B is not 6778 * equally connected to A. 6779 */ 6780 if (sched_debug() && node_distance(k, i) != distance) 6781 sched_numa_warn("Node-distance not symmetric"); 6782 6783 if (sched_debug() && i && !find_numa_distance(distance)) 6784 sched_numa_warn("Node-0 not representative"); 6785 } 6786 if (next_distance != curr_distance) { 6787 sched_domains_numa_distance[level++] = next_distance; 6788 sched_domains_numa_levels = level; 6789 curr_distance = next_distance; 6790 } else break; 6791 } 6792 6793 /* 6794 * In case of sched_debug() we verify the above assumption. 6795 */ 6796 if (!sched_debug()) 6797 break; 6798 } 6799 6800 if (!level) 6801 return; 6802 6803 /* 6804 * 'level' contains the number of unique distances, excluding the 6805 * identity distance node_distance(i,i). 6806 * 6807 * The sched_domains_numa_distance[] array includes the actual distance 6808 * numbers. 6809 */ 6810 6811 /* 6812 * Here, we should temporarily reset sched_domains_numa_levels to 0. 6813 * If it fails to allocate memory for array sched_domains_numa_masks[][], 6814 * the array will contain less then 'level' members. This could be 6815 * dangerous when we use it to iterate array sched_domains_numa_masks[][] 6816 * in other functions. 6817 * 6818 * We reset it to 'level' at the end of this function. 6819 */ 6820 sched_domains_numa_levels = 0; 6821 6822 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); 6823 if (!sched_domains_numa_masks) 6824 return; 6825 6826 /* 6827 * Now for each level, construct a mask per node which contains all 6828 * cpus of nodes that are that many hops away from us. 
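 *
 * For example, with three nodes in a line and a node_distance() table of
 *
 *	10 20 30
 *	20 10 20
 *	30 20 10
 *
 * the scan above recorded the remote distances { 20, 30 }; a node's level 0
 * mask then covers itself plus its direct neighbours (distance <= 20), and
 * its level 1 mask covers all nodes (distance <= 30).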
6829 */ 6830 for (i = 0; i < level; i++) { 6831 sched_domains_numa_masks[i] = 6832 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 6833 if (!sched_domains_numa_masks[i]) 6834 return; 6835 6836 for (j = 0; j < nr_node_ids; j++) { 6837 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 6838 if (!mask) 6839 return; 6840 6841 sched_domains_numa_masks[i][j] = mask; 6842 6843 for_each_node(k) { 6844 if (node_distance(j, k) > sched_domains_numa_distance[i]) 6845 continue; 6846 6847 cpumask_or(mask, mask, cpumask_of_node(k)); 6848 } 6849 } 6850 } 6851 6852 /* Compute default topology size */ 6853 for (i = 0; sched_domain_topology[i].mask; i++); 6854 6855 tl = kzalloc((i + level + 1) * 6856 sizeof(struct sched_domain_topology_level), GFP_KERNEL); 6857 if (!tl) 6858 return; 6859 6860 /* 6861 * Copy the default topology bits.. 6862 */ 6863 for (i = 0; sched_domain_topology[i].mask; i++) 6864 tl[i] = sched_domain_topology[i]; 6865 6866 /* 6867 * .. and append 'j' levels of NUMA goodness. 6868 */ 6869 for (j = 0; j < level; i++, j++) { 6870 tl[i] = (struct sched_domain_topology_level){ 6871 .mask = sd_numa_mask, 6872 .sd_flags = cpu_numa_flags, 6873 .flags = SDTL_OVERLAP, 6874 .numa_level = j, 6875 SD_INIT_NAME(NUMA) 6876 }; 6877 } 6878 6879 sched_domain_topology = tl; 6880 6881 sched_domains_numa_levels = level; 6882 sched_max_numa_distance = sched_domains_numa_distance[level - 1]; 6883 6884 init_numa_topology_type(); 6885 } 6886 6887 static void sched_domains_numa_masks_set(int cpu) 6888 { 6889 int i, j; 6890 int node = cpu_to_node(cpu); 6891 6892 for (i = 0; i < sched_domains_numa_levels; i++) { 6893 for (j = 0; j < nr_node_ids; j++) { 6894 if (node_distance(j, node) <= sched_domains_numa_distance[i]) 6895 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 6896 } 6897 } 6898 } 6899 6900 static void sched_domains_numa_masks_clear(int cpu) 6901 { 6902 int i, j; 6903 for (i = 0; i < sched_domains_numa_levels; i++) { 6904 for (j = 0; j < nr_node_ids; j++) 6905 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 6906 } 6907 } 6908 6909 /* 6910 * Update sched_domains_numa_masks[level][node] array when new cpus 6911 * are onlined. 
6912 */ 6913 static int sched_domains_numa_masks_update(struct notifier_block *nfb, 6914 unsigned long action, 6915 void *hcpu) 6916 { 6917 int cpu = (long)hcpu; 6918 6919 switch (action & ~CPU_TASKS_FROZEN) { 6920 case CPU_ONLINE: 6921 sched_domains_numa_masks_set(cpu); 6922 break; 6923 6924 case CPU_DEAD: 6925 sched_domains_numa_masks_clear(cpu); 6926 break; 6927 6928 default: 6929 return NOTIFY_DONE; 6930 } 6931 6932 return NOTIFY_OK; 6933 } 6934 #else 6935 static inline void sched_init_numa(void) 6936 { 6937 } 6938 6939 static int sched_domains_numa_masks_update(struct notifier_block *nfb, 6940 unsigned long action, 6941 void *hcpu) 6942 { 6943 return 0; 6944 } 6945 #endif /* CONFIG_NUMA */ 6946 6947 static int __sdt_alloc(const struct cpumask *cpu_map) 6948 { 6949 struct sched_domain_topology_level *tl; 6950 int j; 6951 6952 for_each_sd_topology(tl) { 6953 struct sd_data *sdd = &tl->data; 6954 6955 sdd->sd = alloc_percpu(struct sched_domain *); 6956 if (!sdd->sd) 6957 return -ENOMEM; 6958 6959 sdd->sg = alloc_percpu(struct sched_group *); 6960 if (!sdd->sg) 6961 return -ENOMEM; 6962 6963 sdd->sgc = alloc_percpu(struct sched_group_capacity *); 6964 if (!sdd->sgc) 6965 return -ENOMEM; 6966 6967 for_each_cpu(j, cpu_map) { 6968 struct sched_domain *sd; 6969 struct sched_group *sg; 6970 struct sched_group_capacity *sgc; 6971 6972 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 6973 GFP_KERNEL, cpu_to_node(j)); 6974 if (!sd) 6975 return -ENOMEM; 6976 6977 *per_cpu_ptr(sdd->sd, j) = sd; 6978 6979 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6980 GFP_KERNEL, cpu_to_node(j)); 6981 if (!sg) 6982 return -ENOMEM; 6983 6984 sg->next = sg; 6985 6986 *per_cpu_ptr(sdd->sg, j) = sg; 6987 6988 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 6989 GFP_KERNEL, cpu_to_node(j)); 6990 if (!sgc) 6991 return -ENOMEM; 6992 6993 *per_cpu_ptr(sdd->sgc, j) = sgc; 6994 } 6995 } 6996 6997 return 0; 6998 } 6999 7000 static void __sdt_free(const struct cpumask *cpu_map) 7001 { 7002 struct sched_domain_topology_level *tl; 7003 int j; 7004 7005 for_each_sd_topology(tl) { 7006 struct sd_data *sdd = &tl->data; 7007 7008 for_each_cpu(j, cpu_map) { 7009 struct sched_domain *sd; 7010 7011 if (sdd->sd) { 7012 sd = *per_cpu_ptr(sdd->sd, j); 7013 if (sd && (sd->flags & SD_OVERLAP)) 7014 free_sched_groups(sd->groups, 0); 7015 kfree(*per_cpu_ptr(sdd->sd, j)); 7016 } 7017 7018 if (sdd->sg) 7019 kfree(*per_cpu_ptr(sdd->sg, j)); 7020 if (sdd->sgc) 7021 kfree(*per_cpu_ptr(sdd->sgc, j)); 7022 } 7023 free_percpu(sdd->sd); 7024 sdd->sd = NULL; 7025 free_percpu(sdd->sg); 7026 sdd->sg = NULL; 7027 free_percpu(sdd->sgc); 7028 sdd->sgc = NULL; 7029 } 7030 } 7031 7032 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 7033 const struct cpumask *cpu_map, struct sched_domain_attr *attr, 7034 struct sched_domain *child, int cpu) 7035 { 7036 struct sched_domain *sd = sd_init(tl, cpu); 7037 if (!sd) 7038 return child; 7039 7040 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); 7041 if (child) { 7042 sd->level = child->level + 1; 7043 sched_domain_level_max = max(sched_domain_level_max, sd->level); 7044 child->parent = sd; 7045 sd->child = child; 7046 7047 if (!cpumask_subset(sched_domain_span(child), 7048 sched_domain_span(sd))) { 7049 pr_err("BUG: arch topology borken\n"); 7050 #ifdef CONFIG_SCHED_DEBUG 7051 pr_err(" the %s domain not a subset of the %s domain\n", 7052 child->name, sd->name); 7053 #endif 7054 /* Fixup, ensure @sd has at least @child 
cpus. */ 7055 cpumask_or(sched_domain_span(sd), 7056 sched_domain_span(sd), 7057 sched_domain_span(child)); 7058 } 7059 7060 } 7061 set_domain_attribute(sd, attr); 7062 7063 return sd; 7064 } 7065 7066 /* 7067 * Build sched domains for a given set of cpus and attach the sched domains 7068 * to the individual cpus 7069 */ 7070 static int build_sched_domains(const struct cpumask *cpu_map, 7071 struct sched_domain_attr *attr) 7072 { 7073 enum s_alloc alloc_state; 7074 struct sched_domain *sd; 7075 struct s_data d; 7076 int i, ret = -ENOMEM; 7077 7078 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 7079 if (alloc_state != sa_rootdomain) 7080 goto error; 7081 7082 /* Set up domains for cpus specified by the cpu_map. */ 7083 for_each_cpu(i, cpu_map) { 7084 struct sched_domain_topology_level *tl; 7085 7086 sd = NULL; 7087 for_each_sd_topology(tl) { 7088 sd = build_sched_domain(tl, cpu_map, attr, sd, i); 7089 if (tl == sched_domain_topology) 7090 *per_cpu_ptr(d.sd, i) = sd; 7091 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) 7092 sd->flags |= SD_OVERLAP; 7093 if (cpumask_equal(cpu_map, sched_domain_span(sd))) 7094 break; 7095 } 7096 } 7097 7098 /* Build the groups for the domains */ 7099 for_each_cpu(i, cpu_map) { 7100 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 7101 sd->span_weight = cpumask_weight(sched_domain_span(sd)); 7102 if (sd->flags & SD_OVERLAP) { 7103 if (build_overlap_sched_groups(sd, i)) 7104 goto error; 7105 } else { 7106 if (build_sched_groups(sd, i)) 7107 goto error; 7108 } 7109 } 7110 } 7111 7112 /* Calculate CPU capacity for physical packages and nodes */ 7113 for (i = nr_cpumask_bits-1; i >= 0; i--) { 7114 if (!cpumask_test_cpu(i, cpu_map)) 7115 continue; 7116 7117 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 7118 claim_allocations(i, sd); 7119 init_sched_groups_capacity(i, sd); 7120 } 7121 } 7122 7123 /* Attach the domains */ 7124 rcu_read_lock(); 7125 for_each_cpu(i, cpu_map) { 7126 sd = *per_cpu_ptr(d.sd, i); 7127 cpu_attach_domain(sd, d.rd, i); 7128 } 7129 rcu_read_unlock(); 7130 7131 ret = 0; 7132 error: 7133 __free_domain_allocs(&d, alloc_state, cpu_map); 7134 return ret; 7135 } 7136 7137 static cpumask_var_t *doms_cur; /* current sched domains */ 7138 static int ndoms_cur; /* number of sched domains in 'doms_cur' */ 7139 static struct sched_domain_attr *dattr_cur; 7140 /* attribues of custom domains in 'doms_cur' */ 7141 7142 /* 7143 * Special case: If a kmalloc of a doms_cur partition (array of 7144 * cpumask) fails, then fallback to a single sched domain, 7145 * as determined by the single cpumask fallback_doms. 7146 */ 7147 static cpumask_var_t fallback_doms; 7148 7149 /* 7150 * arch_update_cpu_topology lets virtualized architectures update the 7151 * cpu core maps. It is supposed to return 1 if the topology changed 7152 * or 0 if it stayed the same. 
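 *
 * An architecture (or hypervisor backend) can override the weak stub below;
 * a rough sketch, with a hypothetical helper that refreshes the core maps
 * and reports whether anything moved, would be:
 *
 *	int arch_update_cpu_topology(void)
 *	{
 *		return refresh_core_masks() ? 1 : 0;
 *	}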
7153 */ 7154 int __weak arch_update_cpu_topology(void) 7155 { 7156 return 0; 7157 } 7158 7159 cpumask_var_t *alloc_sched_domains(unsigned int ndoms) 7160 { 7161 int i; 7162 cpumask_var_t *doms; 7163 7164 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); 7165 if (!doms) 7166 return NULL; 7167 for (i = 0; i < ndoms; i++) { 7168 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { 7169 free_sched_domains(doms, i); 7170 return NULL; 7171 } 7172 } 7173 return doms; 7174 } 7175 7176 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) 7177 { 7178 unsigned int i; 7179 for (i = 0; i < ndoms; i++) 7180 free_cpumask_var(doms[i]); 7181 kfree(doms); 7182 } 7183 7184 /* 7185 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 7186 * For now this just excludes isolated cpus, but could be used to 7187 * exclude other special cases in the future. 7188 */ 7189 static int init_sched_domains(const struct cpumask *cpu_map) 7190 { 7191 int err; 7192 7193 arch_update_cpu_topology(); 7194 ndoms_cur = 1; 7195 doms_cur = alloc_sched_domains(ndoms_cur); 7196 if (!doms_cur) 7197 doms_cur = &fallback_doms; 7198 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); 7199 err = build_sched_domains(doms_cur[0], NULL); 7200 register_sched_domain_sysctl(); 7201 7202 return err; 7203 } 7204 7205 /* 7206 * Detach sched domains from a group of cpus specified in cpu_map 7207 * These cpus will now be attached to the NULL domain 7208 */ 7209 static void detach_destroy_domains(const struct cpumask *cpu_map) 7210 { 7211 int i; 7212 7213 rcu_read_lock(); 7214 for_each_cpu(i, cpu_map) 7215 cpu_attach_domain(NULL, &def_root_domain, i); 7216 rcu_read_unlock(); 7217 } 7218 7219 /* handle null as "default" */ 7220 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, 7221 struct sched_domain_attr *new, int idx_new) 7222 { 7223 struct sched_domain_attr tmp; 7224 7225 /* fast path */ 7226 if (!new && !cur) 7227 return 1; 7228 7229 tmp = SD_ATTR_INIT; 7230 return !memcmp(cur ? (cur + idx_cur) : &tmp, 7231 new ? (new + idx_new) : &tmp, 7232 sizeof(struct sched_domain_attr)); 7233 } 7234 7235 /* 7236 * Partition sched domains as specified by the 'ndoms_new' 7237 * cpumasks in the array doms_new[] of cpumasks. This compares 7238 * doms_new[] to the current sched domain partitioning, doms_cur[]. 7239 * It destroys each deleted domain and builds each new domain. 7240 * 7241 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. 7242 * The masks don't intersect (don't overlap.) We should setup one 7243 * sched domain for each mask. CPUs not in any of the cpumasks will 7244 * not be load balanced. If the same cpumask appears both in the 7245 * current 'doms_cur' domains and in the new 'doms_new', we can leave 7246 * it as it is. 7247 * 7248 * The passed in 'doms_new' should be allocated using 7249 * alloc_sched_domains. This routine takes ownership of it and will 7250 * free_sched_domains it when done with it. If the caller failed the 7251 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, 7252 * and partition_sched_domains() will fallback to the single partition 7253 * 'fallback_doms', it also forces the domains to be rebuilt. 7254 * 7255 * If doms_new == NULL it will be replaced with cpu_online_mask. 7256 * ndoms_new == 0 is a special case for destroying existing domains, 7257 * and it will not create the default domain. 
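 *
 * A caller (the cpuset code is the main user) therefore typically follows a
 * sketch like the one below, handing ownership of the array to the
 * scheduler; cpus_of_partition() stands in for however the caller derives
 * each partition's mask:
 *
 *	doms = alloc_sched_domains(ndoms);
 *	for (i = 0; doms && i < ndoms; i++)
 *		cpumask_copy(doms[i], cpus_of_partition(i));
 *	partition_sched_domains(doms ? ndoms : 1, doms, NULL);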
7258 * 7259 * Call with hotplug lock held 7260 */ 7261 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 7262 struct sched_domain_attr *dattr_new) 7263 { 7264 int i, j, n; 7265 int new_topology; 7266 7267 mutex_lock(&sched_domains_mutex); 7268 7269 /* always unregister in case we don't destroy any domains */ 7270 unregister_sched_domain_sysctl(); 7271 7272 /* Let architecture update cpu core mappings. */ 7273 new_topology = arch_update_cpu_topology(); 7274 7275 n = doms_new ? ndoms_new : 0; 7276 7277 /* Destroy deleted domains */ 7278 for (i = 0; i < ndoms_cur; i++) { 7279 for (j = 0; j < n && !new_topology; j++) { 7280 if (cpumask_equal(doms_cur[i], doms_new[j]) 7281 && dattrs_equal(dattr_cur, i, dattr_new, j)) 7282 goto match1; 7283 } 7284 /* no match - a current sched domain not in new doms_new[] */ 7285 detach_destroy_domains(doms_cur[i]); 7286 match1: 7287 ; 7288 } 7289 7290 n = ndoms_cur; 7291 if (doms_new == NULL) { 7292 n = 0; 7293 doms_new = &fallback_doms; 7294 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); 7295 WARN_ON_ONCE(dattr_new); 7296 } 7297 7298 /* Build new domains */ 7299 for (i = 0; i < ndoms_new; i++) { 7300 for (j = 0; j < n && !new_topology; j++) { 7301 if (cpumask_equal(doms_new[i], doms_cur[j]) 7302 && dattrs_equal(dattr_new, i, dattr_cur, j)) 7303 goto match2; 7304 } 7305 /* no match - add a new doms_new */ 7306 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); 7307 match2: 7308 ; 7309 } 7310 7311 /* Remember the new sched domains */ 7312 if (doms_cur != &fallback_doms) 7313 free_sched_domains(doms_cur, ndoms_cur); 7314 kfree(dattr_cur); /* kfree(NULL) is safe */ 7315 doms_cur = doms_new; 7316 dattr_cur = dattr_new; 7317 ndoms_cur = ndoms_new; 7318 7319 register_sched_domain_sysctl(); 7320 7321 mutex_unlock(&sched_domains_mutex); 7322 } 7323 7324 static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ 7325 7326 /* 7327 * Update cpusets according to cpu_active mask. If cpusets are 7328 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 7329 * around partition_sched_domains(). 7330 * 7331 * If we come here as part of a suspend/resume, don't touch cpusets because we 7332 * want to restore it back to its original state upon resume anyway. 7333 */ 7334 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, 7335 void *hcpu) 7336 { 7337 switch (action) { 7338 case CPU_ONLINE_FROZEN: 7339 case CPU_DOWN_FAILED_FROZEN: 7340 7341 /* 7342 * num_cpus_frozen tracks how many CPUs are involved in suspend 7343 * resume sequence. As long as this is not the last online 7344 * operation in the resume sequence, just build a single sched 7345 * domain, ignoring cpusets. 7346 */ 7347 num_cpus_frozen--; 7348 if (likely(num_cpus_frozen)) { 7349 partition_sched_domains(1, NULL, NULL); 7350 break; 7351 } 7352 7353 /* 7354 * This is the last CPU online operation. So fall through and 7355 * restore the original sched domains by considering the 7356 * cpuset configurations. 
7357 */ 7358 7359 case CPU_ONLINE: 7360 cpuset_update_active_cpus(true); 7361 break; 7362 default: 7363 return NOTIFY_DONE; 7364 } 7365 return NOTIFY_OK; 7366 } 7367 7368 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, 7369 void *hcpu) 7370 { 7371 unsigned long flags; 7372 long cpu = (long)hcpu; 7373 struct dl_bw *dl_b; 7374 bool overflow; 7375 int cpus; 7376 7377 switch (action) { 7378 case CPU_DOWN_PREPARE: 7379 rcu_read_lock_sched(); 7380 dl_b = dl_bw_of(cpu); 7381 7382 raw_spin_lock_irqsave(&dl_b->lock, flags); 7383 cpus = dl_bw_cpus(cpu); 7384 overflow = __dl_overflow(dl_b, cpus, 0, 0); 7385 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7386 7387 rcu_read_unlock_sched(); 7388 7389 if (overflow) 7390 return notifier_from_errno(-EBUSY); 7391 cpuset_update_active_cpus(false); 7392 break; 7393 case CPU_DOWN_PREPARE_FROZEN: 7394 num_cpus_frozen++; 7395 partition_sched_domains(1, NULL, NULL); 7396 break; 7397 default: 7398 return NOTIFY_DONE; 7399 } 7400 return NOTIFY_OK; 7401 } 7402 7403 void __init sched_init_smp(void) 7404 { 7405 cpumask_var_t non_isolated_cpus; 7406 7407 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); 7408 alloc_cpumask_var(&fallback_doms, GFP_KERNEL); 7409 7410 sched_init_numa(); 7411 7412 /* 7413 * There's no userspace yet to cause hotplug operations; hence all the 7414 * cpu masks are stable and all blatant races in the below code cannot 7415 * happen. 7416 */ 7417 mutex_lock(&sched_domains_mutex); 7418 init_sched_domains(cpu_active_mask); 7419 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); 7420 if (cpumask_empty(non_isolated_cpus)) 7421 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); 7422 mutex_unlock(&sched_domains_mutex); 7423 7424 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); 7425 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); 7426 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); 7427 7428 init_hrtick(); 7429 7430 /* Move init over to a non-isolated CPU */ 7431 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) 7432 BUG(); 7433 sched_init_granularity(); 7434 free_cpumask_var(non_isolated_cpus); 7435 7436 init_sched_rt_class(); 7437 init_sched_dl_class(); 7438 } 7439 #else 7440 void __init sched_init_smp(void) 7441 { 7442 sched_init_granularity(); 7443 } 7444 #endif /* CONFIG_SMP */ 7445 7446 int in_sched_functions(unsigned long addr) 7447 { 7448 return in_lock_functions(addr) || 7449 (addr >= (unsigned long)__sched_text_start 7450 && addr < (unsigned long)__sched_text_end); 7451 } 7452 7453 #ifdef CONFIG_CGROUP_SCHED 7454 /* 7455 * Default task group. 7456 * Every task in system belongs to this group at bootup. 
7457 */ 7458 struct task_group root_task_group; 7459 LIST_HEAD(task_groups); 7460 7461 /* Cacheline aligned slab cache for task_group */ 7462 static struct kmem_cache *task_group_cache __read_mostly; 7463 #endif 7464 7465 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 7466 7467 void __init sched_init(void) 7468 { 7469 int i, j; 7470 unsigned long alloc_size = 0, ptr; 7471 7472 #ifdef CONFIG_FAIR_GROUP_SCHED 7473 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7474 #endif 7475 #ifdef CONFIG_RT_GROUP_SCHED 7476 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7477 #endif 7478 if (alloc_size) { 7479 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); 7480 7481 #ifdef CONFIG_FAIR_GROUP_SCHED 7482 root_task_group.se = (struct sched_entity **)ptr; 7483 ptr += nr_cpu_ids * sizeof(void **); 7484 7485 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 7486 ptr += nr_cpu_ids * sizeof(void **); 7487 7488 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7489 #ifdef CONFIG_RT_GROUP_SCHED 7490 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 7491 ptr += nr_cpu_ids * sizeof(void **); 7492 7493 root_task_group.rt_rq = (struct rt_rq **)ptr; 7494 ptr += nr_cpu_ids * sizeof(void **); 7495 7496 #endif /* CONFIG_RT_GROUP_SCHED */ 7497 } 7498 #ifdef CONFIG_CPUMASK_OFFSTACK 7499 for_each_possible_cpu(i) { 7500 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 7501 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 7502 } 7503 #endif /* CONFIG_CPUMASK_OFFSTACK */ 7504 7505 init_rt_bandwidth(&def_rt_bandwidth, 7506 global_rt_period(), global_rt_runtime()); 7507 init_dl_bandwidth(&def_dl_bandwidth, 7508 global_rt_period(), global_rt_runtime()); 7509 7510 #ifdef CONFIG_SMP 7511 init_defrootdomain(); 7512 #endif 7513 7514 #ifdef CONFIG_RT_GROUP_SCHED 7515 init_rt_bandwidth(&root_task_group.rt_bandwidth, 7516 global_rt_period(), global_rt_runtime()); 7517 #endif /* CONFIG_RT_GROUP_SCHED */ 7518 7519 #ifdef CONFIG_CGROUP_SCHED 7520 task_group_cache = KMEM_CACHE(task_group, 0); 7521 7522 list_add(&root_task_group.list, &task_groups); 7523 INIT_LIST_HEAD(&root_task_group.children); 7524 INIT_LIST_HEAD(&root_task_group.siblings); 7525 autogroup_init(&init_task); 7526 #endif /* CONFIG_CGROUP_SCHED */ 7527 7528 for_each_possible_cpu(i) { 7529 struct rq *rq; 7530 7531 rq = cpu_rq(i); 7532 raw_spin_lock_init(&rq->lock); 7533 rq->nr_running = 0; 7534 rq->calc_load_active = 0; 7535 rq->calc_load_update = jiffies + LOAD_FREQ; 7536 init_cfs_rq(&rq->cfs); 7537 init_rt_rq(&rq->rt); 7538 init_dl_rq(&rq->dl); 7539 #ifdef CONFIG_FAIR_GROUP_SCHED 7540 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 7541 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 7542 /* 7543 * How much cpu bandwidth does root_task_group get? 7544 * 7545 * In case of task-groups formed thr' the cgroup filesystem, it 7546 * gets 100% of the cpu resources in the system. This overall 7547 * system cpu resource is divided among the tasks of 7548 * root_task_group and its child task-groups in a fair manner, 7549 * based on each entity's (task or task-group's) weight 7550 * (se->load.weight). 7551 * 7552 * In other words, if root_task_group has 10 tasks of weight 7553 * 1024) and two child groups A0 and A1 (of weight 1024 each), 7554 * then A0's share of the cpu resource is: 7555 * 7556 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 7557 * 7558 * We achieve this by letting root_task_group's tasks sit 7559 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 
7560 */ 7561 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 7562 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 7563 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7564 7565 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 7566 #ifdef CONFIG_RT_GROUP_SCHED 7567 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 7568 #endif 7569 7570 for (j = 0; j < CPU_LOAD_IDX_MAX; j++) 7571 rq->cpu_load[j] = 0; 7572 7573 rq->last_load_update_tick = jiffies; 7574 7575 #ifdef CONFIG_SMP 7576 rq->sd = NULL; 7577 rq->rd = NULL; 7578 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 7579 rq->balance_callback = NULL; 7580 rq->active_balance = 0; 7581 rq->next_balance = jiffies; 7582 rq->push_cpu = 0; 7583 rq->cpu = i; 7584 rq->online = 0; 7585 rq->idle_stamp = 0; 7586 rq->avg_idle = 2*sysctl_sched_migration_cost; 7587 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 7588 7589 INIT_LIST_HEAD(&rq->cfs_tasks); 7590 7591 rq_attach_root(rq, &def_root_domain); 7592 #ifdef CONFIG_NO_HZ_COMMON 7593 rq->nohz_flags = 0; 7594 #endif 7595 #ifdef CONFIG_NO_HZ_FULL 7596 rq->last_sched_tick = 0; 7597 #endif 7598 #endif 7599 init_rq_hrtick(rq); 7600 atomic_set(&rq->nr_iowait, 0); 7601 } 7602 7603 set_load_weight(&init_task); 7604 7605 #ifdef CONFIG_PREEMPT_NOTIFIERS 7606 INIT_HLIST_HEAD(&init_task.preempt_notifiers); 7607 #endif 7608 7609 /* 7610 * The boot idle thread does lazy MMU switching as well: 7611 */ 7612 atomic_inc(&init_mm.mm_count); 7613 enter_lazy_tlb(&init_mm, current); 7614 7615 /* 7616 * During early bootup we pretend to be a normal task: 7617 */ 7618 current->sched_class = &fair_sched_class; 7619 7620 /* 7621 * Make us the idle thread. Technically, schedule() should not be 7622 * called from this thread, however somewhere below it might be, 7623 * but because we are the idle thread, we just pick up running again 7624 * when this runqueue becomes "idle". 7625 */ 7626 init_idle(current, smp_processor_id()); 7627 7628 calc_load_update = jiffies + LOAD_FREQ; 7629 7630 #ifdef CONFIG_SMP 7631 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); 7632 /* May be allocated at isolcpus cmdline parse time */ 7633 if (cpu_isolated_map == NULL) 7634 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 7635 idle_thread_set_boot_cpu(); 7636 set_cpu_rq_start_time(); 7637 #endif 7638 init_sched_fair_class(); 7639 7640 scheduler_running = 1; 7641 } 7642 7643 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 7644 static inline int preempt_count_equals(int preempt_offset) 7645 { 7646 int nested = preempt_count() + rcu_preempt_depth(); 7647 7648 return (nested == preempt_offset); 7649 } 7650 7651 void __might_sleep(const char *file, int line, int preempt_offset) 7652 { 7653 /* 7654 * Blocking primitives will set (and therefore destroy) current->state, 7655 * since we will exit with TASK_RUNNING make sure we enter with it, 7656 * otherwise we will destroy state. 7657 */ 7658 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 7659 "do not call blocking ops when !TASK_RUNNING; " 7660 "state=%lx set at [<%p>] %pS\n", 7661 current->state, 7662 (void *)current->task_state_change, 7663 (void *)current->task_state_change); 7664 7665 ___might_sleep(file, line, preempt_offset); 7666 } 7667 EXPORT_SYMBOL(__might_sleep); 7668 7669 void ___might_sleep(const char *file, int line, int preempt_offset) 7670 { 7671 static unsigned long prev_jiffy; /* ratelimiting */ 7672 7673 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. 
*/ 7674 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 7675 !is_idle_task(current)) || 7676 system_state != SYSTEM_RUNNING || oops_in_progress) 7677 return; 7678 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 7679 return; 7680 prev_jiffy = jiffies; 7681 7682 printk(KERN_ERR 7683 "BUG: sleeping function called from invalid context at %s:%d\n", 7684 file, line); 7685 printk(KERN_ERR 7686 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 7687 in_atomic(), irqs_disabled(), 7688 current->pid, current->comm); 7689 7690 if (task_stack_end_corrupted(current)) 7691 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 7692 7693 debug_show_held_locks(current); 7694 if (irqs_disabled()) 7695 print_irqtrace_events(current); 7696 #ifdef CONFIG_DEBUG_PREEMPT 7697 if (!preempt_count_equals(preempt_offset)) { 7698 pr_err("Preemption disabled at:"); 7699 print_ip_sym(current->preempt_disable_ip); 7700 pr_cont("\n"); 7701 } 7702 #endif 7703 dump_stack(); 7704 } 7705 EXPORT_SYMBOL(___might_sleep); 7706 #endif 7707 7708 #ifdef CONFIG_MAGIC_SYSRQ 7709 void normalize_rt_tasks(void) 7710 { 7711 struct task_struct *g, *p; 7712 struct sched_attr attr = { 7713 .sched_policy = SCHED_NORMAL, 7714 }; 7715 7716 read_lock(&tasklist_lock); 7717 for_each_process_thread(g, p) { 7718 /* 7719 * Only normalize user tasks: 7720 */ 7721 if (p->flags & PF_KTHREAD) 7722 continue; 7723 7724 p->se.exec_start = 0; 7725 #ifdef CONFIG_SCHEDSTATS 7726 p->se.statistics.wait_start = 0; 7727 p->se.statistics.sleep_start = 0; 7728 p->se.statistics.block_start = 0; 7729 #endif 7730 7731 if (!dl_task(p) && !rt_task(p)) { 7732 /* 7733 * Renice negative nice level userspace 7734 * tasks back to 0: 7735 */ 7736 if (task_nice(p) < 0) 7737 set_user_nice(p, 0); 7738 continue; 7739 } 7740 7741 __sched_setscheduler(p, &attr, false, false); 7742 } 7743 read_unlock(&tasklist_lock); 7744 } 7745 7746 #endif /* CONFIG_MAGIC_SYSRQ */ 7747 7748 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 7749 /* 7750 * These functions are only useful for the IA64 MCA handling, or kdb. 7751 * 7752 * They can only be called when the whole system has been 7753 * stopped - every CPU needs to be quiescent, and no scheduling 7754 * activity can take place. Using them for anything else would 7755 * be a serious bug, and as a result, they aren't even visible 7756 * under any other configuration. 7757 */ 7758 7759 /** 7760 * curr_task - return the current task for a given cpu. 7761 * @cpu: the processor in question. 7762 * 7763 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 7764 * 7765 * Return: The current task for @cpu. 7766 */ 7767 struct task_struct *curr_task(int cpu) 7768 { 7769 return cpu_curr(cpu); 7770 } 7771 7772 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 7773 7774 #ifdef CONFIG_IA64 7775 /** 7776 * set_curr_task - set the current task for a given cpu. 7777 * @cpu: the processor in question. 7778 * @p: the task pointer to set. 7779 * 7780 * Description: This function must only be used when non-maskable interrupts 7781 * are serviced on a separate stack. It allows the architecture to switch the 7782 * notion of the current task on a cpu in a non-blocking manner. This function 7783 * must be called with all CPU's synchronized, and interrupts disabled, the 7784 * and caller must save the original value of the current task (see 7785 * curr_task() above) and restore that value before reenabling interrupts and 7786 * re-starting the system. 7787 * 7788 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 
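 *
 * The expected calling pattern is a save/switch/restore sketch:
 *
 *	prev = curr_task(cpu);
 *	set_curr_task(cpu, new_task);
 *	... do the MCA/kdb work ...
 *	set_curr_task(cpu, prev);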
7789 */ 7790 void set_curr_task(int cpu, struct task_struct *p) 7791 { 7792 cpu_curr(cpu) = p; 7793 } 7794 7795 #endif 7796 7797 #ifdef CONFIG_CGROUP_SCHED 7798 /* task_group_lock serializes the addition/removal of task groups */ 7799 static DEFINE_SPINLOCK(task_group_lock); 7800 7801 static void free_sched_group(struct task_group *tg) 7802 { 7803 free_fair_sched_group(tg); 7804 free_rt_sched_group(tg); 7805 autogroup_free(tg); 7806 kmem_cache_free(task_group_cache, tg); 7807 } 7808 7809 /* allocate runqueue etc for a new task group */ 7810 struct task_group *sched_create_group(struct task_group *parent) 7811 { 7812 struct task_group *tg; 7813 7814 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 7815 if (!tg) 7816 return ERR_PTR(-ENOMEM); 7817 7818 if (!alloc_fair_sched_group(tg, parent)) 7819 goto err; 7820 7821 if (!alloc_rt_sched_group(tg, parent)) 7822 goto err; 7823 7824 return tg; 7825 7826 err: 7827 free_sched_group(tg); 7828 return ERR_PTR(-ENOMEM); 7829 } 7830 7831 void sched_online_group(struct task_group *tg, struct task_group *parent) 7832 { 7833 unsigned long flags; 7834 7835 spin_lock_irqsave(&task_group_lock, flags); 7836 list_add_rcu(&tg->list, &task_groups); 7837 7838 WARN_ON(!parent); /* root should already exist */ 7839 7840 tg->parent = parent; 7841 INIT_LIST_HEAD(&tg->children); 7842 list_add_rcu(&tg->siblings, &parent->children); 7843 spin_unlock_irqrestore(&task_group_lock, flags); 7844 } 7845 7846 /* rcu callback to free various structures associated with a task group */ 7847 static void free_sched_group_rcu(struct rcu_head *rhp) 7848 { 7849 /* now it should be safe to free those cfs_rqs */ 7850 free_sched_group(container_of(rhp, struct task_group, rcu)); 7851 } 7852 7853 /* Destroy runqueue etc associated with a task group */ 7854 void sched_destroy_group(struct task_group *tg) 7855 { 7856 /* wait for possible concurrent references to cfs_rqs complete */ 7857 call_rcu(&tg->rcu, free_sched_group_rcu); 7858 } 7859 7860 void sched_offline_group(struct task_group *tg) 7861 { 7862 unsigned long flags; 7863 int i; 7864 7865 /* end participation in shares distribution */ 7866 for_each_possible_cpu(i) 7867 unregister_fair_sched_group(tg, i); 7868 7869 spin_lock_irqsave(&task_group_lock, flags); 7870 list_del_rcu(&tg->list); 7871 list_del_rcu(&tg->siblings); 7872 spin_unlock_irqrestore(&task_group_lock, flags); 7873 } 7874 7875 /* change task's runqueue when it moves between groups. 7876 * The caller of this function should have put the task in its new group 7877 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to 7878 * reflect its new group. 7879 */ 7880 void sched_move_task(struct task_struct *tsk) 7881 { 7882 struct task_group *tg; 7883 int queued, running; 7884 unsigned long flags; 7885 struct rq *rq; 7886 7887 rq = task_rq_lock(tsk, &flags); 7888 7889 running = task_current(rq, tsk); 7890 queued = task_on_rq_queued(tsk); 7891 7892 if (queued) 7893 dequeue_task(rq, tsk, DEQUEUE_SAVE); 7894 if (unlikely(running)) 7895 put_prev_task(rq, tsk); 7896 7897 /* 7898 * All callers are synchronized by task_rq_lock(); we do not use RCU 7899 * which is pointless here. Thus, we pass "true" to task_css_check() 7900 * to prevent lockdep warnings. 
7901 */ 7902 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 7903 struct task_group, css); 7904 tg = autogroup_task_group(tsk, tg); 7905 tsk->sched_task_group = tg; 7906 7907 #ifdef CONFIG_FAIR_GROUP_SCHED 7908 if (tsk->sched_class->task_move_group) 7909 tsk->sched_class->task_move_group(tsk); 7910 else 7911 #endif 7912 set_task_rq(tsk, task_cpu(tsk)); 7913 7914 if (unlikely(running)) 7915 tsk->sched_class->set_curr_task(rq); 7916 if (queued) 7917 enqueue_task(rq, tsk, ENQUEUE_RESTORE); 7918 7919 task_rq_unlock(rq, tsk, &flags); 7920 } 7921 #endif /* CONFIG_CGROUP_SCHED */ 7922 7923 #ifdef CONFIG_RT_GROUP_SCHED 7924 /* 7925 * Ensure that the real time constraints are schedulable. 7926 */ 7927 static DEFINE_MUTEX(rt_constraints_mutex); 7928 7929 /* Must be called with tasklist_lock held */ 7930 static inline int tg_has_rt_tasks(struct task_group *tg) 7931 { 7932 struct task_struct *g, *p; 7933 7934 /* 7935 * Autogroups do not have RT tasks; see autogroup_create(). 7936 */ 7937 if (task_group_is_autogroup(tg)) 7938 return 0; 7939 7940 for_each_process_thread(g, p) { 7941 if (rt_task(p) && task_group(p) == tg) 7942 return 1; 7943 } 7944 7945 return 0; 7946 } 7947 7948 struct rt_schedulable_data { 7949 struct task_group *tg; 7950 u64 rt_period; 7951 u64 rt_runtime; 7952 }; 7953 7954 static int tg_rt_schedulable(struct task_group *tg, void *data) 7955 { 7956 struct rt_schedulable_data *d = data; 7957 struct task_group *child; 7958 unsigned long total, sum = 0; 7959 u64 period, runtime; 7960 7961 period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7962 runtime = tg->rt_bandwidth.rt_runtime; 7963 7964 if (tg == d->tg) { 7965 period = d->rt_period; 7966 runtime = d->rt_runtime; 7967 } 7968 7969 /* 7970 * Cannot have more runtime than the period. 7971 */ 7972 if (runtime > period && runtime != RUNTIME_INF) 7973 return -EINVAL; 7974 7975 /* 7976 * Ensure we don't starve existing RT tasks. 7977 */ 7978 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) 7979 return -EBUSY; 7980 7981 total = to_ratio(period, runtime); 7982 7983 /* 7984 * Nobody can have more than the global setting allows. 7985 */ 7986 if (total > to_ratio(global_rt_period(), global_rt_runtime())) 7987 return -EINVAL; 7988 7989 /* 7990 * The sum of our children's runtime should not exceed our own. 7991 */ 7992 list_for_each_entry_rcu(child, &tg->children, siblings) { 7993 period = ktime_to_ns(child->rt_bandwidth.rt_period); 7994 runtime = child->rt_bandwidth.rt_runtime; 7995 7996 if (child == d->tg) { 7997 period = d->rt_period; 7998 runtime = d->rt_runtime; 7999 } 8000 8001 sum += to_ratio(period, runtime); 8002 } 8003 8004 if (sum > total) 8005 return -EINVAL; 8006 8007 return 0; 8008 } 8009 8010 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 8011 { 8012 int ret; 8013 8014 struct rt_schedulable_data data = { 8015 .tg = tg, 8016 .rt_period = period, 8017 .rt_runtime = runtime, 8018 }; 8019 8020 rcu_read_lock(); 8021 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); 8022 rcu_read_unlock(); 8023 8024 return ret; 8025 } 8026 8027 static int tg_set_rt_bandwidth(struct task_group *tg, 8028 u64 rt_period, u64 rt_runtime) 8029 { 8030 int i, err = 0; 8031 8032 /* 8033 * Disallowing the root group RT runtime is BAD, it would disallow the 8034 * kernel creating (and or operating) RT threads. 8035 */ 8036 if (tg == &root_task_group && rt_runtime == 0) 8037 return -EINVAL; 8038 8039 /* No period doesn't make any sense. 
*/ 8040 if (rt_period == 0) 8041 return -EINVAL; 8042 8043 mutex_lock(&rt_constraints_mutex); 8044 read_lock(&tasklist_lock); 8045 err = __rt_schedulable(tg, rt_period, rt_runtime); 8046 if (err) 8047 goto unlock; 8048 8049 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 8050 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 8051 tg->rt_bandwidth.rt_runtime = rt_runtime; 8052 8053 for_each_possible_cpu(i) { 8054 struct rt_rq *rt_rq = tg->rt_rq[i]; 8055 8056 raw_spin_lock(&rt_rq->rt_runtime_lock); 8057 rt_rq->rt_runtime = rt_runtime; 8058 raw_spin_unlock(&rt_rq->rt_runtime_lock); 8059 } 8060 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 8061 unlock: 8062 read_unlock(&tasklist_lock); 8063 mutex_unlock(&rt_constraints_mutex); 8064 8065 return err; 8066 } 8067 8068 static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) 8069 { 8070 u64 rt_runtime, rt_period; 8071 8072 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); 8073 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; 8074 if (rt_runtime_us < 0) 8075 rt_runtime = RUNTIME_INF; 8076 8077 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 8078 } 8079 8080 static long sched_group_rt_runtime(struct task_group *tg) 8081 { 8082 u64 rt_runtime_us; 8083 8084 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) 8085 return -1; 8086 8087 rt_runtime_us = tg->rt_bandwidth.rt_runtime; 8088 do_div(rt_runtime_us, NSEC_PER_USEC); 8089 return rt_runtime_us; 8090 } 8091 8092 static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) 8093 { 8094 u64 rt_runtime, rt_period; 8095 8096 rt_period = rt_period_us * NSEC_PER_USEC; 8097 rt_runtime = tg->rt_bandwidth.rt_runtime; 8098 8099 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 8100 } 8101 8102 static long sched_group_rt_period(struct task_group *tg) 8103 { 8104 u64 rt_period_us; 8105 8106 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); 8107 do_div(rt_period_us, NSEC_PER_USEC); 8108 return rt_period_us; 8109 } 8110 #endif /* CONFIG_RT_GROUP_SCHED */ 8111 8112 #ifdef CONFIG_RT_GROUP_SCHED 8113 static int sched_rt_global_constraints(void) 8114 { 8115 int ret = 0; 8116 8117 mutex_lock(&rt_constraints_mutex); 8118 read_lock(&tasklist_lock); 8119 ret = __rt_schedulable(NULL, 0, 0); 8120 read_unlock(&tasklist_lock); 8121 mutex_unlock(&rt_constraints_mutex); 8122 8123 return ret; 8124 } 8125 8126 static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) 8127 { 8128 /* Don't accept realtime tasks when there is no way for them to run */ 8129 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) 8130 return 0; 8131 8132 return 1; 8133 } 8134 8135 #else /* !CONFIG_RT_GROUP_SCHED */ 8136 static int sched_rt_global_constraints(void) 8137 { 8138 unsigned long flags; 8139 int i, ret = 0; 8140 8141 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 8142 for_each_possible_cpu(i) { 8143 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 8144 8145 raw_spin_lock(&rt_rq->rt_runtime_lock); 8146 rt_rq->rt_runtime = global_rt_runtime(); 8147 raw_spin_unlock(&rt_rq->rt_runtime_lock); 8148 } 8149 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 8150 8151 return ret; 8152 } 8153 #endif /* CONFIG_RT_GROUP_SCHED */ 8154 8155 static int sched_dl_global_validate(void) 8156 { 8157 u64 runtime = global_rt_runtime(); 8158 u64 period = global_rt_period(); 8159 u64 new_bw = to_ratio(period, runtime); 8160 struct dl_bw *dl_b; 8161 int cpu, ret = 0; 8162 unsigned long flags; 8163 8164 /* 8165 * Here we want to check the bandwidth 
not being set to some 8166 * value smaller than the currently allocated bandwidth in 8167 * any of the root_domains. 8168 * 8169 * FIXME: Cycling on all the CPUs is overdoing, but simpler than 8170 * cycling on root_domains... Discussion on different/better 8171 * solutions is welcome! 8172 */ 8173 for_each_possible_cpu(cpu) { 8174 rcu_read_lock_sched(); 8175 dl_b = dl_bw_of(cpu); 8176 8177 raw_spin_lock_irqsave(&dl_b->lock, flags); 8178 if (new_bw < dl_b->total_bw) 8179 ret = -EBUSY; 8180 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 8181 8182 rcu_read_unlock_sched(); 8183 8184 if (ret) 8185 break; 8186 } 8187 8188 return ret; 8189 } 8190 8191 static void sched_dl_do_global(void) 8192 { 8193 u64 new_bw = -1; 8194 struct dl_bw *dl_b; 8195 int cpu; 8196 unsigned long flags; 8197 8198 def_dl_bandwidth.dl_period = global_rt_period(); 8199 def_dl_bandwidth.dl_runtime = global_rt_runtime(); 8200 8201 if (global_rt_runtime() != RUNTIME_INF) 8202 new_bw = to_ratio(global_rt_period(), global_rt_runtime()); 8203 8204 /* 8205 * FIXME: As above... 8206 */ 8207 for_each_possible_cpu(cpu) { 8208 rcu_read_lock_sched(); 8209 dl_b = dl_bw_of(cpu); 8210 8211 raw_spin_lock_irqsave(&dl_b->lock, flags); 8212 dl_b->bw = new_bw; 8213 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 8214 8215 rcu_read_unlock_sched(); 8216 } 8217 } 8218 8219 static int sched_rt_global_validate(void) 8220 { 8221 if (sysctl_sched_rt_period <= 0) 8222 return -EINVAL; 8223 8224 if ((sysctl_sched_rt_runtime != RUNTIME_INF) && 8225 (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) 8226 return -EINVAL; 8227 8228 return 0; 8229 } 8230 8231 static void sched_rt_do_global(void) 8232 { 8233 def_rt_bandwidth.rt_runtime = global_rt_runtime(); 8234 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); 8235 } 8236 8237 int sched_rt_handler(struct ctl_table *table, int write, 8238 void __user *buffer, size_t *lenp, 8239 loff_t *ppos) 8240 { 8241 int old_period, old_runtime; 8242 static DEFINE_MUTEX(mutex); 8243 int ret; 8244 8245 mutex_lock(&mutex); 8246 old_period = sysctl_sched_rt_period; 8247 old_runtime = sysctl_sched_rt_runtime; 8248 8249 ret = proc_dointvec(table, write, buffer, lenp, ppos); 8250 8251 if (!ret && write) { 8252 ret = sched_rt_global_validate(); 8253 if (ret) 8254 goto undo; 8255 8256 ret = sched_dl_global_validate(); 8257 if (ret) 8258 goto undo; 8259 8260 ret = sched_rt_global_constraints(); 8261 if (ret) 8262 goto undo; 8263 8264 sched_rt_do_global(); 8265 sched_dl_do_global(); 8266 } 8267 if (0) { 8268 undo: 8269 sysctl_sched_rt_period = old_period; 8270 sysctl_sched_rt_runtime = old_runtime; 8271 } 8272 mutex_unlock(&mutex); 8273 8274 return ret; 8275 } 8276 8277 int sched_rr_handler(struct ctl_table *table, int write, 8278 void __user *buffer, size_t *lenp, 8279 loff_t *ppos) 8280 { 8281 int ret; 8282 static DEFINE_MUTEX(mutex); 8283 8284 mutex_lock(&mutex); 8285 ret = proc_dointvec(table, write, buffer, lenp, ppos); 8286 /* make sure that internally we keep jiffies */ 8287 /* also, writing zero resets timeslice to default */ 8288 if (!ret && write) { 8289 sched_rr_timeslice = sched_rr_timeslice <= 0 ? 8290 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice); 8291 } 8292 mutex_unlock(&mutex); 8293 return ret; 8294 } 8295 8296 #ifdef CONFIG_CGROUP_SCHED 8297 8298 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 8299 { 8300 return css ? 

#ifdef CONFIG_CGROUP_SCHED

static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct task_group, css) : NULL;
}

static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct task_group *parent = css_tg(parent_css);
        struct task_group *tg;

        if (!parent) {
                /* This is early initialization for the top cgroup */
                return &root_task_group.css;
        }

        tg = sched_create_group(parent);
        if (IS_ERR(tg))
                return ERR_PTR(-ENOMEM);

        return &tg->css;
}

static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
        struct task_group *tg = css_tg(css);
        struct task_group *parent = css_tg(css->parent);

        if (parent)
                sched_online_group(tg, parent);
        return 0;
}

static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
        struct task_group *tg = css_tg(css);

        sched_destroy_group(tg);
}

static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
{
        struct task_group *tg = css_tg(css);

        sched_offline_group(tg);
}

static void cpu_cgroup_fork(struct task_struct *task)
{
        sched_move_task(task);
}

static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct cgroup_subsys_state *css;

        cgroup_taskset_for_each(task, css, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
                if (!sched_rt_can_attach(css_tg(css), task))
                        return -EINVAL;
#else
                /* We don't support RT-tasks being in separate groups */
                if (task->sched_class != &fair_sched_class)
                        return -EINVAL;
#endif
        }
        return 0;
}

static void cpu_cgroup_attach(struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct cgroup_subsys_state *css;

        cgroup_taskset_for_each(task, css, tset)
                sched_move_task(task);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
                                struct cftype *cftype, u64 shareval)
{
        return sched_group_set_shares(css_tg(css), scale_load(shareval));
}

static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
                               struct cftype *cft)
{
        struct task_group *tg = css_tg(css);

        return (u64) scale_load_down(tg->shares);
}
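
/*
 * Editor's note (illustrative, not part of the original source): these two
 * accessors back the cpu.shares cgroup file.  Shares are relative weights,
 * with 1024 corresponding to the weight of a nice-0 task; a group with
 * shares = 2048 competing against a sibling left at the default 1024
 * receives roughly two thirds of the contended CPU time.
 * scale_load()/scale_load_down() only convert between the user-visible
 * value and the higher-resolution fixed-point representation used
 * internally on 64-bit kernels.
 */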

#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);

const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
        int i, ret = 0, runtime_enabled, runtime_was_enabled;
        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

        if (tg == &root_task_group)
                return -EINVAL;

        /*
         * Ensure we have at least some amount of bandwidth every period.
         * This is to prevent reaching a state of large arrears when
         * throttled via entity_tick() resulting in prolonged exit starvation.
         */
        if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
                return -EINVAL;

        /*
         * Likewise, bound things on the other side by preventing insane
         * quota periods.  This also allows us to normalize in computing
         * quota feasibility.
         */
        if (period > max_cfs_quota_period)
                return -EINVAL;

        /*
         * Prevent race between setting of cfs_rq->runtime_enabled and
         * unthrottle_offline_cfs_rqs().
         */
        get_online_cpus();
        mutex_lock(&cfs_constraints_mutex);
        ret = __cfs_schedulable(tg, period, quota);
        if (ret)
                goto out_unlock;

        runtime_enabled = quota != RUNTIME_INF;
        runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
        /*
         * If we need to toggle cfs_bandwidth_used, off->on must occur
         * before making related changes, and on->off must occur afterwards
         */
        if (runtime_enabled && !runtime_was_enabled)
                cfs_bandwidth_usage_inc();
        raw_spin_lock_irq(&cfs_b->lock);
        cfs_b->period = ns_to_ktime(period);
        cfs_b->quota = quota;

        __refill_cfs_bandwidth_runtime(cfs_b);
        /* restart the period timer (if active) to handle new period expiry */
        if (runtime_enabled)
                start_cfs_bandwidth(cfs_b);
        raw_spin_unlock_irq(&cfs_b->lock);

        for_each_online_cpu(i) {
                struct cfs_rq *cfs_rq = tg->cfs_rq[i];
                struct rq *rq = cfs_rq->rq;

                raw_spin_lock_irq(&rq->lock);
                cfs_rq->runtime_enabled = runtime_enabled;
                cfs_rq->runtime_remaining = 0;

                if (cfs_rq->throttled)
                        unthrottle_cfs_rq(cfs_rq);
                raw_spin_unlock_irq(&rq->lock);
        }
        if (runtime_was_enabled && !runtime_enabled)
                cfs_bandwidth_usage_dec();
out_unlock:
        mutex_unlock(&cfs_constraints_mutex);
        put_online_cpus();

        return ret;
}

int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
        u64 quota, period;

        period = ktime_to_ns(tg->cfs_bandwidth.period);
        if (cfs_quota_us < 0)
                quota = RUNTIME_INF;
        else
                quota = (u64)cfs_quota_us * NSEC_PER_USEC;

        return tg_set_cfs_bandwidth(tg, period, quota);
}

long tg_get_cfs_quota(struct task_group *tg)
{
        u64 quota_us;

        if (tg->cfs_bandwidth.quota == RUNTIME_INF)
                return -1;

        quota_us = tg->cfs_bandwidth.quota;
        do_div(quota_us, NSEC_PER_USEC);

        return quota_us;
}

int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
        u64 quota, period;

        period = (u64)cfs_period_us * NSEC_PER_USEC;
        quota = tg->cfs_bandwidth.quota;

        return tg_set_cfs_bandwidth(tg, period, quota);
}

long tg_get_cfs_period(struct task_group *tg)
{
        u64 cfs_period_us;

        cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
        do_div(cfs_period_us, NSEC_PER_USEC);

        return cfs_period_us;
}
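
/*
 * Editor's note (illustrative, not part of the original source): quota and
 * period are exposed through the cpu.cfs_quota_us and cpu.cfs_period_us
 * cgroup files.  Both are microsecond values, converted to nanoseconds
 * above, and a quota of -1 means "no limit" (RUNTIME_INF).  For example,
 * assuming a group "batch" under a v1 cpu controller:
 *
 *      # echo 100000 > /sys/fs/cgroup/cpu/batch/cpu.cfs_period_us
 *      # echo  50000 > /sys/fs/cgroup/cpu/batch/cpu.cfs_quota_us
 *
 * caps the group at 50ms of CPU time per 100ms period, i.e. half a CPU's
 * worth of bandwidth, spread across however many CPUs its tasks run on.
 */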

static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
                                  struct cftype *cft)
{
        return tg_get_cfs_quota(css_tg(css));
}

static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
                                   struct cftype *cftype, s64 cfs_quota_us)
{
        return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
}

static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
                                   struct cftype *cft)
{
        return tg_get_cfs_period(css_tg(css));
}

static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
                                    struct cftype *cftype, u64 cfs_period_us)
{
        return tg_set_cfs_period(css_tg(css), cfs_period_us);
}

struct cfs_schedulable_data {
        struct task_group *tg;
        u64 period, quota;
};

/*
 * normalize group quota/period to be quota/max_period
 * note: units are usecs
 */
static u64 normalize_cfs_quota(struct task_group *tg,
                               struct cfs_schedulable_data *d)
{
        u64 quota, period;

        if (tg == d->tg) {
                period = d->period;
                quota = d->quota;
        } else {
                period = tg_get_cfs_period(tg);
                quota = tg_get_cfs_quota(tg);
        }

        /* note: these should typically be equivalent */
        if (quota == RUNTIME_INF || quota == -1)
                return RUNTIME_INF;

        return to_ratio(period, quota);
}

static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
        struct cfs_schedulable_data *d = data;
        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
        s64 quota = 0, parent_quota = -1;

        if (!tg->parent) {
                quota = RUNTIME_INF;
        } else {
                struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

                quota = normalize_cfs_quota(tg, d);
                parent_quota = parent_b->hierarchical_quota;

                /*
                 * ensure max(child_quota) <= parent_quota, inherit when no
                 * limit is set
                 */
                if (quota == RUNTIME_INF)
                        quota = parent_quota;
                else if (parent_quota != RUNTIME_INF && quota > parent_quota)
                        return -EINVAL;
        }
        cfs_b->hierarchical_quota = quota;

        return 0;
}

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
        int ret;
        struct cfs_schedulable_data data = {
                .tg = tg,
                .period = period,
                .quota = quota,
        };

        if (quota != RUNTIME_INF) {
                do_div(data.period, NSEC_PER_USEC);
                do_div(data.quota, NSEC_PER_USEC);
        }

        rcu_read_lock();
        ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
        rcu_read_unlock();

        return ret;
}

static int cpu_stats_show(struct seq_file *sf, void *v)
{
        struct task_group *tg = css_tg(seq_css(sf));
        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

        seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
        seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
        seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);

        return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
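
/*
 * Editor's note (illustrative, not part of the original source):
 * cpu_stats_show() above produces the cpu.stat file, e.g.:
 *
 *      nr_periods 1023
 *      nr_throttled 17
 *      throttled_time 284731022
 *
 * nr_periods counts enforcement periods that have elapsed while the group
 * was subject to bandwidth control, nr_throttled how many of those ended
 * with the group throttled, and throttled_time the cumulative time (in
 * nanoseconds) its runqueues spent throttled.  The numbers shown here are
 * made up.
 */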

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
                                struct cftype *cft, s64 val)
{
        return sched_group_set_rt_runtime(css_tg(css), val);
}

static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
                               struct cftype *cft)
{
        return sched_group_rt_runtime(css_tg(css));
}

static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
                                    struct cftype *cftype, u64 rt_period_us)
{
        return sched_group_set_rt_period(css_tg(css), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
                                   struct cftype *cft)
{
        return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
        {
                .name = "shares",
                .read_u64 = cpu_shares_read_u64,
                .write_u64 = cpu_shares_write_u64,
        },
#endif
#ifdef CONFIG_CFS_BANDWIDTH
        {
                .name = "cfs_quota_us",
                .read_s64 = cpu_cfs_quota_read_s64,
                .write_s64 = cpu_cfs_quota_write_s64,
        },
        {
                .name = "cfs_period_us",
                .read_u64 = cpu_cfs_period_read_u64,
                .write_u64 = cpu_cfs_period_write_u64,
        },
        {
                .name = "stat",
                .seq_show = cpu_stats_show,
        },
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        {
                .name = "rt_runtime_us",
                .read_s64 = cpu_rt_runtime_read,
                .write_s64 = cpu_rt_runtime_write,
        },
        {
                .name = "rt_period_us",
                .read_u64 = cpu_rt_period_read_uint,
                .write_u64 = cpu_rt_period_write_uint,
        },
#endif
        { }     /* terminate */
};

struct cgroup_subsys cpu_cgrp_subsys = {
        .css_alloc      = cpu_cgroup_css_alloc,
        .css_free       = cpu_cgroup_css_free,
        .css_online     = cpu_cgroup_css_online,
        .css_offline    = cpu_cgroup_css_offline,
        .fork           = cpu_cgroup_fork,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .legacy_cftypes = cpu_files,
        .early_init     = 1,
};

#endif /* CONFIG_CGROUP_SCHED */

void dump_cpu_task(int cpu)
{
        pr_info("Task dump for CPU %d:\n", cpu);
        sched_show_task(cpu_curr(cpu));
}

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage.  (To achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
const int sched_prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};

/*
 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
const u32 sched_prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
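
/*
 * Editor's note (illustrative, not part of the original source): a quick
 * sanity check of the two tables above.  Each nice step scales the weight
 * by ~1.25, anchored at nice 0 == 1024, so for nice 5:
 *
 *      1024 / 1.25^5 ~= 1024 / 3.05 ~= 335            (matches the table)
 *
 * and the corresponding inverse entry is 2^32 / 335 ~= 12820798, which
 * lets the scheduler replace a division by the weight with a cheaper
 * multiply-and-shift, roughly:
 *
 *      delta_fair = (delta_exec * NICE_0_LOAD * wmult) >> 32;
 *
 * The exact formula lives in the CFS code (__calc_delta() and friends);
 * the line above is only a simplified sketch of the idea.
 */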