/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15	Work begun on replacing all interactivity tuning with a
 *		fair scheduling design by Con Kolivas.
 *  2007-05-05	Load balancing (smp-nice) and other improvements
 *		by Peter Williams
 *  2007-05-06	Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01	Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29	RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *		Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_skip_update & RQCF_ACT_SKIP)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	mutex_lock(&inode->i_mutex);
	i = sched_feat_set(cmp);
	mutex_unlock(&inode->i_mutex);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);
#endif /* CONFIG_SCHED_DEBUG */

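/*
 * Illustrative note (not part of the original file): with CONFIG_SCHED_DEBUG
 * the debugfs file created above lets feature bits be flipped at runtime,
 * e.g. something along the lines of:
 *
 *	cat /sys/kernel/debug/sched_features
 *	echo NO_TTWU_QUEUE > /sys/kernel/debug/sched_features
 *
 * Writing "NAME" sets a feature, "NO_NAME" clears it; the available names
 * come from features.h and may differ between kernel versions.
 */
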
/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/* cpus with isolated domains */
cpumask_var_t cpu_isolated_map;

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;

	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	__hrtick_restart(rq);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		__hrtick_restart(rq);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, val)						\
({	typeof(*(ptr)) __old, __val = *(ptr);				\
	for (;;) {							\
		__old = cmpxchg((ptr), __val, __val | (val));		\
		if (__old == __val)					\
			break;						\
		__val = __old;						\
	}								\
	__old;								\
})

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

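/*
 * Illustrative usage sketch (not part of the original file): wake_q lets a
 * caller queue wakeups while holding a lock and issue them only after the
 * lock is dropped, e.g. (assuming the WAKE_Q() head initializer from the
 * corresponding header, whose name may differ between kernel versions):
 *
 *	WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);	- queues @task and takes a reference
 *	raw_spin_unlock(&some_lock);
 *	wake_up_q(&wake_q);		- wakes the queued tasks, drops refs
 */
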
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * This cmpxchg() implies a full barrier, which pairs with the write
	 * barrier implied by the wakeup in wake_up_list().
	 */
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

	get_task_struct(task);

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* task can safely be re-inserted now */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() implies a wmb() to pair with the queueing
		 * in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id();
	struct sched_domain *sd;

	if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
		return cpu;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (!is_housekeeping_cpu(cpu))
		cpu = housekeeping_any_cpu();
unlock:
	rcu_read_unlock();
	return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
	return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(void)
{
	/*
	 * FIFO realtime policy runs the highest priority task. Other runnable
	 * tasks are of a lower priority. The scheduler tick does nothing.
	 */
	if (current->policy == SCHED_FIFO)
		return true;

	/*
	 * Round-robin realtime tasks time slice with other tasks at the same
	 * realtime priority. Is this task the only one at this priority?
	 */
	if (current->policy == SCHED_RR) {
		struct sched_rt_entity *rt_se = &current->rt;

		return rt_se->run_list.prev == rt_se->run_list.next;
	}

	/*
	 * More than one running task needs preemption.
	 * nr_running update is assumed to be visible
	 * after IPI is sent from wakers.
	 */
	if (this_rq()->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */

void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

#endif /* CONFIG_SMP */

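/*
 * Illustrative note (not part of the original file): sched_avg_update()
 * above implements a coarse geometric decay of rq->rt_avg, halving it once
 * per sched_avg_period() that has elapsed since rq->age_stamp. For example,
 * if the averaging period is 1s and 3s pass without an update, the loop
 * runs three times and rt_avg ends up at 1/8th of its previous value.
 */
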
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (idle_policy(p->policy)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(prio_to_weight[prio]);
	load->inv_weight = prio_to_wmult[prio];
}

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	if (!(flags & ENQUEUE_RESTORE))
		sched_info_queued(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	if (!(flags & DEQUEUE_SAVE))
		sched_info_dequeued(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_curr(rq);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule.  In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq, true);
}

#ifdef CONFIG_SMP
/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
{
	lockdep_assert_held(&rq->lock);

	dequeue_task(rq, p, 0);
	p->on_rq = TASK_ON_RQ_MIGRATING;
	set_task_cpu(p, new_cpu);
	raw_spin_unlock(&rq->lock);

	rq = cpu_rq(new_cpu);

	raw_spin_lock(&rq->lock);
	BUG_ON(task_cpu(p) != new_cpu);
	p->on_rq = TASK_ON_RQ_QUEUED;
	enqueue_task(rq, p, 0);
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
{
	if (unlikely(!cpu_active(dest_cpu)))
		return rq;

	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		return rq;

	rq = move_queued_task(rq, p, dest_cpu);

	return rq;
}

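/*
 * Illustrative note (not part of the original file): the caller-side pattern
 * for the stopper below is the one used further down in
 * __set_cpus_allowed_ptr():
 *
 *	struct migration_arg arg = { p, dest_cpu };
 *
 *	task_rq_unlock(rq, p, &flags);
 *	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 *
 * i.e. drop the rq lock, then let the high-priority stopper thread on the
 * task's CPU perform the actual migration.
 */
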
/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_allowed
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	sched_ttwu_pending();

	raw_spin_lock(&p->pi_lock);
	raw_spin_lock(&rq->lock);
	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq && task_on_rq_queued(p))
		rq = __migrate_task(rq, p, arg->dest_cpu);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock(&p->pi_lock);

	local_irq_enable();
	return 0;
}

/*
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 */
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	struct rq *rq = task_rq(p);
	bool queued, running;

	lockdep_assert_held(&p->pi_lock);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued) {
		/*
		 * Because __kthread_bind() calls this on blocked tasks without
		 * holding rq->lock.
		 */
		lockdep_assert_held(&rq->lock);
		dequeue_task(rq, p, DEQUEUE_SAVE);
	}
	if (running)
		put_prev_task(rq, p);

	p->sched_class->set_cpus_allowed(p, new_mask);

	if (running)
		p->sched_class->set_curr_task(rq);
	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE);
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, bool check)
{
	unsigned long flags;
	struct rq *rq;
	unsigned int dest_cpu;
	int ret = 0;

	rq = task_rq_lock(p, &flags);

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if (check && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	} else if (task_on_rq_queued(p)) {
		/*
		 * OK, since we're going to drop the lock immediately
		 * afterwards anyway.
		 */
		lockdep_unpin_lock(&rq->lock);
		rq = move_queued_task(rq, p, dest_cpu);
		lockdep_pin_lock(&rq->lock);
	}
out:
	task_rq_unlock(rq, p, &flags);

	return ret;
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!p->on_rq);

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p);
		p->se.nr_migrations++;
		perf_event_task_migrate(p);
	}

	__set_task_cpu(p, new_cpu);
}

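/*
 * Illustrative note (not part of the original file): set_cpus_allowed_ptr()
 * is the exported entry point; a common in-kernel use is pinning a kthread
 * to one CPU, e.g. something along the lines of:
 *
 *	set_cpus_allowed_ptr(p, cpumask_of(cpu));
 *
 * which ends up in __set_cpus_allowed_ptr() above and, if @p is currently
 * running elsewhere, hands the actual move to migration_cpu_stop().
 */
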
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (task_on_rq_queued(p)) {
		struct rq *src_rq, *dst_rq;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		check_preempt_curr(dst_rq, p, 0);
	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous cpu our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
		return -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);

	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);

	return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = task_cpu(cur),
		.dst_task = p,
		.dst_cpu = task_cpu(p),
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, queued;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);

/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		/* No more Mr. Nice Guy. */
		switch (state) {
		case cpuset:
			if (IS_ENABLED(CONFIG_CPUSETS)) {
				cpuset_cpus_allowed_fallback(p);
				state = possible;
				break;
			}
			/* fall-through */
		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
	lockdep_assert_held(&p->pi_lock);

	if (p->nr_cpus_allowed > 1)
		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}

#else

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
					 const struct cpumask *new_mask, bool check)
{
	return set_cpus_allowed_ptr(p, new_mask);
}

#endif /* CONFIG_SMP */

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
	schedstat_inc(p, se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = TASK_ON_RQ_QUEUED;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	check_preempt_curr(rq, p, wake_flags);
	p->state = TASK_RUNNING;
	trace_sched_wakeup(p);

#ifdef CONFIG_SMP
	if (p->sched_class->task_woken) {
		/*
		 * Our task @p is fully woken up and running; so it's safe to
		 * drop the rq->lock, hereafter rq is only used for statistics.
		 */
		lockdep_unpin_lock(&rq->lock);
		p->sched_class->task_woken(rq, p);
		lockdep_pin_lock(&rq->lock);
	}

	if (rq->idle_stamp) {
		u64 delta = rq_clock(rq) - rq->idle_stamp;
		u64 max = 2*rq->max_idle_balance_cost;

		update_avg(&rq->avg_idle, delta);

		if (rq->avg_idle > max)
			rq->avg_idle = max;

		rq->idle_stamp = 0;
	}
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
	lockdep_assert_held(&rq->lock);

#ifdef CONFIG_SMP
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}

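/*
 * Illustrative note (not part of the original file): the wakeup path below
 * is layered roughly as
 *
 *	try_to_wake_up()
 *	  -> ttwu_remote()           - task still on a runqueue, light wakeup
 *	  -> ttwu_queue()            - otherwise: queue locally or remotely
 *	       -> ttwu_do_activate() - enqueue (ttwu_activate), then
 *	            -> ttwu_do_wakeup()  preemption check and TASK_RUNNING
 *
 * with ttwu_queue_remote() deferring the enqueue to the target CPU's
 * scheduler_ipi()/sched_ttwu_pending() when the caches are not shared.
 */
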
/*
 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, since
 * the task is still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (task_on_rq_queued(p)) {
		/* check_preempt_curr() may use rq clock */
		update_rq_clock(rq);
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;
}

#ifdef CONFIG_SMP
void sched_ttwu_pending(void)
{
	struct rq *rq = this_rq();
	struct llist_node *llist = llist_del_all(&rq->wake_list);
	struct task_struct *p;
	unsigned long flags;

	if (!llist)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	lockdep_pin_lock(&rq->lock);

	while (llist) {
		p = llist_entry(llist, struct task_struct, wake_entry);
		llist = llist_next(llist);
		ttwu_do_activate(rq, p, 0);
	}

	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

void scheduler_ipi(void)
{
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
	preempt_fold_need_resched();

	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
		return;

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
	 * path. Now that we actually do some work, we need to make sure
	 * we do call them.
	 *
	 * Some archs already do call them, luckily irq_enter/exit nest
	 * properly.
	 *
	 * Arguably we should visit all archs and update all handlers,
	 * however a fair share of IPIs are still resched only so this would
	 * somewhat pessimize the simple resched case.
	 */
	irq_enter();
	sched_ttwu_pending();

	/*
	 * Check if someone kicked us for doing the nohz idle load balance.
	 */
	if (unlikely(got_nohz_idle_kick())) {
		this_rq()->idle_balance = 1;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
	irq_exit();
}

static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
		if (!set_nr_if_polling(rq->idle))
			smp_send_reschedule(cpu);
		else
			trace_sched_wake_idle_without_ipi(cpu);
	}
}

void wake_up_if_idle(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	rcu_read_lock();

	if (!is_idle_task(rcu_dereference(rq->curr)))
		goto out;

	if (set_nr_if_polling(rq->idle)) {
		trace_sched_wake_idle_without_ipi(cpu);
	} else {
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (is_idle_task(rq->curr))
			smp_send_reschedule(cpu);
		/* Else cpu is not in idle, do nothing here */
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

out:
	rcu_read_unlock();
}

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif /* CONFIG_SMP */

static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

	raw_spin_lock(&rq->lock);
	lockdep_pin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock(&rq->lock);
}

/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Return: %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int cpu, success = 0;

	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller can not be
	 * reordered with p->state check below. This pairs with mb() in
	 * set_current_state() the waiting thread does.
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	if (!(p->state & state))
		goto out;

	trace_sched_waking(p);

	success = 1; /* we're going to change ->state */
	cpu = task_cpu(p);

	if (p->on_rq && ttwu_remote(p, wake_flags))
		goto stat;

#ifdef CONFIG_SMP
	/*
	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
	 * possible to, falsely, observe p->on_cpu == 0.
	 *
	 * One must be running (->on_cpu == 1) in order to remove oneself
	 * from the runqueue.
	 *
	 *  [S] ->on_cpu = 1;	[L] ->on_rq
	 *      UNLOCK rq->lock
	 *			RMB
	 *      LOCK   rq->lock
	 *  [S] ->on_rq = 0;	[L] ->on_cpu
	 *
	 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
	 * from the consecutive calls to schedule(); the first switching to our
	 * task, the second putting it to sleep.
	 */
	smp_rmb();

	/*
	 * If the owning (remote) cpu is still in the middle of schedule() with
	 * this task as prev, wait until it's done referencing the task.
	 */
	while (p->on_cpu)
		cpu_relax();
	/*
	 * Combined with the control dependency above, we have an effective
	 * smp_load_acquire() without the need for full barriers.
	 *
	 * Pairs with the smp_store_release() in finish_lock_switch().
	 *
	 * This ensures that tasks getting woken will be fully ordered against
	 * their previous state and preserve Program Order.
	 */
	smp_rmb();

	p->sched_contributes_to_load = !!task_contributes_to_load(p);
	p->state = TASK_WAKING;

	if (p->sched_class->task_waking)
		p->sched_class->task_waking(p);

	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
	if (task_cpu(p) != cpu) {
		wake_flags |= WF_MIGRATED;
		set_task_cpu(p, cpu);
	}
#endif /* CONFIG_SMP */

	ttwu_queue(p, cpu);
stat:
	ttwu_stat(p, cpu, wake_flags);
out:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return success;
}

/**
 * try_to_wake_up_local - try to wake up a local task with rq lock held
 * @p: the thread to be awakened
 *
 * Put @p on the run-queue if it's not already there. The caller must
 * ensure that this_rq() is locked, @p is bound to this_rq() and not
 * the current task.
 */
static void try_to_wake_up_local(struct task_struct *p)
{
	struct rq *rq = task_rq(p);

	if (WARN_ON_ONCE(rq != this_rq()) ||
	    WARN_ON_ONCE(p == current))
		return;

	lockdep_assert_held(&rq->lock);

	if (!raw_spin_trylock(&p->pi_lock)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we've
		 * not yet picked a replacement task.
		 */
		lockdep_unpin_lock(&rq->lock);
		raw_spin_unlock(&rq->lock);
		raw_spin_lock(&p->pi_lock);
		raw_spin_lock(&rq->lock);
		lockdep_pin_lock(&rq->lock);
	}

	if (!(p->state & TASK_NORMAL))
		goto out;

	trace_sched_waking(p);

	if (!task_on_rq_queued(p))
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);

	ttwu_do_wakeup(rq, p, 0);
	ttwu_stat(p, smp_processor_id(), 0);
out:
	raw_spin_unlock(&p->pi_lock);
}

/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes.
 *
 * Return: 1 if the process was woken up, 0 if it was already running.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
int wake_up_process(struct task_struct *p)
{
	return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);

int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}

/*
 * This function clears the sched_dl_entity static params.
 */
void __dl_clear_params(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;

	dl_se->dl_runtime = 0;
	dl_se->dl_deadline = 0;
	dl_se->dl_period = 0;
	dl_se->flags = 0;
	dl_se->dl_bw = 0;

	dl_se->dl_throttled = 0;
	dl_se->dl_new = 1;
	dl_se->dl_yielded = 0;
}

/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	p->on_rq = 0;

	p->se.on_rq = 0;
	p->se.exec_start = 0;
	p->se.sum_exec_runtime = 0;
	p->se.prev_sum_exec_runtime = 0;
	p->se.nr_migrations = 0;
	p->se.vruntime = 0;
	INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif

	RB_CLEAR_NODE(&p->dl.rb_node);
	init_dl_task_timer(&p->dl);
	__dl_clear_params(p);

	INIT_LIST_HEAD(&p->rt.run_list);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif

#ifdef CONFIG_NUMA_BALANCING
	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
		p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
		p->mm->numa_scan_seq = 0;
	}

	if (clone_flags & CLONE_VM)
		p->numa_preferred_nid = current->numa_preferred_nid;
	else
		p->numa_preferred_nid = -1;

	p->node_stamp = 0ULL;
	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
	p->numa_work.next = &p->numa_work;
	p->numa_faults = NULL;
	p->last_task_numa_placement = 0;
	p->last_sum_exec_runtime = 0;

	p->numa_group = NULL;
#endif /* CONFIG_NUMA_BALANCING */
}

DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);

#ifdef CONFIG_NUMA_BALANCING

void set_numabalancing_state(bool enabled)
{
	if (enabled)
		static_branch_enable(&sched_numa_balancing);
	else
		static_branch_disable(&sched_numa_balancing);
}

#ifdef CONFIG_PROC_SYSCTL
int sysctl_numa_balancing(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int err;
	int state = static_branch_likely(&sched_numa_balancing);

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	t = *table;
	t.data = &state;
	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;
	if (write)
		set_numabalancing_state(state);
	return err;
}
#endif
#endif

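/*
 * Illustrative note (not part of the original file): with
 * CONFIG_NUMA_BALANCING and CONFIG_PROC_SYSCTL, the handler above backs the
 * kernel.numa_balancing sysctl, so the feature can be toggled at runtime:
 *
 *	echo 0 > /proc/sys/kernel/numa_balancing
 *
 * which ends up in set_numabalancing_state() and flips the
 * sched_numa_balancing static branch.
 */
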
This guarantees that 2195 * nobody will actually run it, and a signal or other external 2196 * event cannot wake it up and insert it on the runqueue either. 2197 */ 2198 p->state = TASK_RUNNING; 2199 2200 /* 2201 * Make sure we do not leak PI boosting priority to the child. 2202 */ 2203 p->prio = current->normal_prio; 2204 2205 /* 2206 * Revert to default priority/policy on fork if requested. 2207 */ 2208 if (unlikely(p->sched_reset_on_fork)) { 2209 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 2210 p->policy = SCHED_NORMAL; 2211 p->static_prio = NICE_TO_PRIO(0); 2212 p->rt_priority = 0; 2213 } else if (PRIO_TO_NICE(p->static_prio) < 0) 2214 p->static_prio = NICE_TO_PRIO(0); 2215 2216 p->prio = p->normal_prio = __normal_prio(p); 2217 set_load_weight(p); 2218 2219 /* 2220 * We don't need the reset flag anymore after the fork. It has 2221 * fulfilled its duty: 2222 */ 2223 p->sched_reset_on_fork = 0; 2224 } 2225 2226 if (dl_prio(p->prio)) { 2227 put_cpu(); 2228 return -EAGAIN; 2229 } else if (rt_prio(p->prio)) { 2230 p->sched_class = &rt_sched_class; 2231 } else { 2232 p->sched_class = &fair_sched_class; 2233 } 2234 2235 if (p->sched_class->task_fork) 2236 p->sched_class->task_fork(p); 2237 2238 /* 2239 * The child is not yet in the pid-hash so no cgroup attach races, 2240 * and the cgroup is pinned to this child due to cgroup_fork() 2241 * is ran before sched_fork(). 2242 * 2243 * Silence PROVE_RCU. 2244 */ 2245 raw_spin_lock_irqsave(&p->pi_lock, flags); 2246 set_task_cpu(p, cpu); 2247 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2248 2249 #ifdef CONFIG_SCHED_INFO 2250 if (likely(sched_info_on())) 2251 memset(&p->sched_info, 0, sizeof(p->sched_info)); 2252 #endif 2253 #if defined(CONFIG_SMP) 2254 p->on_cpu = 0; 2255 #endif 2256 init_task_preempt_count(p); 2257 #ifdef CONFIG_SMP 2258 plist_node_init(&p->pushable_tasks, MAX_PRIO); 2259 RB_CLEAR_NODE(&p->pushable_dl_tasks); 2260 #endif 2261 2262 put_cpu(); 2263 return 0; 2264 } 2265 2266 unsigned long to_ratio(u64 period, u64 runtime) 2267 { 2268 if (runtime == RUNTIME_INF) 2269 return 1ULL << 20; 2270 2271 /* 2272 * Doing this here saves a lot of checks in all 2273 * the calling paths, and returning zero seems 2274 * safe for them anyway. 2275 */ 2276 if (period == 0) 2277 return 0; 2278 2279 return div64_u64(runtime << 20, period); 2280 } 2281 2282 #ifdef CONFIG_SMP 2283 inline struct dl_bw *dl_bw_of(int i) 2284 { 2285 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2286 "sched RCU must be held"); 2287 return &cpu_rq(i)->rd->dl_bw; 2288 } 2289 2290 static inline int dl_bw_cpus(int i) 2291 { 2292 struct root_domain *rd = cpu_rq(i)->rd; 2293 int cpus = 0; 2294 2295 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2296 "sched RCU must be held"); 2297 for_each_cpu_and(i, rd->span, cpu_active_mask) 2298 cpus++; 2299 2300 return cpus; 2301 } 2302 #else 2303 inline struct dl_bw *dl_bw_of(int i) 2304 { 2305 return &cpu_rq(i)->dl.dl_bw; 2306 } 2307 2308 static inline int dl_bw_cpus(int i) 2309 { 2310 return 1; 2311 } 2312 #endif 2313 2314 /* 2315 * We must be sure that accepting a new task (or allowing changing the 2316 * parameters of an existing one) is consistent with the bandwidth 2317 * constraints. If yes, this function also accordingly updates the currently 2318 * allocated bandwidth to reflect the new situation. 2319 * 2320 * This function is called while holding p's rq->lock. 2321 * 2322 * XXX we should delay bw change until the task's 0-lag point, see 2323 * __setparam_dl(). 
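 *
 * Worked example (illustrative numbers, not from this file): a request of
 * sched_runtime = 10ms over sched_period = 100ms maps to
 * new_bw = to_ratio(100ms, 10ms) = (10ms << 20) / 100ms ~= 104857,
 * i.e. about 10% of the 1 << 20 scale that stands for a whole CPU.
 * Roughly speaking, the change is admitted only if the root domain's
 * total -deadline bandwidth, including new_bw, still fits within its
 * per-CPU limit scaled by dl_bw_cpus().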
2324 */ 2325 static int dl_overflow(struct task_struct *p, int policy, 2326 const struct sched_attr *attr) 2327 { 2328 2329 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 2330 u64 period = attr->sched_period ?: attr->sched_deadline; 2331 u64 runtime = attr->sched_runtime; 2332 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; 2333 int cpus, err = -1; 2334 2335 if (new_bw == p->dl.dl_bw) 2336 return 0; 2337 2338 /* 2339 * Either if a task, enters, leave, or stays -deadline but changes 2340 * its parameters, we may need to update accordingly the total 2341 * allocated bandwidth of the container. 2342 */ 2343 raw_spin_lock(&dl_b->lock); 2344 cpus = dl_bw_cpus(task_cpu(p)); 2345 if (dl_policy(policy) && !task_has_dl_policy(p) && 2346 !__dl_overflow(dl_b, cpus, 0, new_bw)) { 2347 __dl_add(dl_b, new_bw); 2348 err = 0; 2349 } else if (dl_policy(policy) && task_has_dl_policy(p) && 2350 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) { 2351 __dl_clear(dl_b, p->dl.dl_bw); 2352 __dl_add(dl_b, new_bw); 2353 err = 0; 2354 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { 2355 __dl_clear(dl_b, p->dl.dl_bw); 2356 err = 0; 2357 } 2358 raw_spin_unlock(&dl_b->lock); 2359 2360 return err; 2361 } 2362 2363 extern void init_dl_bw(struct dl_bw *dl_b); 2364 2365 /* 2366 * wake_up_new_task - wake up a newly created task for the first time. 2367 * 2368 * This function will do some initial scheduler statistics housekeeping 2369 * that must be done for every newly created context, then puts the task 2370 * on the runqueue and wakes it. 2371 */ 2372 void wake_up_new_task(struct task_struct *p) 2373 { 2374 unsigned long flags; 2375 struct rq *rq; 2376 2377 raw_spin_lock_irqsave(&p->pi_lock, flags); 2378 /* Initialize new task's runnable average */ 2379 init_entity_runnable_average(&p->se); 2380 #ifdef CONFIG_SMP 2381 /* 2382 * Fork balancing, do it here and not earlier because: 2383 * - cpus_allowed can change in the fork path 2384 * - any previously selected cpu might disappear through hotplug 2385 */ 2386 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 2387 #endif 2388 2389 rq = __task_rq_lock(p); 2390 activate_task(rq, p, 0); 2391 p->on_rq = TASK_ON_RQ_QUEUED; 2392 trace_sched_wakeup_new(p); 2393 check_preempt_curr(rq, p, WF_FORK); 2394 #ifdef CONFIG_SMP 2395 if (p->sched_class->task_woken) { 2396 /* 2397 * Nothing relies on rq->lock after this, so its fine to 2398 * drop it. 
2399 */ 2400 lockdep_unpin_lock(&rq->lock); 2401 p->sched_class->task_woken(rq, p); 2402 lockdep_pin_lock(&rq->lock); 2403 } 2404 #endif 2405 task_rq_unlock(rq, p, &flags); 2406 } 2407 2408 #ifdef CONFIG_PREEMPT_NOTIFIERS 2409 2410 static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE; 2411 2412 void preempt_notifier_inc(void) 2413 { 2414 static_key_slow_inc(&preempt_notifier_key); 2415 } 2416 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 2417 2418 void preempt_notifier_dec(void) 2419 { 2420 static_key_slow_dec(&preempt_notifier_key); 2421 } 2422 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 2423 2424 /** 2425 * preempt_notifier_register - tell me when current is being preempted & rescheduled 2426 * @notifier: notifier struct to register 2427 */ 2428 void preempt_notifier_register(struct preempt_notifier *notifier) 2429 { 2430 if (!static_key_false(&preempt_notifier_key)) 2431 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 2432 2433 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 2434 } 2435 EXPORT_SYMBOL_GPL(preempt_notifier_register); 2436 2437 /** 2438 * preempt_notifier_unregister - no longer interested in preemption notifications 2439 * @notifier: notifier struct to unregister 2440 * 2441 * This is *not* safe to call from within a preemption notifier. 2442 */ 2443 void preempt_notifier_unregister(struct preempt_notifier *notifier) 2444 { 2445 hlist_del(¬ifier->link); 2446 } 2447 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 2448 2449 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 2450 { 2451 struct preempt_notifier *notifier; 2452 2453 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2454 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 2455 } 2456 2457 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2458 { 2459 if (static_key_false(&preempt_notifier_key)) 2460 __fire_sched_in_preempt_notifiers(curr); 2461 } 2462 2463 static void 2464 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 2465 struct task_struct *next) 2466 { 2467 struct preempt_notifier *notifier; 2468 2469 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2470 notifier->ops->sched_out(notifier, next); 2471 } 2472 2473 static __always_inline void 2474 fire_sched_out_preempt_notifiers(struct task_struct *curr, 2475 struct task_struct *next) 2476 { 2477 if (static_key_false(&preempt_notifier_key)) 2478 __fire_sched_out_preempt_notifiers(curr, next); 2479 } 2480 2481 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 2482 2483 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2484 { 2485 } 2486 2487 static inline void 2488 fire_sched_out_preempt_notifiers(struct task_struct *curr, 2489 struct task_struct *next) 2490 { 2491 } 2492 2493 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 2494 2495 /** 2496 * prepare_task_switch - prepare to switch tasks 2497 * @rq: the runqueue preparing to switch 2498 * @prev: the current task that is being switched out 2499 * @next: the task we are going to switch to. 2500 * 2501 * This is called with the rq lock held and interrupts off. It must 2502 * be paired with a subsequent finish_task_switch after the context 2503 * switch. 2504 * 2505 * prepare_task_switch sets up locking and calls architecture specific 2506 * hooks. 
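 *
 * The expected ordering, as used by context_switch() below, is roughly:
 *
 *	prepare_task_switch(rq, prev, next);
 *	...
 *	switch_to(prev, next, prev);		the stack/register switch
 *	...
 *	finish_task_switch(prev);		cleanup on the new stack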
2507 */ 2508 static inline void 2509 prepare_task_switch(struct rq *rq, struct task_struct *prev, 2510 struct task_struct *next) 2511 { 2512 sched_info_switch(rq, prev, next); 2513 perf_event_task_sched_out(prev, next); 2514 fire_sched_out_preempt_notifiers(prev, next); 2515 prepare_lock_switch(rq, next); 2516 prepare_arch_switch(next); 2517 } 2518 2519 /** 2520 * finish_task_switch - clean up after a task-switch 2521 * @prev: the thread we just switched away from. 2522 * 2523 * finish_task_switch must be called after the context switch, paired 2524 * with a prepare_task_switch call before the context switch. 2525 * finish_task_switch will reconcile locking set up by prepare_task_switch, 2526 * and do any other architecture-specific cleanup actions. 2527 * 2528 * Note that we may have delayed dropping an mm in context_switch(). If 2529 * so, we finish that here outside of the runqueue lock. (Doing it 2530 * with the lock held can cause deadlocks; see schedule() for 2531 * details.) 2532 * 2533 * The context switch have flipped the stack from under us and restored the 2534 * local variables which were saved when this task called schedule() in the 2535 * past. prev == current is still correct but we need to recalculate this_rq 2536 * because prev may have moved to another CPU. 2537 */ 2538 static struct rq *finish_task_switch(struct task_struct *prev) 2539 __releases(rq->lock) 2540 { 2541 struct rq *rq = this_rq(); 2542 struct mm_struct *mm = rq->prev_mm; 2543 long prev_state; 2544 2545 /* 2546 * The previous task will have left us with a preempt_count of 2 2547 * because it left us after: 2548 * 2549 * schedule() 2550 * preempt_disable(); // 1 2551 * __schedule() 2552 * raw_spin_lock_irq(&rq->lock) // 2 2553 * 2554 * Also, see FORK_PREEMPT_COUNT. 2555 */ 2556 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, 2557 "corrupted preempt_count: %s/%d/0x%x\n", 2558 current->comm, current->pid, preempt_count())) 2559 preempt_count_set(FORK_PREEMPT_COUNT); 2560 2561 rq->prev_mm = NULL; 2562 2563 /* 2564 * A task struct has one reference for the use as "current". 2565 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 2566 * schedule one last time. The schedule call will never return, and 2567 * the scheduled task must drop that reference. 2568 * 2569 * We must observe prev->state before clearing prev->on_cpu (in 2570 * finish_lock_switch), otherwise a concurrent wakeup can get prev 2571 * running on another CPU and we could rave with its RUNNING -> DEAD 2572 * transition, resulting in a double drop. 2573 */ 2574 prev_state = prev->state; 2575 vtime_task_switch(prev); 2576 perf_event_task_sched_in(prev, current); 2577 finish_lock_switch(rq, prev); 2578 finish_arch_post_lock_switch(); 2579 2580 fire_sched_in_preempt_notifiers(current); 2581 if (mm) 2582 mmdrop(mm); 2583 if (unlikely(prev_state == TASK_DEAD)) { 2584 if (prev->sched_class->task_dead) 2585 prev->sched_class->task_dead(prev); 2586 2587 /* 2588 * Remove function-return probe instances associated with this 2589 * task and put them back on the free list. 
2590 */ 2591 kprobe_flush_task(prev); 2592 put_task_struct(prev); 2593 } 2594 2595 tick_nohz_task_switch(); 2596 return rq; 2597 } 2598 2599 #ifdef CONFIG_SMP 2600 2601 /* rq->lock is NOT held, but preemption is disabled */ 2602 static void __balance_callback(struct rq *rq) 2603 { 2604 struct callback_head *head, *next; 2605 void (*func)(struct rq *rq); 2606 unsigned long flags; 2607 2608 raw_spin_lock_irqsave(&rq->lock, flags); 2609 head = rq->balance_callback; 2610 rq->balance_callback = NULL; 2611 while (head) { 2612 func = (void (*)(struct rq *))head->func; 2613 next = head->next; 2614 head->next = NULL; 2615 head = next; 2616 2617 func(rq); 2618 } 2619 raw_spin_unlock_irqrestore(&rq->lock, flags); 2620 } 2621 2622 static inline void balance_callback(struct rq *rq) 2623 { 2624 if (unlikely(rq->balance_callback)) 2625 __balance_callback(rq); 2626 } 2627 2628 #else 2629 2630 static inline void balance_callback(struct rq *rq) 2631 { 2632 } 2633 2634 #endif 2635 2636 /** 2637 * schedule_tail - first thing a freshly forked thread must call. 2638 * @prev: the thread we just switched away from. 2639 */ 2640 asmlinkage __visible void schedule_tail(struct task_struct *prev) 2641 __releases(rq->lock) 2642 { 2643 struct rq *rq; 2644 2645 /* 2646 * New tasks start with FORK_PREEMPT_COUNT, see there and 2647 * finish_task_switch() for details. 2648 * 2649 * finish_task_switch() will drop rq->lock() and lower preempt_count 2650 * and the preempt_enable() will end up enabling preemption (on 2651 * PREEMPT_COUNT kernels). 2652 */ 2653 2654 rq = finish_task_switch(prev); 2655 balance_callback(rq); 2656 preempt_enable(); 2657 2658 if (current->set_child_tid) 2659 put_user(task_pid_vnr(current), current->set_child_tid); 2660 } 2661 2662 /* 2663 * context_switch - switch to the new MM and the new thread's register state. 2664 */ 2665 static inline struct rq * 2666 context_switch(struct rq *rq, struct task_struct *prev, 2667 struct task_struct *next) 2668 { 2669 struct mm_struct *mm, *oldmm; 2670 2671 prepare_task_switch(rq, prev, next); 2672 2673 mm = next->mm; 2674 oldmm = prev->active_mm; 2675 /* 2676 * For paravirt, this is coupled with an exit in switch_to to 2677 * combine the page table reload and the switch backend into 2678 * one hypercall. 2679 */ 2680 arch_start_context_switch(prev); 2681 2682 if (!mm) { 2683 next->active_mm = oldmm; 2684 atomic_inc(&oldmm->mm_count); 2685 enter_lazy_tlb(oldmm, next); 2686 } else 2687 switch_mm(oldmm, mm, next); 2688 2689 if (!prev->mm) { 2690 prev->active_mm = NULL; 2691 rq->prev_mm = oldmm; 2692 } 2693 /* 2694 * Since the runqueue lock will be released by the next 2695 * task (which is an invalid locking op but in the case 2696 * of the scheduler it's an obvious special-case), so we 2697 * do an early lockdep release here: 2698 */ 2699 lockdep_unpin_lock(&rq->lock); 2700 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 2701 2702 /* Here we just switch the register state and the stack. */ 2703 switch_to(prev, next, prev); 2704 barrier(); 2705 2706 return finish_task_switch(prev); 2707 } 2708 2709 /* 2710 * nr_running and nr_context_switches: 2711 * 2712 * externally visible scheduler statistics: current number of runnable 2713 * threads, total number of context switches performed since bootup. 2714 */ 2715 unsigned long nr_running(void) 2716 { 2717 unsigned long i, sum = 0; 2718 2719 for_each_online_cpu(i) 2720 sum += cpu_rq(i)->nr_running; 2721 2722 return sum; 2723 } 2724 2725 /* 2726 * Check if only the current task is running on the cpu. 
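 * In other words: return true only when this CPU's runqueue has a single
 * runnable task (nr_running == 1).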
2727 * 2728 * Caution: this function does not check that the caller has disabled 2729 * preemption, thus the result might have a time-of-check-to-time-of-use 2730 * race. The caller is responsible to use it correctly, for example: 2731 * 2732 * - from a non-preemptable section (of course) 2733 * 2734 * - from a thread that is bound to a single CPU 2735 * 2736 * - in a loop with very short iterations (e.g. a polling loop) 2737 */ 2738 bool single_task_running(void) 2739 { 2740 return raw_rq()->nr_running == 1; 2741 } 2742 EXPORT_SYMBOL(single_task_running); 2743 2744 unsigned long long nr_context_switches(void) 2745 { 2746 int i; 2747 unsigned long long sum = 0; 2748 2749 for_each_possible_cpu(i) 2750 sum += cpu_rq(i)->nr_switches; 2751 2752 return sum; 2753 } 2754 2755 unsigned long nr_iowait(void) 2756 { 2757 unsigned long i, sum = 0; 2758 2759 for_each_possible_cpu(i) 2760 sum += atomic_read(&cpu_rq(i)->nr_iowait); 2761 2762 return sum; 2763 } 2764 2765 unsigned long nr_iowait_cpu(int cpu) 2766 { 2767 struct rq *this = cpu_rq(cpu); 2768 return atomic_read(&this->nr_iowait); 2769 } 2770 2771 void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) 2772 { 2773 struct rq *rq = this_rq(); 2774 *nr_waiters = atomic_read(&rq->nr_iowait); 2775 *load = rq->load.weight; 2776 } 2777 2778 #ifdef CONFIG_SMP 2779 2780 /* 2781 * sched_exec - execve() is a valuable balancing opportunity, because at 2782 * this point the task has the smallest effective memory and cache footprint. 2783 */ 2784 void sched_exec(void) 2785 { 2786 struct task_struct *p = current; 2787 unsigned long flags; 2788 int dest_cpu; 2789 2790 raw_spin_lock_irqsave(&p->pi_lock, flags); 2791 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); 2792 if (dest_cpu == smp_processor_id()) 2793 goto unlock; 2794 2795 if (likely(cpu_active(dest_cpu))) { 2796 struct migration_arg arg = { p, dest_cpu }; 2797 2798 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2799 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 2800 return; 2801 } 2802 unlock: 2803 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2804 } 2805 2806 #endif 2807 2808 DEFINE_PER_CPU(struct kernel_stat, kstat); 2809 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 2810 2811 EXPORT_PER_CPU_SYMBOL(kstat); 2812 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 2813 2814 /* 2815 * Return accounted runtime for the task. 2816 * In case the task is currently running, return the runtime plus current's 2817 * pending runtime that have not been accounted yet. 2818 */ 2819 unsigned long long task_sched_runtime(struct task_struct *p) 2820 { 2821 unsigned long flags; 2822 struct rq *rq; 2823 u64 ns; 2824 2825 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 2826 /* 2827 * 64-bit doesn't need locks to atomically read a 64bit value. 2828 * So we have a optimization chance when the task's delta_exec is 0. 2829 * Reading ->on_cpu is racy, but this is ok. 2830 * 2831 * If we race with it leaving cpu, we'll take a lock. So we're correct. 2832 * If we race with it entering cpu, unaccounted time is 0. This is 2833 * indistinguishable from the read occurring a few cycles earlier. 2834 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 2835 * been accounted, so we're correct here as well. 2836 */ 2837 if (!p->on_cpu || !task_on_rq_queued(p)) 2838 return p->se.sum_exec_runtime; 2839 #endif 2840 2841 rq = task_rq_lock(p, &flags); 2842 /* 2843 * Must be ->curr _and_ ->on_rq. 
If dequeued, we would 2844 * project cycles that may never be accounted to this 2845 * thread, breaking clock_gettime(). 2846 */ 2847 if (task_current(rq, p) && task_on_rq_queued(p)) { 2848 update_rq_clock(rq); 2849 p->sched_class->update_curr(rq); 2850 } 2851 ns = p->se.sum_exec_runtime; 2852 task_rq_unlock(rq, p, &flags); 2853 2854 return ns; 2855 } 2856 2857 /* 2858 * This function gets called by the timer code, with HZ frequency. 2859 * We call it with interrupts disabled. 2860 */ 2861 void scheduler_tick(void) 2862 { 2863 int cpu = smp_processor_id(); 2864 struct rq *rq = cpu_rq(cpu); 2865 struct task_struct *curr = rq->curr; 2866 2867 sched_clock_tick(); 2868 2869 raw_spin_lock(&rq->lock); 2870 update_rq_clock(rq); 2871 curr->sched_class->task_tick(rq, curr, 0); 2872 update_cpu_load_active(rq); 2873 calc_global_load_tick(rq); 2874 raw_spin_unlock(&rq->lock); 2875 2876 perf_event_task_tick(); 2877 2878 #ifdef CONFIG_SMP 2879 rq->idle_balance = idle_cpu(cpu); 2880 trigger_load_balance(rq); 2881 #endif 2882 rq_last_tick_reset(rq); 2883 } 2884 2885 #ifdef CONFIG_NO_HZ_FULL 2886 /** 2887 * scheduler_tick_max_deferment 2888 * 2889 * Keep at least one tick per second when a single 2890 * active task is running because the scheduler doesn't 2891 * yet completely support full dynticks environment. 2892 * 2893 * This makes sure that uptime, CFS vruntime, load 2894 * balancing, etc... continue to move forward, even 2895 * with a very low granularity. 2896 * 2897 * Return: Maximum deferment in nanoseconds. 2898 */ 2899 u64 scheduler_tick_max_deferment(void) 2900 { 2901 struct rq *rq = this_rq(); 2902 unsigned long next, now = READ_ONCE(jiffies); 2903 2904 next = rq->last_sched_tick + HZ; 2905 2906 if (time_before_eq(next, now)) 2907 return 0; 2908 2909 return jiffies_to_nsecs(next - now); 2910 } 2911 #endif 2912 2913 notrace unsigned long get_parent_ip(unsigned long addr) 2914 { 2915 if (in_lock_functions(addr)) { 2916 addr = CALLER_ADDR2; 2917 if (in_lock_functions(addr)) 2918 addr = CALLER_ADDR3; 2919 } 2920 return addr; 2921 } 2922 2923 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ 2924 defined(CONFIG_PREEMPT_TRACER)) 2925 2926 void preempt_count_add(int val) 2927 { 2928 #ifdef CONFIG_DEBUG_PREEMPT 2929 /* 2930 * Underflow? 2931 */ 2932 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 2933 return; 2934 #endif 2935 __preempt_count_add(val); 2936 #ifdef CONFIG_DEBUG_PREEMPT 2937 /* 2938 * Spinlock count overflowing soon? 2939 */ 2940 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 2941 PREEMPT_MASK - 10); 2942 #endif 2943 if (preempt_count() == val) { 2944 unsigned long ip = get_parent_ip(CALLER_ADDR1); 2945 #ifdef CONFIG_DEBUG_PREEMPT 2946 current->preempt_disable_ip = ip; 2947 #endif 2948 trace_preempt_off(CALLER_ADDR0, ip); 2949 } 2950 } 2951 EXPORT_SYMBOL(preempt_count_add); 2952 NOKPROBE_SYMBOL(preempt_count_add); 2953 2954 void preempt_count_sub(int val) 2955 { 2956 #ifdef CONFIG_DEBUG_PREEMPT 2957 /* 2958 * Underflow? 2959 */ 2960 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 2961 return; 2962 /* 2963 * Is the spinlock portion underflowing? 
2964 */ 2965 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 2966 !(preempt_count() & PREEMPT_MASK))) 2967 return; 2968 #endif 2969 2970 if (preempt_count() == val) 2971 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); 2972 __preempt_count_sub(val); 2973 } 2974 EXPORT_SYMBOL(preempt_count_sub); 2975 NOKPROBE_SYMBOL(preempt_count_sub); 2976 2977 #endif 2978 2979 /* 2980 * Print scheduling while atomic bug: 2981 */ 2982 static noinline void __schedule_bug(struct task_struct *prev) 2983 { 2984 if (oops_in_progress) 2985 return; 2986 2987 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 2988 prev->comm, prev->pid, preempt_count()); 2989 2990 debug_show_held_locks(prev); 2991 print_modules(); 2992 if (irqs_disabled()) 2993 print_irqtrace_events(prev); 2994 #ifdef CONFIG_DEBUG_PREEMPT 2995 if (in_atomic_preempt_off()) { 2996 pr_err("Preemption disabled at:"); 2997 print_ip_sym(current->preempt_disable_ip); 2998 pr_cont("\n"); 2999 } 3000 #endif 3001 dump_stack(); 3002 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 3003 } 3004 3005 /* 3006 * Various schedule()-time debugging checks and statistics: 3007 */ 3008 static inline void schedule_debug(struct task_struct *prev) 3009 { 3010 #ifdef CONFIG_SCHED_STACK_END_CHECK 3011 BUG_ON(task_stack_end_corrupted(prev)); 3012 #endif 3013 3014 if (unlikely(in_atomic_preempt_off())) { 3015 __schedule_bug(prev); 3016 preempt_count_set(PREEMPT_DISABLED); 3017 } 3018 rcu_sleep_check(); 3019 3020 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 3021 3022 schedstat_inc(this_rq(), sched_count); 3023 } 3024 3025 /* 3026 * Pick up the highest-prio task: 3027 */ 3028 static inline struct task_struct * 3029 pick_next_task(struct rq *rq, struct task_struct *prev) 3030 { 3031 const struct sched_class *class = &fair_sched_class; 3032 struct task_struct *p; 3033 3034 /* 3035 * Optimization: we know that if all tasks are in 3036 * the fair class we can call that function directly: 3037 */ 3038 if (likely(prev->sched_class == class && 3039 rq->nr_running == rq->cfs.h_nr_running)) { 3040 p = fair_sched_class.pick_next_task(rq, prev); 3041 if (unlikely(p == RETRY_TASK)) 3042 goto again; 3043 3044 /* assumes fair_sched_class->next == idle_sched_class */ 3045 if (unlikely(!p)) 3046 p = idle_sched_class.pick_next_task(rq, prev); 3047 3048 return p; 3049 } 3050 3051 again: 3052 for_each_class(class) { 3053 p = class->pick_next_task(rq, prev); 3054 if (p) { 3055 if (unlikely(p == RETRY_TASK)) 3056 goto again; 3057 return p; 3058 } 3059 } 3060 3061 BUG(); /* the idle class will always have a runnable task */ 3062 } 3063 3064 /* 3065 * __schedule() is the main scheduler function. 3066 * 3067 * The main means of driving the scheduler and thus entering this function are: 3068 * 3069 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 3070 * 3071 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 3072 * paths. For example, see arch/x86/entry_64.S. 3073 * 3074 * To drive preemption between tasks, the scheduler sets the flag in timer 3075 * interrupt handler scheduler_tick(). 3076 * 3077 * 3. Wakeups don't really cause entry into schedule(). They add a 3078 * task to the run-queue and that's it. 
3079 * 3080 * Now, if the new task added to the run-queue preempts the current 3081 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 3082 * called on the nearest possible occasion: 3083 * 3084 * - If the kernel is preemptible (CONFIG_PREEMPT=y): 3085 * 3086 * - in syscall or exception context, at the next outmost 3087 * preempt_enable(). (this might be as soon as the wake_up()'s 3088 * spin_unlock()!) 3089 * 3090 * - in IRQ context, return from interrupt-handler to 3091 * preemptible context 3092 * 3093 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) 3094 * then at the next: 3095 * 3096 * - cond_resched() call 3097 * - explicit schedule() call 3098 * - return from syscall or exception to user-space 3099 * - return from interrupt-handler to user-space 3100 * 3101 * WARNING: must be called with preemption disabled! 3102 */ 3103 static void __sched notrace __schedule(bool preempt) 3104 { 3105 struct task_struct *prev, *next; 3106 unsigned long *switch_count; 3107 struct rq *rq; 3108 int cpu; 3109 3110 cpu = smp_processor_id(); 3111 rq = cpu_rq(cpu); 3112 rcu_note_context_switch(); 3113 prev = rq->curr; 3114 3115 /* 3116 * do_exit() calls schedule() with preemption disabled as an exception; 3117 * however we must fix that up, otherwise the next task will see an 3118 * inconsistent (higher) preempt count. 3119 * 3120 * It also avoids the below schedule_debug() test from complaining 3121 * about this. 3122 */ 3123 if (unlikely(prev->state == TASK_DEAD)) 3124 preempt_enable_no_resched_notrace(); 3125 3126 schedule_debug(prev); 3127 3128 if (sched_feat(HRTICK)) 3129 hrtick_clear(rq); 3130 3131 /* 3132 * Make sure that signal_pending_state()->signal_pending() below 3133 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 3134 * done by the caller to avoid the race with signal_wake_up(). 3135 */ 3136 smp_mb__before_spinlock(); 3137 raw_spin_lock_irq(&rq->lock); 3138 lockdep_pin_lock(&rq->lock); 3139 3140 rq->clock_skip_update <<= 1; /* promote REQ to ACT */ 3141 3142 switch_count = &prev->nivcsw; 3143 if (!preempt && prev->state) { 3144 if (unlikely(signal_pending_state(prev->state, prev))) { 3145 prev->state = TASK_RUNNING; 3146 } else { 3147 deactivate_task(rq, prev, DEQUEUE_SLEEP); 3148 prev->on_rq = 0; 3149 3150 /* 3151 * If a worker went to sleep, notify and ask workqueue 3152 * whether it wants to wake up a task to maintain 3153 * concurrency. 3154 */ 3155 if (prev->flags & PF_WQ_WORKER) { 3156 struct task_struct *to_wakeup; 3157 3158 to_wakeup = wq_worker_sleeping(prev, cpu); 3159 if (to_wakeup) 3160 try_to_wake_up_local(to_wakeup); 3161 } 3162 } 3163 switch_count = &prev->nvcsw; 3164 } 3165 3166 if (task_on_rq_queued(prev)) 3167 update_rq_clock(rq); 3168 3169 next = pick_next_task(rq, prev); 3170 clear_tsk_need_resched(prev); 3171 clear_preempt_need_resched(); 3172 rq->clock_skip_update = 0; 3173 3174 if (likely(prev != next)) { 3175 rq->nr_switches++; 3176 rq->curr = next; 3177 ++*switch_count; 3178 3179 trace_sched_switch(preempt, prev, next); 3180 rq = context_switch(rq, prev, next); /* unlocks the rq */ 3181 cpu = cpu_of(rq); 3182 } else { 3183 lockdep_unpin_lock(&rq->lock); 3184 raw_spin_unlock_irq(&rq->lock); 3185 } 3186 3187 balance_callback(rq); 3188 } 3189 3190 static inline void sched_submit_work(struct task_struct *tsk) 3191 { 3192 if (!tsk->state || tsk_is_pi_blocked(tsk)) 3193 return; 3194 /* 3195 * If we are going to sleep and we have plugged IO queued, 3196 * make sure to submit it to avoid deadlocks. 
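 * Otherwise the task could go to sleep waiting for I/O completions whose
 * requests are still sitting, unissued, in its own block plug.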
3197 */ 3198 if (blk_needs_flush_plug(tsk)) 3199 blk_schedule_flush_plug(tsk); 3200 } 3201 3202 asmlinkage __visible void __sched schedule(void) 3203 { 3204 struct task_struct *tsk = current; 3205 3206 sched_submit_work(tsk); 3207 do { 3208 preempt_disable(); 3209 __schedule(false); 3210 sched_preempt_enable_no_resched(); 3211 } while (need_resched()); 3212 } 3213 EXPORT_SYMBOL(schedule); 3214 3215 #ifdef CONFIG_CONTEXT_TRACKING 3216 asmlinkage __visible void __sched schedule_user(void) 3217 { 3218 /* 3219 * If we come here after a random call to set_need_resched(), 3220 * or we have been woken up remotely but the IPI has not yet arrived, 3221 * we haven't yet exited the RCU idle mode. Do it here manually until 3222 * we find a better solution. 3223 * 3224 * NB: There are buggy callers of this function. Ideally we 3225 * should warn if prev_state != CONTEXT_USER, but that will trigger 3226 * too frequently to make sense yet. 3227 */ 3228 enum ctx_state prev_state = exception_enter(); 3229 schedule(); 3230 exception_exit(prev_state); 3231 } 3232 #endif 3233 3234 /** 3235 * schedule_preempt_disabled - called with preemption disabled 3236 * 3237 * Returns with preemption disabled. Note: preempt_count must be 1 3238 */ 3239 void __sched schedule_preempt_disabled(void) 3240 { 3241 sched_preempt_enable_no_resched(); 3242 schedule(); 3243 preempt_disable(); 3244 } 3245 3246 static void __sched notrace preempt_schedule_common(void) 3247 { 3248 do { 3249 preempt_disable_notrace(); 3250 __schedule(true); 3251 preempt_enable_no_resched_notrace(); 3252 3253 /* 3254 * Check again in case we missed a preemption opportunity 3255 * between schedule and now. 3256 */ 3257 } while (need_resched()); 3258 } 3259 3260 #ifdef CONFIG_PREEMPT 3261 /* 3262 * this is the entry point to schedule() from in-kernel preemption 3263 * off of preempt_enable. Kernel preemptions off return from interrupt 3264 * occur there and call schedule directly. 3265 */ 3266 asmlinkage __visible void __sched notrace preempt_schedule(void) 3267 { 3268 /* 3269 * If there is a non-zero preempt_count or interrupts are disabled, 3270 * we do not want to preempt the current task. Just return.. 3271 */ 3272 if (likely(!preemptible())) 3273 return; 3274 3275 preempt_schedule_common(); 3276 } 3277 NOKPROBE_SYMBOL(preempt_schedule); 3278 EXPORT_SYMBOL(preempt_schedule); 3279 3280 /** 3281 * preempt_schedule_notrace - preempt_schedule called by tracing 3282 * 3283 * The tracing infrastructure uses preempt_enable_notrace to prevent 3284 * recursion and tracing preempt enabling caused by the tracing 3285 * infrastructure itself. But as tracing can happen in areas coming 3286 * from userspace or just about to enter userspace, a preempt enable 3287 * can occur before user_exit() is called. This will cause the scheduler 3288 * to be called when the system is still in usermode. 3289 * 3290 * To prevent this, the preempt_enable_notrace will use this function 3291 * instead of preempt_schedule() to exit user context if needed before 3292 * calling the scheduler. 3293 */ 3294 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 3295 { 3296 enum ctx_state prev_ctx; 3297 3298 if (likely(!preemptible())) 3299 return; 3300 3301 do { 3302 preempt_disable_notrace(); 3303 /* 3304 * Needs preempt disabled in case user_exit() is traced 3305 * and the tracer calls preempt_enable_notrace() causing 3306 * an infinite recursion. 
3307 */ 3308 prev_ctx = exception_enter(); 3309 __schedule(true); 3310 exception_exit(prev_ctx); 3311 3312 preempt_enable_no_resched_notrace(); 3313 } while (need_resched()); 3314 } 3315 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 3316 3317 #endif /* CONFIG_PREEMPT */ 3318 3319 /* 3320 * this is the entry point to schedule() from kernel preemption 3321 * off of irq context. 3322 * Note, that this is called and return with irqs disabled. This will 3323 * protect us against recursive calling from irq. 3324 */ 3325 asmlinkage __visible void __sched preempt_schedule_irq(void) 3326 { 3327 enum ctx_state prev_state; 3328 3329 /* Catch callers which need to be fixed */ 3330 BUG_ON(preempt_count() || !irqs_disabled()); 3331 3332 prev_state = exception_enter(); 3333 3334 do { 3335 preempt_disable(); 3336 local_irq_enable(); 3337 __schedule(true); 3338 local_irq_disable(); 3339 sched_preempt_enable_no_resched(); 3340 } while (need_resched()); 3341 3342 exception_exit(prev_state); 3343 } 3344 3345 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, 3346 void *key) 3347 { 3348 return try_to_wake_up(curr->private, mode, wake_flags); 3349 } 3350 EXPORT_SYMBOL(default_wake_function); 3351 3352 #ifdef CONFIG_RT_MUTEXES 3353 3354 /* 3355 * rt_mutex_setprio - set the current priority of a task 3356 * @p: task 3357 * @prio: prio value (kernel-internal form) 3358 * 3359 * This function changes the 'effective' priority of a task. It does 3360 * not touch ->normal_prio like __setscheduler(). 3361 * 3362 * Used by the rt_mutex code to implement priority inheritance 3363 * logic. Call site only calls if the priority of the task changed. 3364 */ 3365 void rt_mutex_setprio(struct task_struct *p, int prio) 3366 { 3367 int oldprio, queued, running, enqueue_flag = ENQUEUE_RESTORE; 3368 struct rq *rq; 3369 const struct sched_class *prev_class; 3370 3371 BUG_ON(prio > MAX_PRIO); 3372 3373 rq = __task_rq_lock(p); 3374 3375 /* 3376 * Idle task boosting is a nono in general. There is one 3377 * exception, when PREEMPT_RT and NOHZ is active: 3378 * 3379 * The idle task calls get_next_timer_interrupt() and holds 3380 * the timer wheel base->lock on the CPU and another CPU wants 3381 * to access the timer (probably to cancel it). We can safely 3382 * ignore the boosting request, as the idle CPU runs this code 3383 * with interrupts disabled and will complete the lock 3384 * protected section without being interrupted. So there is no 3385 * real need to boost. 3386 */ 3387 if (unlikely(p == rq->idle)) { 3388 WARN_ON(p != rq->curr); 3389 WARN_ON(p->pi_blocked_on); 3390 goto out_unlock; 3391 } 3392 3393 trace_sched_pi_setprio(p, prio); 3394 oldprio = p->prio; 3395 prev_class = p->sched_class; 3396 queued = task_on_rq_queued(p); 3397 running = task_current(rq, p); 3398 if (queued) 3399 dequeue_task(rq, p, DEQUEUE_SAVE); 3400 if (running) 3401 put_prev_task(rq, p); 3402 3403 /* 3404 * Boosting condition are: 3405 * 1. -rt task is running and holds mutex A 3406 * --> -dl task blocks on mutex A 3407 * 3408 * 2. 
-dl task is running and holds mutex A 3409 * --> -dl task blocks on mutex A and could preempt the 3410 * running task 3411 */ 3412 if (dl_prio(prio)) { 3413 struct task_struct *pi_task = rt_mutex_get_top_task(p); 3414 if (!dl_prio(p->normal_prio) || 3415 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { 3416 p->dl.dl_boosted = 1; 3417 enqueue_flag |= ENQUEUE_REPLENISH; 3418 } else 3419 p->dl.dl_boosted = 0; 3420 p->sched_class = &dl_sched_class; 3421 } else if (rt_prio(prio)) { 3422 if (dl_prio(oldprio)) 3423 p->dl.dl_boosted = 0; 3424 if (oldprio < prio) 3425 enqueue_flag |= ENQUEUE_HEAD; 3426 p->sched_class = &rt_sched_class; 3427 } else { 3428 if (dl_prio(oldprio)) 3429 p->dl.dl_boosted = 0; 3430 if (rt_prio(oldprio)) 3431 p->rt.timeout = 0; 3432 p->sched_class = &fair_sched_class; 3433 } 3434 3435 p->prio = prio; 3436 3437 if (running) 3438 p->sched_class->set_curr_task(rq); 3439 if (queued) 3440 enqueue_task(rq, p, enqueue_flag); 3441 3442 check_class_changed(rq, p, prev_class, oldprio); 3443 out_unlock: 3444 preempt_disable(); /* avoid rq from going away on us */ 3445 __task_rq_unlock(rq); 3446 3447 balance_callback(rq); 3448 preempt_enable(); 3449 } 3450 #endif 3451 3452 void set_user_nice(struct task_struct *p, long nice) 3453 { 3454 int old_prio, delta, queued; 3455 unsigned long flags; 3456 struct rq *rq; 3457 3458 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 3459 return; 3460 /* 3461 * We have to be careful, if called from sys_setpriority(), 3462 * the task might be in the middle of scheduling on another CPU. 3463 */ 3464 rq = task_rq_lock(p, &flags); 3465 /* 3466 * The RT priorities are set via sched_setscheduler(), but we still 3467 * allow the 'normal' nice value to be set - but as expected 3468 * it wont have any effect on scheduling until the task is 3469 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 3470 */ 3471 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 3472 p->static_prio = NICE_TO_PRIO(nice); 3473 goto out_unlock; 3474 } 3475 queued = task_on_rq_queued(p); 3476 if (queued) 3477 dequeue_task(rq, p, DEQUEUE_SAVE); 3478 3479 p->static_prio = NICE_TO_PRIO(nice); 3480 set_load_weight(p); 3481 old_prio = p->prio; 3482 p->prio = effective_prio(p); 3483 delta = p->prio - old_prio; 3484 3485 if (queued) { 3486 enqueue_task(rq, p, ENQUEUE_RESTORE); 3487 /* 3488 * If the task increased its priority or is running and 3489 * lowered its priority, then reschedule its CPU: 3490 */ 3491 if (delta < 0 || (delta > 0 && task_running(rq, p))) 3492 resched_curr(rq); 3493 } 3494 out_unlock: 3495 task_rq_unlock(rq, p, &flags); 3496 } 3497 EXPORT_SYMBOL(set_user_nice); 3498 3499 /* 3500 * can_nice - check if a task can reduce its nice value 3501 * @p: task 3502 * @nice: nice value 3503 */ 3504 int can_nice(const struct task_struct *p, const int nice) 3505 { 3506 /* convert nice value [19,-20] to rlimit style value [1,40] */ 3507 int nice_rlim = nice_to_rlimit(nice); 3508 3509 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 3510 capable(CAP_SYS_NICE)); 3511 } 3512 3513 #ifdef __ARCH_WANT_SYS_NICE 3514 3515 /* 3516 * sys_nice - change the priority of the current process. 3517 * @increment: priority increment 3518 * 3519 * sys_setpriority is a more generic, but much slower function that 3520 * does similar things. 3521 */ 3522 SYSCALL_DEFINE1(nice, int, increment) 3523 { 3524 long nice, retval; 3525 3526 /* 3527 * Setpriority might change our priority at the same moment. 3528 * We don't have to worry. 
Conceptually one call occurs first 3529 * and we have a single winner. 3530 */ 3531 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 3532 nice = task_nice(current) + increment; 3533 3534 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 3535 if (increment < 0 && !can_nice(current, nice)) 3536 return -EPERM; 3537 3538 retval = security_task_setnice(current, nice); 3539 if (retval) 3540 return retval; 3541 3542 set_user_nice(current, nice); 3543 return 0; 3544 } 3545 3546 #endif 3547 3548 /** 3549 * task_prio - return the priority value of a given task. 3550 * @p: the task in question. 3551 * 3552 * Return: The priority value as seen by users in /proc. 3553 * RT tasks are offset by -200. Normal tasks are centered 3554 * around 0, value goes from -16 to +15. 3555 */ 3556 int task_prio(const struct task_struct *p) 3557 { 3558 return p->prio - MAX_RT_PRIO; 3559 } 3560 3561 /** 3562 * idle_cpu - is a given cpu idle currently? 3563 * @cpu: the processor in question. 3564 * 3565 * Return: 1 if the CPU is currently idle. 0 otherwise. 3566 */ 3567 int idle_cpu(int cpu) 3568 { 3569 struct rq *rq = cpu_rq(cpu); 3570 3571 if (rq->curr != rq->idle) 3572 return 0; 3573 3574 if (rq->nr_running) 3575 return 0; 3576 3577 #ifdef CONFIG_SMP 3578 if (!llist_empty(&rq->wake_list)) 3579 return 0; 3580 #endif 3581 3582 return 1; 3583 } 3584 3585 /** 3586 * idle_task - return the idle task for a given cpu. 3587 * @cpu: the processor in question. 3588 * 3589 * Return: The idle task for the cpu @cpu. 3590 */ 3591 struct task_struct *idle_task(int cpu) 3592 { 3593 return cpu_rq(cpu)->idle; 3594 } 3595 3596 /** 3597 * find_process_by_pid - find a process with a matching PID value. 3598 * @pid: the pid in question. 3599 * 3600 * The task of @pid, if found. %NULL otherwise. 3601 */ 3602 static struct task_struct *find_process_by_pid(pid_t pid) 3603 { 3604 return pid ? find_task_by_vpid(pid) : current; 3605 } 3606 3607 /* 3608 * This function initializes the sched_dl_entity of a newly becoming 3609 * SCHED_DEADLINE task. 3610 * 3611 * Only the static values are considered here, the actual runtime and the 3612 * absolute deadline will be properly calculated when the task is enqueued 3613 * for the first time with its new policy. 3614 */ 3615 static void 3616 __setparam_dl(struct task_struct *p, const struct sched_attr *attr) 3617 { 3618 struct sched_dl_entity *dl_se = &p->dl; 3619 3620 dl_se->dl_runtime = attr->sched_runtime; 3621 dl_se->dl_deadline = attr->sched_deadline; 3622 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 3623 dl_se->flags = attr->sched_flags; 3624 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 3625 3626 /* 3627 * Changing the parameters of a task is 'tricky' and we're not doing 3628 * the correct thing -- also see task_dead_dl() and switched_from_dl(). 3629 * 3630 * What we SHOULD do is delay the bandwidth release until the 0-lag 3631 * point. This would include retaining the task_struct until that time 3632 * and change dl_overflow() to not immediately decrement the current 3633 * amount. 3634 * 3635 * Instead we retain the current runtime/deadline and let the new 3636 * parameters take effect after the current reservation period lapses. 3637 * This is safe (albeit pessimistic) because the 0-lag point is always 3638 * before the current scheduling deadline. 3639 * 3640 * We can still have temporary overloads because we do not delay the 3641 * change in bandwidth until that time; so admission control is 3642 * not on the safe side. 
It does however guarantee tasks will never 3643 * consume more than promised. 3644 */ 3645 } 3646 3647 /* 3648 * sched_setparam() passes in -1 for its policy, to let the functions 3649 * it calls know not to change it. 3650 */ 3651 #define SETPARAM_POLICY -1 3652 3653 static void __setscheduler_params(struct task_struct *p, 3654 const struct sched_attr *attr) 3655 { 3656 int policy = attr->sched_policy; 3657 3658 if (policy == SETPARAM_POLICY) 3659 policy = p->policy; 3660 3661 p->policy = policy; 3662 3663 if (dl_policy(policy)) 3664 __setparam_dl(p, attr); 3665 else if (fair_policy(policy)) 3666 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 3667 3668 /* 3669 * __sched_setscheduler() ensures attr->sched_priority == 0 when 3670 * !rt_policy. Always setting this ensures that things like 3671 * getparam()/getattr() don't report silly values for !rt tasks. 3672 */ 3673 p->rt_priority = attr->sched_priority; 3674 p->normal_prio = normal_prio(p); 3675 set_load_weight(p); 3676 } 3677 3678 /* Actually do priority change: must hold pi & rq lock. */ 3679 static void __setscheduler(struct rq *rq, struct task_struct *p, 3680 const struct sched_attr *attr, bool keep_boost) 3681 { 3682 __setscheduler_params(p, attr); 3683 3684 /* 3685 * Keep a potential priority boosting if called from 3686 * sched_setscheduler(). 3687 */ 3688 if (keep_boost) 3689 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p)); 3690 else 3691 p->prio = normal_prio(p); 3692 3693 if (dl_prio(p->prio)) 3694 p->sched_class = &dl_sched_class; 3695 else if (rt_prio(p->prio)) 3696 p->sched_class = &rt_sched_class; 3697 else 3698 p->sched_class = &fair_sched_class; 3699 } 3700 3701 static void 3702 __getparam_dl(struct task_struct *p, struct sched_attr *attr) 3703 { 3704 struct sched_dl_entity *dl_se = &p->dl; 3705 3706 attr->sched_priority = p->rt_priority; 3707 attr->sched_runtime = dl_se->dl_runtime; 3708 attr->sched_deadline = dl_se->dl_deadline; 3709 attr->sched_period = dl_se->dl_period; 3710 attr->sched_flags = dl_se->flags; 3711 } 3712 3713 /* 3714 * This function validates the new parameters of a -deadline task. 3715 * We ask for the deadline not being zero, and greater or equal 3716 * than the runtime, as well as the period of being zero or 3717 * greater than deadline. Furthermore, we have to be sure that 3718 * user parameters are above the internal resolution of 1us (we 3719 * check sched_runtime only since it is always the smaller one) and 3720 * below 2^63 ns (we have to check both sched_deadline and 3721 * sched_period, as the latter can be zero). 3722 */ 3723 static bool 3724 __checkparam_dl(const struct sched_attr *attr) 3725 { 3726 /* deadline != 0 */ 3727 if (attr->sched_deadline == 0) 3728 return false; 3729 3730 /* 3731 * Since we truncate DL_SCALE bits, make sure we're at least 3732 * that big. 3733 */ 3734 if (attr->sched_runtime < (1ULL << DL_SCALE)) 3735 return false; 3736 3737 /* 3738 * Since we use the MSB for wrap-around and sign issues, make 3739 * sure it's not set (mind that period can be equal to zero). 
3740 */ 3741 if (attr->sched_deadline & (1ULL << 63) || 3742 attr->sched_period & (1ULL << 63)) 3743 return false; 3744 3745 /* runtime <= deadline <= period (if period != 0) */ 3746 if ((attr->sched_period != 0 && 3747 attr->sched_period < attr->sched_deadline) || 3748 attr->sched_deadline < attr->sched_runtime) 3749 return false; 3750 3751 return true; 3752 } 3753 3754 /* 3755 * check the target process has a UID that matches the current process's 3756 */ 3757 static bool check_same_owner(struct task_struct *p) 3758 { 3759 const struct cred *cred = current_cred(), *pcred; 3760 bool match; 3761 3762 rcu_read_lock(); 3763 pcred = __task_cred(p); 3764 match = (uid_eq(cred->euid, pcred->euid) || 3765 uid_eq(cred->euid, pcred->uid)); 3766 rcu_read_unlock(); 3767 return match; 3768 } 3769 3770 static bool dl_param_changed(struct task_struct *p, 3771 const struct sched_attr *attr) 3772 { 3773 struct sched_dl_entity *dl_se = &p->dl; 3774 3775 if (dl_se->dl_runtime != attr->sched_runtime || 3776 dl_se->dl_deadline != attr->sched_deadline || 3777 dl_se->dl_period != attr->sched_period || 3778 dl_se->flags != attr->sched_flags) 3779 return true; 3780 3781 return false; 3782 } 3783 3784 static int __sched_setscheduler(struct task_struct *p, 3785 const struct sched_attr *attr, 3786 bool user, bool pi) 3787 { 3788 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : 3789 MAX_RT_PRIO - 1 - attr->sched_priority; 3790 int retval, oldprio, oldpolicy = -1, queued, running; 3791 int new_effective_prio, policy = attr->sched_policy; 3792 unsigned long flags; 3793 const struct sched_class *prev_class; 3794 struct rq *rq; 3795 int reset_on_fork; 3796 3797 /* may grab non-irq protected spin_locks */ 3798 BUG_ON(in_interrupt()); 3799 recheck: 3800 /* double check policy once rq lock held */ 3801 if (policy < 0) { 3802 reset_on_fork = p->sched_reset_on_fork; 3803 policy = oldpolicy = p->policy; 3804 } else { 3805 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 3806 3807 if (!valid_policy(policy)) 3808 return -EINVAL; 3809 } 3810 3811 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK)) 3812 return -EINVAL; 3813 3814 /* 3815 * Valid priorities for SCHED_FIFO and SCHED_RR are 3816 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 3817 * SCHED_BATCH and SCHED_IDLE is 0. 
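 *
 * For example (illustrative): SCHED_FIFO with sched_priority = 50 from a
 * user task passes these range checks, while SCHED_FIFO with priority 0,
 * or SCHED_NORMAL with a non-zero priority, is rejected with -EINVAL.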
3818 */ 3819 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || 3820 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) 3821 return -EINVAL; 3822 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 3823 (rt_policy(policy) != (attr->sched_priority != 0))) 3824 return -EINVAL; 3825 3826 /* 3827 * Allow unprivileged RT tasks to decrease priority: 3828 */ 3829 if (user && !capable(CAP_SYS_NICE)) { 3830 if (fair_policy(policy)) { 3831 if (attr->sched_nice < task_nice(p) && 3832 !can_nice(p, attr->sched_nice)) 3833 return -EPERM; 3834 } 3835 3836 if (rt_policy(policy)) { 3837 unsigned long rlim_rtprio = 3838 task_rlimit(p, RLIMIT_RTPRIO); 3839 3840 /* can't set/change the rt policy */ 3841 if (policy != p->policy && !rlim_rtprio) 3842 return -EPERM; 3843 3844 /* can't increase priority */ 3845 if (attr->sched_priority > p->rt_priority && 3846 attr->sched_priority > rlim_rtprio) 3847 return -EPERM; 3848 } 3849 3850 /* 3851 * Can't set/change SCHED_DEADLINE policy at all for now 3852 * (safest behavior); in the future we would like to allow 3853 * unprivileged DL tasks to increase their relative deadline 3854 * or reduce their runtime (both ways reducing utilization) 3855 */ 3856 if (dl_policy(policy)) 3857 return -EPERM; 3858 3859 /* 3860 * Treat SCHED_IDLE as nice 20. Only allow a switch to 3861 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 3862 */ 3863 if (idle_policy(p->policy) && !idle_policy(policy)) { 3864 if (!can_nice(p, task_nice(p))) 3865 return -EPERM; 3866 } 3867 3868 /* can't change other user's priorities */ 3869 if (!check_same_owner(p)) 3870 return -EPERM; 3871 3872 /* Normal users shall not reset the sched_reset_on_fork flag */ 3873 if (p->sched_reset_on_fork && !reset_on_fork) 3874 return -EPERM; 3875 } 3876 3877 if (user) { 3878 retval = security_task_setscheduler(p); 3879 if (retval) 3880 return retval; 3881 } 3882 3883 /* 3884 * make sure no PI-waiters arrive (or leave) while we are 3885 * changing the priority of the task: 3886 * 3887 * To be able to change p->policy safely, the appropriate 3888 * runqueue lock must be held. 3889 */ 3890 rq = task_rq_lock(p, &flags); 3891 3892 /* 3893 * Changing the policy of the stop threads its a very bad idea 3894 */ 3895 if (p == rq->stop) { 3896 task_rq_unlock(rq, p, &flags); 3897 return -EINVAL; 3898 } 3899 3900 /* 3901 * If not changing anything there's no need to proceed further, 3902 * but store a possible modification of reset_on_fork. 3903 */ 3904 if (unlikely(policy == p->policy)) { 3905 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 3906 goto change; 3907 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 3908 goto change; 3909 if (dl_policy(policy) && dl_param_changed(p, attr)) 3910 goto change; 3911 3912 p->sched_reset_on_fork = reset_on_fork; 3913 task_rq_unlock(rq, p, &flags); 3914 return 0; 3915 } 3916 change: 3917 3918 if (user) { 3919 #ifdef CONFIG_RT_GROUP_SCHED 3920 /* 3921 * Do not allow realtime tasks into groups that have no runtime 3922 * assigned. 3923 */ 3924 if (rt_bandwidth_enabled() && rt_policy(policy) && 3925 task_group(p)->rt_bandwidth.rt_runtime == 0 && 3926 !task_group_is_autogroup(task_group(p))) { 3927 task_rq_unlock(rq, p, &flags); 3928 return -EPERM; 3929 } 3930 #endif 3931 #ifdef CONFIG_SMP 3932 if (dl_bandwidth_enabled() && dl_policy(policy)) { 3933 cpumask_t *span = rq->rd->span; 3934 3935 /* 3936 * Don't allow tasks with an affinity mask smaller than 3937 * the entire root_domain to become SCHED_DEADLINE. 
We 3938 * will also fail if there's no bandwidth available. 3939 */ 3940 if (!cpumask_subset(span, &p->cpus_allowed) || 3941 rq->rd->dl_bw.bw == 0) { 3942 task_rq_unlock(rq, p, &flags); 3943 return -EPERM; 3944 } 3945 } 3946 #endif 3947 } 3948 3949 /* recheck policy now with rq lock held */ 3950 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 3951 policy = oldpolicy = -1; 3952 task_rq_unlock(rq, p, &flags); 3953 goto recheck; 3954 } 3955 3956 /* 3957 * If setscheduling to SCHED_DEADLINE (or changing the parameters 3958 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 3959 * is available. 3960 */ 3961 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) { 3962 task_rq_unlock(rq, p, &flags); 3963 return -EBUSY; 3964 } 3965 3966 p->sched_reset_on_fork = reset_on_fork; 3967 oldprio = p->prio; 3968 3969 if (pi) { 3970 /* 3971 * Take priority boosted tasks into account. If the new 3972 * effective priority is unchanged, we just store the new 3973 * normal parameters and do not touch the scheduler class and 3974 * the runqueue. This will be done when the task deboost 3975 * itself. 3976 */ 3977 new_effective_prio = rt_mutex_get_effective_prio(p, newprio); 3978 if (new_effective_prio == oldprio) { 3979 __setscheduler_params(p, attr); 3980 task_rq_unlock(rq, p, &flags); 3981 return 0; 3982 } 3983 } 3984 3985 queued = task_on_rq_queued(p); 3986 running = task_current(rq, p); 3987 if (queued) 3988 dequeue_task(rq, p, DEQUEUE_SAVE); 3989 if (running) 3990 put_prev_task(rq, p); 3991 3992 prev_class = p->sched_class; 3993 __setscheduler(rq, p, attr, pi); 3994 3995 if (running) 3996 p->sched_class->set_curr_task(rq); 3997 if (queued) { 3998 int enqueue_flags = ENQUEUE_RESTORE; 3999 /* 4000 * We enqueue to tail when the priority of a task is 4001 * increased (user space view). 4002 */ 4003 if (oldprio <= p->prio) 4004 enqueue_flags |= ENQUEUE_HEAD; 4005 4006 enqueue_task(rq, p, enqueue_flags); 4007 } 4008 4009 check_class_changed(rq, p, prev_class, oldprio); 4010 preempt_disable(); /* avoid rq from going away on us */ 4011 task_rq_unlock(rq, p, &flags); 4012 4013 if (pi) 4014 rt_mutex_adjust_pi(p); 4015 4016 /* 4017 * Run balance callbacks after we've adjusted the PI chain. 4018 */ 4019 balance_callback(rq); 4020 preempt_enable(); 4021 4022 return 0; 4023 } 4024 4025 static int _sched_setscheduler(struct task_struct *p, int policy, 4026 const struct sched_param *param, bool check) 4027 { 4028 struct sched_attr attr = { 4029 .sched_policy = policy, 4030 .sched_priority = param->sched_priority, 4031 .sched_nice = PRIO_TO_NICE(p->static_prio), 4032 }; 4033 4034 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 4035 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 4036 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 4037 policy &= ~SCHED_RESET_ON_FORK; 4038 attr.sched_policy = policy; 4039 } 4040 4041 return __sched_setscheduler(p, &attr, check, true); 4042 } 4043 /** 4044 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 4045 * @p: the task in question. 4046 * @policy: new policy. 4047 * @param: structure containing the new RT priority. 4048 * 4049 * Return: 0 on success. An error code otherwise. 4050 * 4051 * NOTE that the task may be already dead. 
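 *
 * Typical in-kernel usage (illustrative sketch; 'task' is assumed to be a
 * task_struct the caller holds a reference to):
 *
 *	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
 *
 *	if (sched_setscheduler(task, SCHED_FIFO, &sp))
 *		pr_warn("failed to switch task to SCHED_FIFO\n");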
4052 */ 4053 int sched_setscheduler(struct task_struct *p, int policy, 4054 const struct sched_param *param) 4055 { 4056 return _sched_setscheduler(p, policy, param, true); 4057 } 4058 EXPORT_SYMBOL_GPL(sched_setscheduler); 4059 4060 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 4061 { 4062 return __sched_setscheduler(p, attr, true, true); 4063 } 4064 EXPORT_SYMBOL_GPL(sched_setattr); 4065 4066 /** 4067 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 4068 * @p: the task in question. 4069 * @policy: new policy. 4070 * @param: structure containing the new RT priority. 4071 * 4072 * Just like sched_setscheduler, only don't bother checking if the 4073 * current context has permission. For example, this is needed in 4074 * stop_machine(): we create temporary high priority worker threads, 4075 * but our caller might not have that capability. 4076 * 4077 * Return: 0 on success. An error code otherwise. 4078 */ 4079 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 4080 const struct sched_param *param) 4081 { 4082 return _sched_setscheduler(p, policy, param, false); 4083 } 4084 EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); 4085 4086 static int 4087 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 4088 { 4089 struct sched_param lparam; 4090 struct task_struct *p; 4091 int retval; 4092 4093 if (!param || pid < 0) 4094 return -EINVAL; 4095 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 4096 return -EFAULT; 4097 4098 rcu_read_lock(); 4099 retval = -ESRCH; 4100 p = find_process_by_pid(pid); 4101 if (p != NULL) 4102 retval = sched_setscheduler(p, policy, &lparam); 4103 rcu_read_unlock(); 4104 4105 return retval; 4106 } 4107 4108 /* 4109 * Mimics kernel/events/core.c perf_copy_attr(). 4110 */ 4111 static int sched_copy_attr(struct sched_attr __user *uattr, 4112 struct sched_attr *attr) 4113 { 4114 u32 size; 4115 int ret; 4116 4117 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0)) 4118 return -EFAULT; 4119 4120 /* 4121 * zero the full structure, so that a short copy will be nice. 4122 */ 4123 memset(attr, 0, sizeof(*attr)); 4124 4125 ret = get_user(size, &uattr->size); 4126 if (ret) 4127 return ret; 4128 4129 if (size > PAGE_SIZE) /* silly large */ 4130 goto err_size; 4131 4132 if (!size) /* abi compat */ 4133 size = SCHED_ATTR_SIZE_VER0; 4134 4135 if (size < SCHED_ATTR_SIZE_VER0) 4136 goto err_size; 4137 4138 /* 4139 * If we're handed a bigger struct than we know of, 4140 * ensure all the unknown bits are 0 - i.e. new 4141 * user-space does not rely on any kernel feature 4142 * extensions we dont know about yet. 4143 */ 4144 if (size > sizeof(*attr)) { 4145 unsigned char __user *addr; 4146 unsigned char __user *end; 4147 unsigned char val; 4148 4149 addr = (void __user *)uattr + sizeof(*attr); 4150 end = (void __user *)uattr + size; 4151 4152 for (; addr < end; addr++) { 4153 ret = get_user(val, addr); 4154 if (ret) 4155 return ret; 4156 if (val) 4157 goto err_size; 4158 } 4159 size = sizeof(*attr); 4160 } 4161 4162 ret = copy_from_user(attr, uattr, size); 4163 if (ret) 4164 return -EFAULT; 4165 4166 /* 4167 * XXX: do we want to be lenient like existing syscalls; or do we want 4168 * to be strict and return an error on out-of-bounds values? 
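 * For now we stay lenient and clamp below: e.g. a sched_nice of 100 passed
 * in from user-space is silently truncated to MAX_NICE (19).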
4169 */ 4170 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 4171 4172 return 0; 4173 4174 err_size: 4175 put_user(sizeof(*attr), &uattr->size); 4176 return -E2BIG; 4177 } 4178 4179 /** 4180 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 4181 * @pid: the pid in question. 4182 * @policy: new policy. 4183 * @param: structure containing the new RT priority. 4184 * 4185 * Return: 0 on success. An error code otherwise. 4186 */ 4187 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, 4188 struct sched_param __user *, param) 4189 { 4190 /* negative values for policy are not valid */ 4191 if (policy < 0) 4192 return -EINVAL; 4193 4194 return do_sched_setscheduler(pid, policy, param); 4195 } 4196 4197 /** 4198 * sys_sched_setparam - set/change the RT priority of a thread 4199 * @pid: the pid in question. 4200 * @param: structure containing the new RT priority. 4201 * 4202 * Return: 0 on success. An error code otherwise. 4203 */ 4204 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 4205 { 4206 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 4207 } 4208 4209 /** 4210 * sys_sched_setattr - same as above, but with extended sched_attr 4211 * @pid: the pid in question. 4212 * @uattr: structure containing the extended parameters. 4213 * @flags: for future extension. 4214 */ 4215 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 4216 unsigned int, flags) 4217 { 4218 struct sched_attr attr; 4219 struct task_struct *p; 4220 int retval; 4221 4222 if (!uattr || pid < 0 || flags) 4223 return -EINVAL; 4224 4225 retval = sched_copy_attr(uattr, &attr); 4226 if (retval) 4227 return retval; 4228 4229 if ((int)attr.sched_policy < 0) 4230 return -EINVAL; 4231 4232 rcu_read_lock(); 4233 retval = -ESRCH; 4234 p = find_process_by_pid(pid); 4235 if (p != NULL) 4236 retval = sched_setattr(p, &attr); 4237 rcu_read_unlock(); 4238 4239 return retval; 4240 } 4241 4242 /** 4243 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 4244 * @pid: the pid in question. 4245 * 4246 * Return: On success, the policy of the thread. Otherwise, a negative error 4247 * code. 4248 */ 4249 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 4250 { 4251 struct task_struct *p; 4252 int retval; 4253 4254 if (pid < 0) 4255 return -EINVAL; 4256 4257 retval = -ESRCH; 4258 rcu_read_lock(); 4259 p = find_process_by_pid(pid); 4260 if (p) { 4261 retval = security_task_getscheduler(p); 4262 if (!retval) 4263 retval = p->policy 4264 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 4265 } 4266 rcu_read_unlock(); 4267 return retval; 4268 } 4269 4270 /** 4271 * sys_sched_getparam - get the RT priority of a thread 4272 * @pid: the pid in question. 4273 * @param: structure containing the RT priority. 4274 * 4275 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 4276 * code. 
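 *
 * For example, user-space can read back a thread's RT priority with
 * (illustrative only; a pid of 0 means the calling thread):
 *
 *	struct sched_param sp;
 *
 *	if (sched_getparam(0, &sp) == 0)
 *		printf("rt_priority: %d\n", sp.sched_priority);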
4277 */ 4278 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 4279 { 4280 struct sched_param lp = { .sched_priority = 0 }; 4281 struct task_struct *p; 4282 int retval; 4283 4284 if (!param || pid < 0) 4285 return -EINVAL; 4286 4287 rcu_read_lock(); 4288 p = find_process_by_pid(pid); 4289 retval = -ESRCH; 4290 if (!p) 4291 goto out_unlock; 4292 4293 retval = security_task_getscheduler(p); 4294 if (retval) 4295 goto out_unlock; 4296 4297 if (task_has_rt_policy(p)) 4298 lp.sched_priority = p->rt_priority; 4299 rcu_read_unlock(); 4300 4301 /* 4302 * This one might sleep, we cannot do it with a spinlock held ... 4303 */ 4304 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 4305 4306 return retval; 4307 4308 out_unlock: 4309 rcu_read_unlock(); 4310 return retval; 4311 } 4312 4313 static int sched_read_attr(struct sched_attr __user *uattr, 4314 struct sched_attr *attr, 4315 unsigned int usize) 4316 { 4317 int ret; 4318 4319 if (!access_ok(VERIFY_WRITE, uattr, usize)) 4320 return -EFAULT; 4321 4322 /* 4323 * If we're handed a smaller struct than we know of, 4324 * ensure all the unknown bits are 0 - i.e. old 4325 * user-space does not get uncomplete information. 4326 */ 4327 if (usize < sizeof(*attr)) { 4328 unsigned char *addr; 4329 unsigned char *end; 4330 4331 addr = (void *)attr + usize; 4332 end = (void *)attr + sizeof(*attr); 4333 4334 for (; addr < end; addr++) { 4335 if (*addr) 4336 return -EFBIG; 4337 } 4338 4339 attr->size = usize; 4340 } 4341 4342 ret = copy_to_user(uattr, attr, attr->size); 4343 if (ret) 4344 return -EFAULT; 4345 4346 return 0; 4347 } 4348 4349 /** 4350 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 4351 * @pid: the pid in question. 4352 * @uattr: structure containing the extended parameters. 4353 * @size: sizeof(attr) for fwd/bwd comp. 4354 * @flags: for future extension. 
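 *
 * There is no glibc wrapper for this syscall; user-space typically invokes
 * it directly, e.g. (illustrative sketch):
 *
 *	struct sched_attr attr;
 *
 *	syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0);
 *
 * Return: 0 on success. An error code otherwise.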
4355 */ 4356 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 4357 unsigned int, size, unsigned int, flags) 4358 { 4359 struct sched_attr attr = { 4360 .size = sizeof(struct sched_attr), 4361 }; 4362 struct task_struct *p; 4363 int retval; 4364 4365 if (!uattr || pid < 0 || size > PAGE_SIZE || 4366 size < SCHED_ATTR_SIZE_VER0 || flags) 4367 return -EINVAL; 4368 4369 rcu_read_lock(); 4370 p = find_process_by_pid(pid); 4371 retval = -ESRCH; 4372 if (!p) 4373 goto out_unlock; 4374 4375 retval = security_task_getscheduler(p); 4376 if (retval) 4377 goto out_unlock; 4378 4379 attr.sched_policy = p->policy; 4380 if (p->sched_reset_on_fork) 4381 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 4382 if (task_has_dl_policy(p)) 4383 __getparam_dl(p, &attr); 4384 else if (task_has_rt_policy(p)) 4385 attr.sched_priority = p->rt_priority; 4386 else 4387 attr.sched_nice = task_nice(p); 4388 4389 rcu_read_unlock(); 4390 4391 retval = sched_read_attr(uattr, &attr, size); 4392 return retval; 4393 4394 out_unlock: 4395 rcu_read_unlock(); 4396 return retval; 4397 } 4398 4399 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 4400 { 4401 cpumask_var_t cpus_allowed, new_mask; 4402 struct task_struct *p; 4403 int retval; 4404 4405 rcu_read_lock(); 4406 4407 p = find_process_by_pid(pid); 4408 if (!p) { 4409 rcu_read_unlock(); 4410 return -ESRCH; 4411 } 4412 4413 /* Prevent p going away */ 4414 get_task_struct(p); 4415 rcu_read_unlock(); 4416 4417 if (p->flags & PF_NO_SETAFFINITY) { 4418 retval = -EINVAL; 4419 goto out_put_task; 4420 } 4421 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 4422 retval = -ENOMEM; 4423 goto out_put_task; 4424 } 4425 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 4426 retval = -ENOMEM; 4427 goto out_free_cpus_allowed; 4428 } 4429 retval = -EPERM; 4430 if (!check_same_owner(p)) { 4431 rcu_read_lock(); 4432 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 4433 rcu_read_unlock(); 4434 goto out_free_new_mask; 4435 } 4436 rcu_read_unlock(); 4437 } 4438 4439 retval = security_task_setscheduler(p); 4440 if (retval) 4441 goto out_free_new_mask; 4442 4443 4444 cpuset_cpus_allowed(p, cpus_allowed); 4445 cpumask_and(new_mask, in_mask, cpus_allowed); 4446 4447 /* 4448 * Since bandwidth control happens on root_domain basis, 4449 * if admission test is enabled, we only admit -deadline 4450 * tasks allowed to run on all the CPUs in the task's 4451 * root_domain. 4452 */ 4453 #ifdef CONFIG_SMP 4454 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 4455 rcu_read_lock(); 4456 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { 4457 retval = -EBUSY; 4458 rcu_read_unlock(); 4459 goto out_free_new_mask; 4460 } 4461 rcu_read_unlock(); 4462 } 4463 #endif 4464 again: 4465 retval = __set_cpus_allowed_ptr(p, new_mask, true); 4466 4467 if (!retval) { 4468 cpuset_cpus_allowed(p, cpus_allowed); 4469 if (!cpumask_subset(new_mask, cpus_allowed)) { 4470 /* 4471 * We must have raced with a concurrent cpuset 4472 * update. 
Just reset the cpus_allowed to the 4473 * cpuset's cpus_allowed 4474 */ 4475 cpumask_copy(new_mask, cpus_allowed); 4476 goto again; 4477 } 4478 } 4479 out_free_new_mask: 4480 free_cpumask_var(new_mask); 4481 out_free_cpus_allowed: 4482 free_cpumask_var(cpus_allowed); 4483 out_put_task: 4484 put_task_struct(p); 4485 return retval; 4486 } 4487 4488 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 4489 struct cpumask *new_mask) 4490 { 4491 if (len < cpumask_size()) 4492 cpumask_clear(new_mask); 4493 else if (len > cpumask_size()) 4494 len = cpumask_size(); 4495 4496 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 4497 } 4498 4499 /** 4500 * sys_sched_setaffinity - set the cpu affinity of a process 4501 * @pid: pid of the process 4502 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4503 * @user_mask_ptr: user-space pointer to the new cpu mask 4504 * 4505 * Return: 0 on success. An error code otherwise. 4506 */ 4507 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 4508 unsigned long __user *, user_mask_ptr) 4509 { 4510 cpumask_var_t new_mask; 4511 int retval; 4512 4513 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 4514 return -ENOMEM; 4515 4516 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 4517 if (retval == 0) 4518 retval = sched_setaffinity(pid, new_mask); 4519 free_cpumask_var(new_mask); 4520 return retval; 4521 } 4522 4523 long sched_getaffinity(pid_t pid, struct cpumask *mask) 4524 { 4525 struct task_struct *p; 4526 unsigned long flags; 4527 int retval; 4528 4529 rcu_read_lock(); 4530 4531 retval = -ESRCH; 4532 p = find_process_by_pid(pid); 4533 if (!p) 4534 goto out_unlock; 4535 4536 retval = security_task_getscheduler(p); 4537 if (retval) 4538 goto out_unlock; 4539 4540 raw_spin_lock_irqsave(&p->pi_lock, flags); 4541 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); 4542 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4543 4544 out_unlock: 4545 rcu_read_unlock(); 4546 4547 return retval; 4548 } 4549 4550 /** 4551 * sys_sched_getaffinity - get the cpu affinity of a process 4552 * @pid: pid of the process 4553 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4554 * @user_mask_ptr: user-space pointer to hold the current cpu mask 4555 * 4556 * Return: 0 on success. An error code otherwise. 4557 */ 4558 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 4559 unsigned long __user *, user_mask_ptr) 4560 { 4561 int ret; 4562 cpumask_var_t mask; 4563 4564 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 4565 return -EINVAL; 4566 if (len & (sizeof(unsigned long)-1)) 4567 return -EINVAL; 4568 4569 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 4570 return -ENOMEM; 4571 4572 ret = sched_getaffinity(pid, mask); 4573 if (ret == 0) { 4574 size_t retlen = min_t(size_t, len, cpumask_size()); 4575 4576 if (copy_to_user(user_mask_ptr, mask, retlen)) 4577 ret = -EFAULT; 4578 else 4579 ret = retlen; 4580 } 4581 free_cpumask_var(mask); 4582 4583 return ret; 4584 } 4585 4586 /** 4587 * sys_sched_yield - yield the current processor to other threads. 4588 * 4589 * This function yields the current CPU to other tasks. If there are no 4590 * other threads running on this CPU then this function will return. 4591 * 4592 * Return: 0. 
4593 */ 4594 SYSCALL_DEFINE0(sched_yield) 4595 { 4596 struct rq *rq = this_rq_lock(); 4597 4598 schedstat_inc(rq, yld_count); 4599 current->sched_class->yield_task(rq); 4600 4601 /* 4602 * Since we are going to call schedule() anyway, there's 4603 * no need to preempt or enable interrupts: 4604 */ 4605 __release(rq->lock); 4606 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 4607 do_raw_spin_unlock(&rq->lock); 4608 sched_preempt_enable_no_resched(); 4609 4610 schedule(); 4611 4612 return 0; 4613 } 4614 4615 int __sched _cond_resched(void) 4616 { 4617 if (should_resched(0)) { 4618 preempt_schedule_common(); 4619 return 1; 4620 } 4621 return 0; 4622 } 4623 EXPORT_SYMBOL(_cond_resched); 4624 4625 /* 4626 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 4627 * call schedule, and on return reacquire the lock. 4628 * 4629 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level 4630 * operations here to prevent schedule() from being called twice (once via 4631 * spin_unlock(), once by hand). 4632 */ 4633 int __cond_resched_lock(spinlock_t *lock) 4634 { 4635 int resched = should_resched(PREEMPT_LOCK_OFFSET); 4636 int ret = 0; 4637 4638 lockdep_assert_held(lock); 4639 4640 if (spin_needbreak(lock) || resched) { 4641 spin_unlock(lock); 4642 if (resched) 4643 preempt_schedule_common(); 4644 else 4645 cpu_relax(); 4646 ret = 1; 4647 spin_lock(lock); 4648 } 4649 return ret; 4650 } 4651 EXPORT_SYMBOL(__cond_resched_lock); 4652 4653 int __sched __cond_resched_softirq(void) 4654 { 4655 BUG_ON(!in_softirq()); 4656 4657 if (should_resched(SOFTIRQ_DISABLE_OFFSET)) { 4658 local_bh_enable(); 4659 preempt_schedule_common(); 4660 local_bh_disable(); 4661 return 1; 4662 } 4663 return 0; 4664 } 4665 EXPORT_SYMBOL(__cond_resched_softirq); 4666 4667 /** 4668 * yield - yield the current processor to other threads. 4669 * 4670 * Do not ever use this function, there's a 99% chance you're doing it wrong. 4671 * 4672 * The scheduler is at all times free to pick the calling task as the most 4673 * eligible task to run, if removing the yield() call from your code breaks 4674 * it, its already broken. 4675 * 4676 * Typical broken usage is: 4677 * 4678 * while (!event) 4679 * yield(); 4680 * 4681 * where one assumes that yield() will let 'the other' process run that will 4682 * make event true. If the current task is a SCHED_FIFO task that will never 4683 * happen. Never use yield() as a progress guarantee!! 4684 * 4685 * If you want to use yield() to wait for something, use wait_event(). 4686 * If you want to use yield() to be 'nice' for others, use cond_resched(). 4687 * If you still want to use yield(), do not! 4688 */ 4689 void __sched yield(void) 4690 { 4691 set_current_state(TASK_RUNNING); 4692 sys_sched_yield(); 4693 } 4694 EXPORT_SYMBOL(yield); 4695 4696 /** 4697 * yield_to - yield the current processor to another thread in 4698 * your thread group, or accelerate that thread toward the 4699 * processor it's on. 4700 * @p: target task 4701 * @preempt: whether task preemption is allowed or not 4702 * 4703 * It's the caller's job to ensure that the target task struct 4704 * can't go away on us before we can do any checks. 4705 * 4706 * Return: 4707 * true (>0) if we indeed boosted the target task. 4708 * false (0) if we failed to boost the target. 4709 * -ESRCH if there's no task to yield to. 
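 *
 * A typical caller wants a specific task (e.g. a lock holder or a busy
 * vcpu) to run next, roughly (illustrative sketch; get_target_task() is a
 * hypothetical helper that returns a referenced task or NULL):
 *
 *	task = get_target_task();
 *	if (task) {
 *		if (yield_to(task, true) > 0)
 *			; /* target was boosted, we already rescheduled */
 *		put_task_struct(task);
 *	}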
4710 */ 4711 int __sched yield_to(struct task_struct *p, bool preempt) 4712 { 4713 struct task_struct *curr = current; 4714 struct rq *rq, *p_rq; 4715 unsigned long flags; 4716 int yielded = 0; 4717 4718 local_irq_save(flags); 4719 rq = this_rq(); 4720 4721 again: 4722 p_rq = task_rq(p); 4723 /* 4724 * If we're the only runnable task on the rq and target rq also 4725 * has only one task, there's absolutely no point in yielding. 4726 */ 4727 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 4728 yielded = -ESRCH; 4729 goto out_irq; 4730 } 4731 4732 double_rq_lock(rq, p_rq); 4733 if (task_rq(p) != p_rq) { 4734 double_rq_unlock(rq, p_rq); 4735 goto again; 4736 } 4737 4738 if (!curr->sched_class->yield_to_task) 4739 goto out_unlock; 4740 4741 if (curr->sched_class != p->sched_class) 4742 goto out_unlock; 4743 4744 if (task_running(p_rq, p) || p->state) 4745 goto out_unlock; 4746 4747 yielded = curr->sched_class->yield_to_task(rq, p, preempt); 4748 if (yielded) { 4749 schedstat_inc(rq, yld_count); 4750 /* 4751 * Make p's CPU reschedule; pick_next_entity takes care of 4752 * fairness. 4753 */ 4754 if (preempt && rq != p_rq) 4755 resched_curr(p_rq); 4756 } 4757 4758 out_unlock: 4759 double_rq_unlock(rq, p_rq); 4760 out_irq: 4761 local_irq_restore(flags); 4762 4763 if (yielded > 0) 4764 schedule(); 4765 4766 return yielded; 4767 } 4768 EXPORT_SYMBOL_GPL(yield_to); 4769 4770 /* 4771 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 4772 * that process accounting knows that this is a task in IO wait state. 4773 */ 4774 long __sched io_schedule_timeout(long timeout) 4775 { 4776 int old_iowait = current->in_iowait; 4777 struct rq *rq; 4778 long ret; 4779 4780 current->in_iowait = 1; 4781 blk_schedule_flush_plug(current); 4782 4783 delayacct_blkio_start(); 4784 rq = raw_rq(); 4785 atomic_inc(&rq->nr_iowait); 4786 ret = schedule_timeout(timeout); 4787 current->in_iowait = old_iowait; 4788 atomic_dec(&rq->nr_iowait); 4789 delayacct_blkio_end(); 4790 4791 return ret; 4792 } 4793 EXPORT_SYMBOL(io_schedule_timeout); 4794 4795 /** 4796 * sys_sched_get_priority_max - return maximum RT priority. 4797 * @policy: scheduling class. 4798 * 4799 * Return: On success, this syscall returns the maximum 4800 * rt_priority that can be used by a given scheduling class. 4801 * On failure, a negative error code is returned. 4802 */ 4803 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 4804 { 4805 int ret = -EINVAL; 4806 4807 switch (policy) { 4808 case SCHED_FIFO: 4809 case SCHED_RR: 4810 ret = MAX_USER_RT_PRIO-1; 4811 break; 4812 case SCHED_DEADLINE: 4813 case SCHED_NORMAL: 4814 case SCHED_BATCH: 4815 case SCHED_IDLE: 4816 ret = 0; 4817 break; 4818 } 4819 return ret; 4820 } 4821 4822 /** 4823 * sys_sched_get_priority_min - return minimum RT priority. 4824 * @policy: scheduling class. 4825 * 4826 * Return: On success, this syscall returns the minimum 4827 * rt_priority that can be used by a given scheduling class. 4828 * On failure, a negative error code is returned. 4829 */ 4830 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 4831 { 4832 int ret = -EINVAL; 4833 4834 switch (policy) { 4835 case SCHED_FIFO: 4836 case SCHED_RR: 4837 ret = 1; 4838 break; 4839 case SCHED_DEADLINE: 4840 case SCHED_NORMAL: 4841 case SCHED_BATCH: 4842 case SCHED_IDLE: 4843 ret = 0; 4844 } 4845 return ret; 4846 } 4847 4848 /** 4849 * sys_sched_rr_get_interval - return the default timeslice of a process. 4850 * @pid: pid of the process. 4851 * @interval: userspace pointer to the timeslice value. 
4852 * 4853 * this syscall writes the default timeslice value of a given process 4854 * into the user-space timespec buffer. A value of '0' means infinity. 4855 * 4856 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 4857 * an error code. 4858 */ 4859 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 4860 struct timespec __user *, interval) 4861 { 4862 struct task_struct *p; 4863 unsigned int time_slice; 4864 unsigned long flags; 4865 struct rq *rq; 4866 int retval; 4867 struct timespec t; 4868 4869 if (pid < 0) 4870 return -EINVAL; 4871 4872 retval = -ESRCH; 4873 rcu_read_lock(); 4874 p = find_process_by_pid(pid); 4875 if (!p) 4876 goto out_unlock; 4877 4878 retval = security_task_getscheduler(p); 4879 if (retval) 4880 goto out_unlock; 4881 4882 rq = task_rq_lock(p, &flags); 4883 time_slice = 0; 4884 if (p->sched_class->get_rr_interval) 4885 time_slice = p->sched_class->get_rr_interval(rq, p); 4886 task_rq_unlock(rq, p, &flags); 4887 4888 rcu_read_unlock(); 4889 jiffies_to_timespec(time_slice, &t); 4890 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 4891 return retval; 4892 4893 out_unlock: 4894 rcu_read_unlock(); 4895 return retval; 4896 } 4897 4898 static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; 4899 4900 void sched_show_task(struct task_struct *p) 4901 { 4902 unsigned long free = 0; 4903 int ppid; 4904 unsigned long state = p->state; 4905 4906 if (state) 4907 state = __ffs(state) + 1; 4908 printk(KERN_INFO "%-15.15s %c", p->comm, 4909 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); 4910 #if BITS_PER_LONG == 32 4911 if (state == TASK_RUNNING) 4912 printk(KERN_CONT " running "); 4913 else 4914 printk(KERN_CONT " %08lx ", thread_saved_pc(p)); 4915 #else 4916 if (state == TASK_RUNNING) 4917 printk(KERN_CONT " running task "); 4918 else 4919 printk(KERN_CONT " %016lx ", thread_saved_pc(p)); 4920 #endif 4921 #ifdef CONFIG_DEBUG_STACK_USAGE 4922 free = stack_not_used(p); 4923 #endif 4924 ppid = 0; 4925 rcu_read_lock(); 4926 if (pid_alive(p)) 4927 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 4928 rcu_read_unlock(); 4929 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 4930 task_pid_nr(p), ppid, 4931 (unsigned long)task_thread_info(p)->flags); 4932 4933 print_worker_info(KERN_INFO, p); 4934 show_stack(p, NULL); 4935 } 4936 4937 void show_state_filter(unsigned long state_filter) 4938 { 4939 struct task_struct *g, *p; 4940 4941 #if BITS_PER_LONG == 32 4942 printk(KERN_INFO 4943 " task PC stack pid father\n"); 4944 #else 4945 printk(KERN_INFO 4946 " task PC stack pid father\n"); 4947 #endif 4948 rcu_read_lock(); 4949 for_each_process_thread(g, p) { 4950 /* 4951 * reset the NMI-timeout, listing all files on a slow 4952 * console might take a lot of time: 4953 */ 4954 touch_nmi_watchdog(); 4955 if (!state_filter || (p->state & state_filter)) 4956 sched_show_task(p); 4957 } 4958 4959 touch_all_softlockup_watchdogs(); 4960 4961 #ifdef CONFIG_SCHED_DEBUG 4962 sysrq_sched_debug_show(); 4963 #endif 4964 rcu_read_unlock(); 4965 /* 4966 * Only show locks if all tasks are dumped: 4967 */ 4968 if (!state_filter) 4969 debug_show_all_locks(); 4970 } 4971 4972 void init_idle_bootup_task(struct task_struct *idle) 4973 { 4974 idle->sched_class = &idle_sched_class; 4975 } 4976 4977 /** 4978 * init_idle - set up an idle thread for a given CPU 4979 * @idle: task in question 4980 * @cpu: cpu the idle task belongs to 4981 * 4982 * NOTE: this function does not set the idle thread's NEED_RESCHED 4983 * flag, to make booting more robust. 
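 *
 * Typically reached from sched_init() for the boot CPU's idle task and
 * from fork_idle() when the idle threads of secondary CPUs are created.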
4984 */ 4985 void init_idle(struct task_struct *idle, int cpu) 4986 { 4987 struct rq *rq = cpu_rq(cpu); 4988 unsigned long flags; 4989 4990 raw_spin_lock_irqsave(&idle->pi_lock, flags); 4991 raw_spin_lock(&rq->lock); 4992 4993 __sched_fork(0, idle); 4994 idle->state = TASK_RUNNING; 4995 idle->se.exec_start = sched_clock(); 4996 4997 #ifdef CONFIG_SMP 4998 /* 4999 * Its possible that init_idle() gets called multiple times on a task, 5000 * in that case do_set_cpus_allowed() will not do the right thing. 5001 * 5002 * And since this is boot we can forgo the serialization. 5003 */ 5004 set_cpus_allowed_common(idle, cpumask_of(cpu)); 5005 #endif 5006 /* 5007 * We're having a chicken and egg problem, even though we are 5008 * holding rq->lock, the cpu isn't yet set to this cpu so the 5009 * lockdep check in task_group() will fail. 5010 * 5011 * Similar case to sched_fork(). / Alternatively we could 5012 * use task_rq_lock() here and obtain the other rq->lock. 5013 * 5014 * Silence PROVE_RCU 5015 */ 5016 rcu_read_lock(); 5017 __set_task_cpu(idle, cpu); 5018 rcu_read_unlock(); 5019 5020 rq->curr = rq->idle = idle; 5021 idle->on_rq = TASK_ON_RQ_QUEUED; 5022 #ifdef CONFIG_SMP 5023 idle->on_cpu = 1; 5024 #endif 5025 raw_spin_unlock(&rq->lock); 5026 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 5027 5028 /* Set the preempt count _outside_ the spinlocks! */ 5029 init_idle_preempt_count(idle, cpu); 5030 5031 /* 5032 * The idle tasks have their own, simple scheduling class: 5033 */ 5034 idle->sched_class = &idle_sched_class; 5035 ftrace_graph_init_idle_task(idle, cpu); 5036 vtime_init_idle(idle, cpu); 5037 #ifdef CONFIG_SMP 5038 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 5039 #endif 5040 } 5041 5042 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 5043 const struct cpumask *trial) 5044 { 5045 int ret = 1, trial_cpus; 5046 struct dl_bw *cur_dl_b; 5047 unsigned long flags; 5048 5049 if (!cpumask_weight(cur)) 5050 return ret; 5051 5052 rcu_read_lock_sched(); 5053 cur_dl_b = dl_bw_of(cpumask_any(cur)); 5054 trial_cpus = cpumask_weight(trial); 5055 5056 raw_spin_lock_irqsave(&cur_dl_b->lock, flags); 5057 if (cur_dl_b->bw != -1 && 5058 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw) 5059 ret = 0; 5060 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); 5061 rcu_read_unlock_sched(); 5062 5063 return ret; 5064 } 5065 5066 int task_can_attach(struct task_struct *p, 5067 const struct cpumask *cs_cpus_allowed) 5068 { 5069 int ret = 0; 5070 5071 /* 5072 * Kthreads which disallow setaffinity shouldn't be moved 5073 * to a new cpuset; we don't want to change their cpu 5074 * affinity and isolating such threads by their set of 5075 * allowed nodes is unnecessary. Thus, cpusets are not 5076 * applicable for such threads. This prevents checking for 5077 * success of set_cpus_allowed_ptr() on all attached tasks 5078 * before cpus_allowed may be changed. 
5079 */ 5080 if (p->flags & PF_NO_SETAFFINITY) { 5081 ret = -EINVAL; 5082 goto out; 5083 } 5084 5085 #ifdef CONFIG_SMP 5086 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 5087 cs_cpus_allowed)) { 5088 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask, 5089 cs_cpus_allowed); 5090 struct dl_bw *dl_b; 5091 bool overflow; 5092 int cpus; 5093 unsigned long flags; 5094 5095 rcu_read_lock_sched(); 5096 dl_b = dl_bw_of(dest_cpu); 5097 raw_spin_lock_irqsave(&dl_b->lock, flags); 5098 cpus = dl_bw_cpus(dest_cpu); 5099 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw); 5100 if (overflow) 5101 ret = -EBUSY; 5102 else { 5103 /* 5104 * We reserve space for this task in the destination 5105 * root_domain, as we can't fail after this point. 5106 * We will free resources in the source root_domain 5107 * later on (see set_cpus_allowed_dl()). 5108 */ 5109 __dl_add(dl_b, p->dl.dl_bw); 5110 } 5111 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 5112 rcu_read_unlock_sched(); 5113 5114 } 5115 #endif 5116 out: 5117 return ret; 5118 } 5119 5120 #ifdef CONFIG_SMP 5121 5122 #ifdef CONFIG_NUMA_BALANCING 5123 /* Migrate current task p to target_cpu */ 5124 int migrate_task_to(struct task_struct *p, int target_cpu) 5125 { 5126 struct migration_arg arg = { p, target_cpu }; 5127 int curr_cpu = task_cpu(p); 5128 5129 if (curr_cpu == target_cpu) 5130 return 0; 5131 5132 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) 5133 return -EINVAL; 5134 5135 /* TODO: This is not properly updating schedstats */ 5136 5137 trace_sched_move_numa(p, curr_cpu, target_cpu); 5138 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 5139 } 5140 5141 /* 5142 * Requeue a task on a given node and accurately track the number of NUMA 5143 * tasks on the runqueues 5144 */ 5145 void sched_setnuma(struct task_struct *p, int nid) 5146 { 5147 struct rq *rq; 5148 unsigned long flags; 5149 bool queued, running; 5150 5151 rq = task_rq_lock(p, &flags); 5152 queued = task_on_rq_queued(p); 5153 running = task_current(rq, p); 5154 5155 if (queued) 5156 dequeue_task(rq, p, DEQUEUE_SAVE); 5157 if (running) 5158 put_prev_task(rq, p); 5159 5160 p->numa_preferred_nid = nid; 5161 5162 if (running) 5163 p->sched_class->set_curr_task(rq); 5164 if (queued) 5165 enqueue_task(rq, p, ENQUEUE_RESTORE); 5166 task_rq_unlock(rq, p, &flags); 5167 } 5168 #endif /* CONFIG_NUMA_BALANCING */ 5169 5170 #ifdef CONFIG_HOTPLUG_CPU 5171 /* 5172 * Ensures that the idle task is using init_mm right before its cpu goes 5173 * offline. 5174 */ 5175 void idle_task_exit(void) 5176 { 5177 struct mm_struct *mm = current->active_mm; 5178 5179 BUG_ON(cpu_online(smp_processor_id())); 5180 5181 if (mm != &init_mm) { 5182 switch_mm(mm, &init_mm, current); 5183 finish_arch_post_lock_switch(); 5184 } 5185 mmdrop(mm); 5186 } 5187 5188 /* 5189 * Since this CPU is going 'away' for a while, fold any nr_active delta 5190 * we might have. Assumes we're called after migrate_tasks() so that the 5191 * nr_active count is stable. 5192 * 5193 * Also see the comment "Global load-average calculations". 
5194 */ 5195 static void calc_load_migrate(struct rq *rq) 5196 { 5197 long delta = calc_load_fold_active(rq); 5198 if (delta) 5199 atomic_long_add(delta, &calc_load_tasks); 5200 } 5201 5202 static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) 5203 { 5204 } 5205 5206 static const struct sched_class fake_sched_class = { 5207 .put_prev_task = put_prev_task_fake, 5208 }; 5209 5210 static struct task_struct fake_task = { 5211 /* 5212 * Avoid pull_{rt,dl}_task() 5213 */ 5214 .prio = MAX_PRIO + 1, 5215 .sched_class = &fake_sched_class, 5216 }; 5217 5218 /* 5219 * Migrate all tasks from the rq, sleeping tasks will be migrated by 5220 * try_to_wake_up()->select_task_rq(). 5221 * 5222 * Called with rq->lock held even though we'er in stop_machine() and 5223 * there's no concurrency possible, we hold the required locks anyway 5224 * because of lock validation efforts. 5225 */ 5226 static void migrate_tasks(struct rq *dead_rq) 5227 { 5228 struct rq *rq = dead_rq; 5229 struct task_struct *next, *stop = rq->stop; 5230 int dest_cpu; 5231 5232 /* 5233 * Fudge the rq selection such that the below task selection loop 5234 * doesn't get stuck on the currently eligible stop task. 5235 * 5236 * We're currently inside stop_machine() and the rq is either stuck 5237 * in the stop_machine_cpu_stop() loop, or we're executing this code, 5238 * either way we should never end up calling schedule() until we're 5239 * done here. 5240 */ 5241 rq->stop = NULL; 5242 5243 /* 5244 * put_prev_task() and pick_next_task() sched 5245 * class method both need to have an up-to-date 5246 * value of rq->clock[_task] 5247 */ 5248 update_rq_clock(rq); 5249 5250 for (;;) { 5251 /* 5252 * There's this thread running, bail when that's the only 5253 * remaining thread. 5254 */ 5255 if (rq->nr_running == 1) 5256 break; 5257 5258 /* 5259 * pick_next_task assumes pinned rq->lock. 5260 */ 5261 lockdep_pin_lock(&rq->lock); 5262 next = pick_next_task(rq, &fake_task); 5263 BUG_ON(!next); 5264 next->sched_class->put_prev_task(rq, next); 5265 5266 /* 5267 * Rules for changing task_struct::cpus_allowed are holding 5268 * both pi_lock and rq->lock, such that holding either 5269 * stabilizes the mask. 5270 * 5271 * Drop rq->lock is not quite as disastrous as it usually is 5272 * because !cpu_active at this point, which means load-balance 5273 * will not interfere. Also, stop-machine. 5274 */ 5275 lockdep_unpin_lock(&rq->lock); 5276 raw_spin_unlock(&rq->lock); 5277 raw_spin_lock(&next->pi_lock); 5278 raw_spin_lock(&rq->lock); 5279 5280 /* 5281 * Since we're inside stop-machine, _nothing_ should have 5282 * changed the task, WARN if weird stuff happened, because in 5283 * that case the above rq->lock drop is a fail too. 5284 */ 5285 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { 5286 raw_spin_unlock(&next->pi_lock); 5287 continue; 5288 } 5289 5290 /* Find suitable destination for @next, with force if needed. 
*/ 5291 dest_cpu = select_fallback_rq(dead_rq->cpu, next); 5292 5293 rq = __migrate_task(rq, next, dest_cpu); 5294 if (rq != dead_rq) { 5295 raw_spin_unlock(&rq->lock); 5296 rq = dead_rq; 5297 raw_spin_lock(&rq->lock); 5298 } 5299 raw_spin_unlock(&next->pi_lock); 5300 } 5301 5302 rq->stop = stop; 5303 } 5304 #endif /* CONFIG_HOTPLUG_CPU */ 5305 5306 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 5307 5308 static struct ctl_table sd_ctl_dir[] = { 5309 { 5310 .procname = "sched_domain", 5311 .mode = 0555, 5312 }, 5313 {} 5314 }; 5315 5316 static struct ctl_table sd_ctl_root[] = { 5317 { 5318 .procname = "kernel", 5319 .mode = 0555, 5320 .child = sd_ctl_dir, 5321 }, 5322 {} 5323 }; 5324 5325 static struct ctl_table *sd_alloc_ctl_entry(int n) 5326 { 5327 struct ctl_table *entry = 5328 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); 5329 5330 return entry; 5331 } 5332 5333 static void sd_free_ctl_entry(struct ctl_table **tablep) 5334 { 5335 struct ctl_table *entry; 5336 5337 /* 5338 * In the intermediate directories, both the child directory and 5339 * procname are dynamically allocated and could fail but the mode 5340 * will always be set. In the lowest directory the names are 5341 * static strings and all have proc handlers. 5342 */ 5343 for (entry = *tablep; entry->mode; entry++) { 5344 if (entry->child) 5345 sd_free_ctl_entry(&entry->child); 5346 if (entry->proc_handler == NULL) 5347 kfree(entry->procname); 5348 } 5349 5350 kfree(*tablep); 5351 *tablep = NULL; 5352 } 5353 5354 static int min_load_idx = 0; 5355 static int max_load_idx = CPU_LOAD_IDX_MAX-1; 5356 5357 static void 5358 set_table_entry(struct ctl_table *entry, 5359 const char *procname, void *data, int maxlen, 5360 umode_t mode, proc_handler *proc_handler, 5361 bool load_idx) 5362 { 5363 entry->procname = procname; 5364 entry->data = data; 5365 entry->maxlen = maxlen; 5366 entry->mode = mode; 5367 entry->proc_handler = proc_handler; 5368 5369 if (load_idx) { 5370 entry->extra1 = &min_load_idx; 5371 entry->extra2 = &max_load_idx; 5372 } 5373 } 5374 5375 static struct ctl_table * 5376 sd_alloc_ctl_domain_table(struct sched_domain *sd) 5377 { 5378 struct ctl_table *table = sd_alloc_ctl_entry(14); 5379 5380 if (table == NULL) 5381 return NULL; 5382 5383 set_table_entry(&table[0], "min_interval", &sd->min_interval, 5384 sizeof(long), 0644, proc_doulongvec_minmax, false); 5385 set_table_entry(&table[1], "max_interval", &sd->max_interval, 5386 sizeof(long), 0644, proc_doulongvec_minmax, false); 5387 set_table_entry(&table[2], "busy_idx", &sd->busy_idx, 5388 sizeof(int), 0644, proc_dointvec_minmax, true); 5389 set_table_entry(&table[3], "idle_idx", &sd->idle_idx, 5390 sizeof(int), 0644, proc_dointvec_minmax, true); 5391 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, 5392 sizeof(int), 0644, proc_dointvec_minmax, true); 5393 set_table_entry(&table[5], "wake_idx", &sd->wake_idx, 5394 sizeof(int), 0644, proc_dointvec_minmax, true); 5395 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, 5396 sizeof(int), 0644, proc_dointvec_minmax, true); 5397 set_table_entry(&table[7], "busy_factor", &sd->busy_factor, 5398 sizeof(int), 0644, proc_dointvec_minmax, false); 5399 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, 5400 sizeof(int), 0644, proc_dointvec_minmax, false); 5401 set_table_entry(&table[9], "cache_nice_tries", 5402 &sd->cache_nice_tries, 5403 sizeof(int), 0644, proc_dointvec_minmax, false); 5404 set_table_entry(&table[10], "flags", &sd->flags, 5405 sizeof(int), 0644, 
proc_dointvec_minmax, false); 5406 set_table_entry(&table[11], "max_newidle_lb_cost", 5407 &sd->max_newidle_lb_cost, 5408 sizeof(long), 0644, proc_doulongvec_minmax, false); 5409 set_table_entry(&table[12], "name", sd->name, 5410 CORENAME_MAX_SIZE, 0444, proc_dostring, false); 5411 /* &table[13] is terminator */ 5412 5413 return table; 5414 } 5415 5416 static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu) 5417 { 5418 struct ctl_table *entry, *table; 5419 struct sched_domain *sd; 5420 int domain_num = 0, i; 5421 char buf[32]; 5422 5423 for_each_domain(cpu, sd) 5424 domain_num++; 5425 entry = table = sd_alloc_ctl_entry(domain_num + 1); 5426 if (table == NULL) 5427 return NULL; 5428 5429 i = 0; 5430 for_each_domain(cpu, sd) { 5431 snprintf(buf, 32, "domain%d", i); 5432 entry->procname = kstrdup(buf, GFP_KERNEL); 5433 entry->mode = 0555; 5434 entry->child = sd_alloc_ctl_domain_table(sd); 5435 entry++; 5436 i++; 5437 } 5438 return table; 5439 } 5440 5441 static struct ctl_table_header *sd_sysctl_header; 5442 static void register_sched_domain_sysctl(void) 5443 { 5444 int i, cpu_num = num_possible_cpus(); 5445 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); 5446 char buf[32]; 5447 5448 WARN_ON(sd_ctl_dir[0].child); 5449 sd_ctl_dir[0].child = entry; 5450 5451 if (entry == NULL) 5452 return; 5453 5454 for_each_possible_cpu(i) { 5455 snprintf(buf, 32, "cpu%d", i); 5456 entry->procname = kstrdup(buf, GFP_KERNEL); 5457 entry->mode = 0555; 5458 entry->child = sd_alloc_ctl_cpu_table(i); 5459 entry++; 5460 } 5461 5462 WARN_ON(sd_sysctl_header); 5463 sd_sysctl_header = register_sysctl_table(sd_ctl_root); 5464 } 5465 5466 /* may be called multiple times per register */ 5467 static void unregister_sched_domain_sysctl(void) 5468 { 5469 unregister_sysctl_table(sd_sysctl_header); 5470 sd_sysctl_header = NULL; 5471 if (sd_ctl_dir[0].child) 5472 sd_free_ctl_entry(&sd_ctl_dir[0].child); 5473 } 5474 #else 5475 static void register_sched_domain_sysctl(void) 5476 { 5477 } 5478 static void unregister_sched_domain_sysctl(void) 5479 { 5480 } 5481 #endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */ 5482 5483 static void set_rq_online(struct rq *rq) 5484 { 5485 if (!rq->online) { 5486 const struct sched_class *class; 5487 5488 cpumask_set_cpu(rq->cpu, rq->rd->online); 5489 rq->online = 1; 5490 5491 for_each_class(class) { 5492 if (class->rq_online) 5493 class->rq_online(rq); 5494 } 5495 } 5496 } 5497 5498 static void set_rq_offline(struct rq *rq) 5499 { 5500 if (rq->online) { 5501 const struct sched_class *class; 5502 5503 for_each_class(class) { 5504 if (class->rq_offline) 5505 class->rq_offline(rq); 5506 } 5507 5508 cpumask_clear_cpu(rq->cpu, rq->rd->online); 5509 rq->online = 0; 5510 } 5511 } 5512 5513 /* 5514 * migration_call - callback that gets triggered when a CPU is added. 5515 * Here we can start up the necessary migration thread for the new CPU. 
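 *
 * The hotplug actions handled below are, roughly:
 *
 *	CPU_UP_PREPARE	- reset the runqueue's calc_load_update window
 *	CPU_ONLINE	- mark the runqueue online in its root-domain
 *	CPU_DYING	- mark the runqueue offline and migrate its tasks away
 *	CPU_DEAD	- fold the dead CPU's contribution into the global load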
5516 */ 5517 static int 5518 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) 5519 { 5520 int cpu = (long)hcpu; 5521 unsigned long flags; 5522 struct rq *rq = cpu_rq(cpu); 5523 5524 switch (action & ~CPU_TASKS_FROZEN) { 5525 5526 case CPU_UP_PREPARE: 5527 rq->calc_load_update = calc_load_update; 5528 break; 5529 5530 case CPU_ONLINE: 5531 /* Update our root-domain */ 5532 raw_spin_lock_irqsave(&rq->lock, flags); 5533 if (rq->rd) { 5534 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5535 5536 set_rq_online(rq); 5537 } 5538 raw_spin_unlock_irqrestore(&rq->lock, flags); 5539 break; 5540 5541 #ifdef CONFIG_HOTPLUG_CPU 5542 case CPU_DYING: 5543 sched_ttwu_pending(); 5544 /* Update our root-domain */ 5545 raw_spin_lock_irqsave(&rq->lock, flags); 5546 if (rq->rd) { 5547 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5548 set_rq_offline(rq); 5549 } 5550 migrate_tasks(rq); 5551 BUG_ON(rq->nr_running != 1); /* the migration thread */ 5552 raw_spin_unlock_irqrestore(&rq->lock, flags); 5553 break; 5554 5555 case CPU_DEAD: 5556 calc_load_migrate(rq); 5557 break; 5558 #endif 5559 } 5560 5561 update_max_interval(); 5562 5563 return NOTIFY_OK; 5564 } 5565 5566 /* 5567 * Register at high priority so that task migration (migrate_all_tasks) 5568 * happens before everything else. This has to be lower priority than 5569 * the notifier in the perf_event subsystem, though. 5570 */ 5571 static struct notifier_block migration_notifier = { 5572 .notifier_call = migration_call, 5573 .priority = CPU_PRI_MIGRATION, 5574 }; 5575 5576 static void set_cpu_rq_start_time(void) 5577 { 5578 int cpu = smp_processor_id(); 5579 struct rq *rq = cpu_rq(cpu); 5580 rq->age_stamp = sched_clock_cpu(cpu); 5581 } 5582 5583 static int sched_cpu_active(struct notifier_block *nfb, 5584 unsigned long action, void *hcpu) 5585 { 5586 int cpu = (long)hcpu; 5587 5588 switch (action & ~CPU_TASKS_FROZEN) { 5589 case CPU_STARTING: 5590 set_cpu_rq_start_time(); 5591 return NOTIFY_OK; 5592 5593 case CPU_ONLINE: 5594 /* 5595 * At this point a starting CPU has marked itself as online via 5596 * set_cpu_online(). But it might not yet have marked itself 5597 * as active, which is essential from here on. 
5598 */ 5599 set_cpu_active(cpu, true); 5600 stop_machine_unpark(cpu); 5601 return NOTIFY_OK; 5602 5603 case CPU_DOWN_FAILED: 5604 set_cpu_active(cpu, true); 5605 return NOTIFY_OK; 5606 5607 default: 5608 return NOTIFY_DONE; 5609 } 5610 } 5611 5612 static int sched_cpu_inactive(struct notifier_block *nfb, 5613 unsigned long action, void *hcpu) 5614 { 5615 switch (action & ~CPU_TASKS_FROZEN) { 5616 case CPU_DOWN_PREPARE: 5617 set_cpu_active((long)hcpu, false); 5618 return NOTIFY_OK; 5619 default: 5620 return NOTIFY_DONE; 5621 } 5622 } 5623 5624 static int __init migration_init(void) 5625 { 5626 void *cpu = (void *)(long)smp_processor_id(); 5627 int err; 5628 5629 /* Initialize migration for the boot CPU */ 5630 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); 5631 BUG_ON(err == NOTIFY_BAD); 5632 migration_call(&migration_notifier, CPU_ONLINE, cpu); 5633 register_cpu_notifier(&migration_notifier); 5634 5635 /* Register cpu active notifiers */ 5636 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); 5637 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); 5638 5639 return 0; 5640 } 5641 early_initcall(migration_init); 5642 5643 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ 5644 5645 #ifdef CONFIG_SCHED_DEBUG 5646 5647 static __read_mostly int sched_debug_enabled; 5648 5649 static int __init sched_debug_setup(char *str) 5650 { 5651 sched_debug_enabled = 1; 5652 5653 return 0; 5654 } 5655 early_param("sched_debug", sched_debug_setup); 5656 5657 static inline bool sched_debug(void) 5658 { 5659 return sched_debug_enabled; 5660 } 5661 5662 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 5663 struct cpumask *groupmask) 5664 { 5665 struct sched_group *group = sd->groups; 5666 5667 cpumask_clear(groupmask); 5668 5669 printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 5670 5671 if (!(sd->flags & SD_LOAD_BALANCE)) { 5672 printk("does not load-balance\n"); 5673 if (sd->parent) 5674 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" 5675 " has parent"); 5676 return -1; 5677 } 5678 5679 printk(KERN_CONT "span %*pbl level %s\n", 5680 cpumask_pr_args(sched_domain_span(sd)), sd->name); 5681 5682 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 5683 printk(KERN_ERR "ERROR: domain->span does not contain " 5684 "CPU%d\n", cpu); 5685 } 5686 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { 5687 printk(KERN_ERR "ERROR: domain->groups does not contain" 5688 " CPU%d\n", cpu); 5689 } 5690 5691 printk(KERN_DEBUG "%*s groups:", level + 1, ""); 5692 do { 5693 if (!group) { 5694 printk("\n"); 5695 printk(KERN_ERR "ERROR: group is NULL\n"); 5696 break; 5697 } 5698 5699 if (!cpumask_weight(sched_group_cpus(group))) { 5700 printk(KERN_CONT "\n"); 5701 printk(KERN_ERR "ERROR: empty group\n"); 5702 break; 5703 } 5704 5705 if (!(sd->flags & SD_OVERLAP) && 5706 cpumask_intersects(groupmask, sched_group_cpus(group))) { 5707 printk(KERN_CONT "\n"); 5708 printk(KERN_ERR "ERROR: repeated CPUs\n"); 5709 break; 5710 } 5711 5712 cpumask_or(groupmask, groupmask, sched_group_cpus(group)); 5713 5714 printk(KERN_CONT " %*pbl", 5715 cpumask_pr_args(sched_group_cpus(group))); 5716 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { 5717 printk(KERN_CONT " (cpu_capacity = %d)", 5718 group->sgc->capacity); 5719 } 5720 5721 group = group->next; 5722 } while (group != sd->groups); 5723 printk(KERN_CONT "\n"); 5724 5725 if (!cpumask_equal(sched_domain_span(sd), groupmask)) 5726 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 5727 5728 if 
(sd->parent && 5729 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 5730 printk(KERN_ERR "ERROR: parent span is not a superset " 5731 "of domain->span\n"); 5732 return 0; 5733 } 5734 5735 static void sched_domain_debug(struct sched_domain *sd, int cpu) 5736 { 5737 int level = 0; 5738 5739 if (!sched_debug_enabled) 5740 return; 5741 5742 if (!sd) { 5743 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 5744 return; 5745 } 5746 5747 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); 5748 5749 for (;;) { 5750 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) 5751 break; 5752 level++; 5753 sd = sd->parent; 5754 if (!sd) 5755 break; 5756 } 5757 } 5758 #else /* !CONFIG_SCHED_DEBUG */ 5759 # define sched_domain_debug(sd, cpu) do { } while (0) 5760 static inline bool sched_debug(void) 5761 { 5762 return false; 5763 } 5764 #endif /* CONFIG_SCHED_DEBUG */ 5765 5766 static int sd_degenerate(struct sched_domain *sd) 5767 { 5768 if (cpumask_weight(sched_domain_span(sd)) == 1) 5769 return 1; 5770 5771 /* Following flags need at least 2 groups */ 5772 if (sd->flags & (SD_LOAD_BALANCE | 5773 SD_BALANCE_NEWIDLE | 5774 SD_BALANCE_FORK | 5775 SD_BALANCE_EXEC | 5776 SD_SHARE_CPUCAPACITY | 5777 SD_SHARE_PKG_RESOURCES | 5778 SD_SHARE_POWERDOMAIN)) { 5779 if (sd->groups != sd->groups->next) 5780 return 0; 5781 } 5782 5783 /* Following flags don't use groups */ 5784 if (sd->flags & (SD_WAKE_AFFINE)) 5785 return 0; 5786 5787 return 1; 5788 } 5789 5790 static int 5791 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) 5792 { 5793 unsigned long cflags = sd->flags, pflags = parent->flags; 5794 5795 if (sd_degenerate(parent)) 5796 return 1; 5797 5798 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) 5799 return 0; 5800 5801 /* Flags needing groups don't count if only 1 group in parent */ 5802 if (parent->groups == parent->groups->next) { 5803 pflags &= ~(SD_LOAD_BALANCE | 5804 SD_BALANCE_NEWIDLE | 5805 SD_BALANCE_FORK | 5806 SD_BALANCE_EXEC | 5807 SD_SHARE_CPUCAPACITY | 5808 SD_SHARE_PKG_RESOURCES | 5809 SD_PREFER_SIBLING | 5810 SD_SHARE_POWERDOMAIN); 5811 if (nr_node_ids == 1) 5812 pflags &= ~SD_SERIALIZE; 5813 } 5814 if (~cflags & pflags) 5815 return 0; 5816 5817 return 1; 5818 } 5819 5820 static void free_rootdomain(struct rcu_head *rcu) 5821 { 5822 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); 5823 5824 cpupri_cleanup(&rd->cpupri); 5825 cpudl_cleanup(&rd->cpudl); 5826 free_cpumask_var(rd->dlo_mask); 5827 free_cpumask_var(rd->rto_mask); 5828 free_cpumask_var(rd->online); 5829 free_cpumask_var(rd->span); 5830 kfree(rd); 5831 } 5832 5833 static void rq_attach_root(struct rq *rq, struct root_domain *rd) 5834 { 5835 struct root_domain *old_rd = NULL; 5836 unsigned long flags; 5837 5838 raw_spin_lock_irqsave(&rq->lock, flags); 5839 5840 if (rq->rd) { 5841 old_rd = rq->rd; 5842 5843 if (cpumask_test_cpu(rq->cpu, old_rd->online)) 5844 set_rq_offline(rq); 5845 5846 cpumask_clear_cpu(rq->cpu, old_rd->span); 5847 5848 /* 5849 * If we dont want to free the old_rd yet then 5850 * set old_rd to NULL to skip the freeing later 5851 * in this function: 5852 */ 5853 if (!atomic_dec_and_test(&old_rd->refcount)) 5854 old_rd = NULL; 5855 } 5856 5857 atomic_inc(&rd->refcount); 5858 rq->rd = rd; 5859 5860 cpumask_set_cpu(rq->cpu, rd->span); 5861 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 5862 set_rq_online(rq); 5863 5864 raw_spin_unlock_irqrestore(&rq->lock, flags); 5865 5866 if (old_rd) 5867 
call_rcu_sched(&old_rd->rcu, free_rootdomain); 5868 } 5869 5870 static int init_rootdomain(struct root_domain *rd) 5871 { 5872 memset(rd, 0, sizeof(*rd)); 5873 5874 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) 5875 goto out; 5876 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) 5877 goto free_span; 5878 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 5879 goto free_online; 5880 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 5881 goto free_dlo_mask; 5882 5883 init_dl_bw(&rd->dl_bw); 5884 if (cpudl_init(&rd->cpudl) != 0) 5885 goto free_dlo_mask; 5886 5887 if (cpupri_init(&rd->cpupri) != 0) 5888 goto free_rto_mask; 5889 return 0; 5890 5891 free_rto_mask: 5892 free_cpumask_var(rd->rto_mask); 5893 free_dlo_mask: 5894 free_cpumask_var(rd->dlo_mask); 5895 free_online: 5896 free_cpumask_var(rd->online); 5897 free_span: 5898 free_cpumask_var(rd->span); 5899 out: 5900 return -ENOMEM; 5901 } 5902 5903 /* 5904 * By default the system creates a single root-domain with all cpus as 5905 * members (mimicking the global state we have today). 5906 */ 5907 struct root_domain def_root_domain; 5908 5909 static void init_defrootdomain(void) 5910 { 5911 init_rootdomain(&def_root_domain); 5912 5913 atomic_set(&def_root_domain.refcount, 1); 5914 } 5915 5916 static struct root_domain *alloc_rootdomain(void) 5917 { 5918 struct root_domain *rd; 5919 5920 rd = kmalloc(sizeof(*rd), GFP_KERNEL); 5921 if (!rd) 5922 return NULL; 5923 5924 if (init_rootdomain(rd) != 0) { 5925 kfree(rd); 5926 return NULL; 5927 } 5928 5929 return rd; 5930 } 5931 5932 static void free_sched_groups(struct sched_group *sg, int free_sgc) 5933 { 5934 struct sched_group *tmp, *first; 5935 5936 if (!sg) 5937 return; 5938 5939 first = sg; 5940 do { 5941 tmp = sg->next; 5942 5943 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) 5944 kfree(sg->sgc); 5945 5946 kfree(sg); 5947 sg = tmp; 5948 } while (sg != first); 5949 } 5950 5951 static void free_sched_domain(struct rcu_head *rcu) 5952 { 5953 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); 5954 5955 /* 5956 * If its an overlapping domain it has private groups, iterate and 5957 * nuke them all. 5958 */ 5959 if (sd->flags & SD_OVERLAP) { 5960 free_sched_groups(sd->groups, 1); 5961 } else if (atomic_dec_and_test(&sd->groups->ref)) { 5962 kfree(sd->groups->sgc); 5963 kfree(sd->groups); 5964 } 5965 kfree(sd); 5966 } 5967 5968 static void destroy_sched_domain(struct sched_domain *sd, int cpu) 5969 { 5970 call_rcu(&sd->rcu, free_sched_domain); 5971 } 5972 5973 static void destroy_sched_domains(struct sched_domain *sd, int cpu) 5974 { 5975 for (; sd; sd = sd->parent) 5976 destroy_sched_domain(sd, cpu); 5977 } 5978 5979 /* 5980 * Keep a special pointer to the highest sched_domain that has 5981 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this 5982 * allows us to avoid some pointer chasing select_idle_sibling(). 5983 * 5984 * Also keep a unique ID per domain (we use the first cpu number in 5985 * the cpumask of the domain), this allows us to quickly tell if 5986 * two cpus are in the same cache domain, see cpus_share_cache(). 
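 *
 * With that ID in place, cpus_share_cache() reduces to a per-cpu compare,
 * roughly:
 *
 *	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);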
5987 */ 5988 DEFINE_PER_CPU(struct sched_domain *, sd_llc); 5989 DEFINE_PER_CPU(int, sd_llc_size); 5990 DEFINE_PER_CPU(int, sd_llc_id); 5991 DEFINE_PER_CPU(struct sched_domain *, sd_numa); 5992 DEFINE_PER_CPU(struct sched_domain *, sd_busy); 5993 DEFINE_PER_CPU(struct sched_domain *, sd_asym); 5994 5995 static void update_top_cache_domain(int cpu) 5996 { 5997 struct sched_domain *sd; 5998 struct sched_domain *busy_sd = NULL; 5999 int id = cpu; 6000 int size = 1; 6001 6002 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); 6003 if (sd) { 6004 id = cpumask_first(sched_domain_span(sd)); 6005 size = cpumask_weight(sched_domain_span(sd)); 6006 busy_sd = sd->parent; /* sd_busy */ 6007 } 6008 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd); 6009 6010 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 6011 per_cpu(sd_llc_size, cpu) = size; 6012 per_cpu(sd_llc_id, cpu) = id; 6013 6014 sd = lowest_flag_domain(cpu, SD_NUMA); 6015 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); 6016 6017 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); 6018 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); 6019 } 6020 6021 /* 6022 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must 6023 * hold the hotplug lock. 6024 */ 6025 static void 6026 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) 6027 { 6028 struct rq *rq = cpu_rq(cpu); 6029 struct sched_domain *tmp; 6030 6031 /* Remove the sched domains which do not contribute to scheduling. */ 6032 for (tmp = sd; tmp; ) { 6033 struct sched_domain *parent = tmp->parent; 6034 if (!parent) 6035 break; 6036 6037 if (sd_parent_degenerate(tmp, parent)) { 6038 tmp->parent = parent->parent; 6039 if (parent->parent) 6040 parent->parent->child = tmp; 6041 /* 6042 * Transfer SD_PREFER_SIBLING down in case of a 6043 * degenerate parent; the spans match for this 6044 * so the property transfers. 6045 */ 6046 if (parent->flags & SD_PREFER_SIBLING) 6047 tmp->flags |= SD_PREFER_SIBLING; 6048 destroy_sched_domain(parent, cpu); 6049 } else 6050 tmp = tmp->parent; 6051 } 6052 6053 if (sd && sd_degenerate(sd)) { 6054 tmp = sd; 6055 sd = sd->parent; 6056 destroy_sched_domain(tmp, cpu); 6057 if (sd) 6058 sd->child = NULL; 6059 } 6060 6061 sched_domain_debug(sd, cpu); 6062 6063 rq_attach_root(rq, rd); 6064 tmp = rq->sd; 6065 rcu_assign_pointer(rq->sd, sd); 6066 destroy_sched_domains(tmp, cpu); 6067 6068 update_top_cache_domain(cpu); 6069 } 6070 6071 /* Setup the mask of cpus configured for isolated domains */ 6072 static int __init isolated_cpu_setup(char *str) 6073 { 6074 alloc_bootmem_cpumask_var(&cpu_isolated_map); 6075 cpulist_parse(str, cpu_isolated_map); 6076 return 1; 6077 } 6078 6079 __setup("isolcpus=", isolated_cpu_setup); 6080 6081 struct s_data { 6082 struct sched_domain ** __percpu sd; 6083 struct root_domain *rd; 6084 }; 6085 6086 enum s_alloc { 6087 sa_rootdomain, 6088 sa_sd, 6089 sa_sd_storage, 6090 sa_none, 6091 }; 6092 6093 /* 6094 * Build an iteration mask that can exclude certain CPUs from the upwards 6095 * domain traversal. 6096 * 6097 * Asymmetric node setups can result in situations where the domain tree is of 6098 * unequal depth, make sure to skip domains that already cover the entire 6099 * range. 6100 * 6101 * In that case build_sched_domains() will have terminated the iteration early 6102 * and our sibling sd spans will be empty. Domains should always include the 6103 * cpu they're built on, so check that. 
6104 * 6105 */ 6106 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) 6107 { 6108 const struct cpumask *span = sched_domain_span(sd); 6109 struct sd_data *sdd = sd->private; 6110 struct sched_domain *sibling; 6111 int i; 6112 6113 for_each_cpu(i, span) { 6114 sibling = *per_cpu_ptr(sdd->sd, i); 6115 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 6116 continue; 6117 6118 cpumask_set_cpu(i, sched_group_mask(sg)); 6119 } 6120 } 6121 6122 /* 6123 * Return the canonical balance cpu for this group, this is the first cpu 6124 * of this group that's also in the iteration mask. 6125 */ 6126 int group_balance_cpu(struct sched_group *sg) 6127 { 6128 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); 6129 } 6130 6131 static int 6132 build_overlap_sched_groups(struct sched_domain *sd, int cpu) 6133 { 6134 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; 6135 const struct cpumask *span = sched_domain_span(sd); 6136 struct cpumask *covered = sched_domains_tmpmask; 6137 struct sd_data *sdd = sd->private; 6138 struct sched_domain *sibling; 6139 int i; 6140 6141 cpumask_clear(covered); 6142 6143 for_each_cpu(i, span) { 6144 struct cpumask *sg_span; 6145 6146 if (cpumask_test_cpu(i, covered)) 6147 continue; 6148 6149 sibling = *per_cpu_ptr(sdd->sd, i); 6150 6151 /* See the comment near build_group_mask(). */ 6152 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 6153 continue; 6154 6155 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6156 GFP_KERNEL, cpu_to_node(cpu)); 6157 6158 if (!sg) 6159 goto fail; 6160 6161 sg_span = sched_group_cpus(sg); 6162 if (sibling->child) 6163 cpumask_copy(sg_span, sched_domain_span(sibling->child)); 6164 else 6165 cpumask_set_cpu(i, sg_span); 6166 6167 cpumask_or(covered, covered, sg_span); 6168 6169 sg->sgc = *per_cpu_ptr(sdd->sgc, i); 6170 if (atomic_inc_return(&sg->sgc->ref) == 1) 6171 build_group_mask(sd, sg); 6172 6173 /* 6174 * Initialize sgc->capacity such that even if we mess up the 6175 * domains and no possible iteration will get us here, we won't 6176 * die on a /0 trap. 6177 */ 6178 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); 6179 6180 /* 6181 * Make sure the first group of this domain contains the 6182 * canonical balance cpu. Otherwise the sched_domain iteration 6183 * breaks. See update_sg_lb_stats(). 6184 */ 6185 if ((!groups && cpumask_test_cpu(cpu, sg_span)) || 6186 group_balance_cpu(sg) == cpu) 6187 groups = sg; 6188 6189 if (!first) 6190 first = sg; 6191 if (last) 6192 last->next = sg; 6193 last = sg; 6194 last->next = first; 6195 } 6196 sd->groups = groups; 6197 6198 return 0; 6199 6200 fail: 6201 free_sched_groups(first, 0); 6202 6203 return -ENOMEM; 6204 } 6205 6206 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) 6207 { 6208 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 6209 struct sched_domain *child = sd->child; 6210 6211 if (child) 6212 cpu = cpumask_first(sched_domain_span(child)); 6213 6214 if (sg) { 6215 *sg = *per_cpu_ptr(sdd->sg, cpu); 6216 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); 6217 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */ 6218 } 6219 6220 return cpu; 6221 } 6222 6223 /* 6224 * build_sched_groups will build a circular linked list of the groups 6225 * covered by the given span, and will set each group's ->cpumask correctly, 6226 * and ->cpu_capacity to 0. 
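 *
 * For example, an MC domain spanning CPUs 0-3 whose SMT children span
 * {0,1} and {2,3} ends up with two groups, {0,1} and {2,3}, linked into a
 * circular list (illustrative; the actual masks depend on the topology).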
6227 * 6228 * Assumes the sched_domain tree is fully constructed 6229 */ 6230 static int 6231 build_sched_groups(struct sched_domain *sd, int cpu) 6232 { 6233 struct sched_group *first = NULL, *last = NULL; 6234 struct sd_data *sdd = sd->private; 6235 const struct cpumask *span = sched_domain_span(sd); 6236 struct cpumask *covered; 6237 int i; 6238 6239 get_group(cpu, sdd, &sd->groups); 6240 atomic_inc(&sd->groups->ref); 6241 6242 if (cpu != cpumask_first(span)) 6243 return 0; 6244 6245 lockdep_assert_held(&sched_domains_mutex); 6246 covered = sched_domains_tmpmask; 6247 6248 cpumask_clear(covered); 6249 6250 for_each_cpu(i, span) { 6251 struct sched_group *sg; 6252 int group, j; 6253 6254 if (cpumask_test_cpu(i, covered)) 6255 continue; 6256 6257 group = get_group(i, sdd, &sg); 6258 cpumask_setall(sched_group_mask(sg)); 6259 6260 for_each_cpu(j, span) { 6261 if (get_group(j, sdd, NULL) != group) 6262 continue; 6263 6264 cpumask_set_cpu(j, covered); 6265 cpumask_set_cpu(j, sched_group_cpus(sg)); 6266 } 6267 6268 if (!first) 6269 first = sg; 6270 if (last) 6271 last->next = sg; 6272 last = sg; 6273 } 6274 last->next = first; 6275 6276 return 0; 6277 } 6278 6279 /* 6280 * Initialize sched groups cpu_capacity. 6281 * 6282 * cpu_capacity indicates the capacity of sched group, which is used while 6283 * distributing the load between different sched groups in a sched domain. 6284 * Typically cpu_capacity for all the groups in a sched domain will be same 6285 * unless there are asymmetries in the topology. If there are asymmetries, 6286 * group having more cpu_capacity will pickup more load compared to the 6287 * group having less cpu_capacity. 6288 */ 6289 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) 6290 { 6291 struct sched_group *sg = sd->groups; 6292 6293 WARN_ON(!sg); 6294 6295 do { 6296 sg->group_weight = cpumask_weight(sched_group_cpus(sg)); 6297 sg = sg->next; 6298 } while (sg != sd->groups); 6299 6300 if (cpu != group_balance_cpu(sg)) 6301 return; 6302 6303 update_group_capacity(sd, cpu); 6304 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight); 6305 } 6306 6307 /* 6308 * Initializers for schedule domains 6309 * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 6310 */ 6311 6312 static int default_relax_domain_level = -1; 6313 int sched_domain_level_max; 6314 6315 static int __init setup_relax_domain_level(char *str) 6316 { 6317 if (kstrtoint(str, 0, &default_relax_domain_level)) 6318 pr_warn("Unable to set relax_domain_level\n"); 6319 6320 return 1; 6321 } 6322 __setup("relax_domain_level=", setup_relax_domain_level); 6323 6324 static void set_domain_attribute(struct sched_domain *sd, 6325 struct sched_domain_attr *attr) 6326 { 6327 int request; 6328 6329 if (!attr || attr->relax_domain_level < 0) { 6330 if (default_relax_domain_level < 0) 6331 return; 6332 else 6333 request = default_relax_domain_level; 6334 } else 6335 request = attr->relax_domain_level; 6336 if (request < sd->level) { 6337 /* turn off idle balance on this domain */ 6338 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 6339 } else { 6340 /* turn on idle balance on this domain */ 6341 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 6342 } 6343 } 6344 6345 static void __sdt_free(const struct cpumask *cpu_map); 6346 static int __sdt_alloc(const struct cpumask *cpu_map); 6347 6348 static void __free_domain_allocs(struct s_data *d, enum s_alloc what, 6349 const struct cpumask *cpu_map) 6350 { 6351 switch (what) { 6352 case sa_rootdomain: 6353 if 
(!atomic_read(&d->rd->refcount)) 6354 free_rootdomain(&d->rd->rcu); /* fall through */ 6355 case sa_sd: 6356 free_percpu(d->sd); /* fall through */ 6357 case sa_sd_storage: 6358 __sdt_free(cpu_map); /* fall through */ 6359 case sa_none: 6360 break; 6361 } 6362 } 6363 6364 static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, 6365 const struct cpumask *cpu_map) 6366 { 6367 memset(d, 0, sizeof(*d)); 6368 6369 if (__sdt_alloc(cpu_map)) 6370 return sa_sd_storage; 6371 d->sd = alloc_percpu(struct sched_domain *); 6372 if (!d->sd) 6373 return sa_sd_storage; 6374 d->rd = alloc_rootdomain(); 6375 if (!d->rd) 6376 return sa_sd; 6377 return sa_rootdomain; 6378 } 6379 6380 /* 6381 * NULL the sd_data elements we've used to build the sched_domain and 6382 * sched_group structure so that the subsequent __free_domain_allocs() 6383 * will not free the data we're using. 6384 */ 6385 static void claim_allocations(int cpu, struct sched_domain *sd) 6386 { 6387 struct sd_data *sdd = sd->private; 6388 6389 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 6390 *per_cpu_ptr(sdd->sd, cpu) = NULL; 6391 6392 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 6393 *per_cpu_ptr(sdd->sg, cpu) = NULL; 6394 6395 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 6396 *per_cpu_ptr(sdd->sgc, cpu) = NULL; 6397 } 6398 6399 #ifdef CONFIG_NUMA 6400 static int sched_domains_numa_levels; 6401 enum numa_topology_type sched_numa_topology_type; 6402 static int *sched_domains_numa_distance; 6403 int sched_max_numa_distance; 6404 static struct cpumask ***sched_domains_numa_masks; 6405 static int sched_domains_curr_level; 6406 #endif 6407 6408 /* 6409 * SD_flags allowed in topology descriptions. 6410 * 6411 * SD_SHARE_CPUCAPACITY - describes SMT topologies 6412 * SD_SHARE_PKG_RESOURCES - describes shared caches 6413 * SD_NUMA - describes NUMA topologies 6414 * SD_SHARE_POWERDOMAIN - describes shared power domain 6415 * 6416 * Odd one out: 6417 * SD_ASYM_PACKING - describes SMT quirks 6418 */ 6419 #define TOPOLOGY_SD_FLAGS \ 6420 (SD_SHARE_CPUCAPACITY | \ 6421 SD_SHARE_PKG_RESOURCES | \ 6422 SD_NUMA | \ 6423 SD_ASYM_PACKING | \ 6424 SD_SHARE_POWERDOMAIN) 6425 6426 static struct sched_domain * 6427 sd_init(struct sched_domain_topology_level *tl, int cpu) 6428 { 6429 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); 6430 int sd_weight, sd_flags = 0; 6431 6432 #ifdef CONFIG_NUMA 6433 /* 6434 * Ugly hack to pass state to sd_numa_mask()... 
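 * sd_numa_mask() takes only a cpu, so the current NUMA level is stashed
 * in sched_domains_curr_level here and read back when the topology
 * level's ->mask() callback is evaluated below.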
6435 */ 6436 sched_domains_curr_level = tl->numa_level; 6437 #endif 6438 6439 sd_weight = cpumask_weight(tl->mask(cpu)); 6440 6441 if (tl->sd_flags) 6442 sd_flags = (*tl->sd_flags)(); 6443 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, 6444 "wrong sd_flags in topology description\n")) 6445 sd_flags &= ~TOPOLOGY_SD_FLAGS; 6446 6447 *sd = (struct sched_domain){ 6448 .min_interval = sd_weight, 6449 .max_interval = 2*sd_weight, 6450 .busy_factor = 32, 6451 .imbalance_pct = 125, 6452 6453 .cache_nice_tries = 0, 6454 .busy_idx = 0, 6455 .idle_idx = 0, 6456 .newidle_idx = 0, 6457 .wake_idx = 0, 6458 .forkexec_idx = 0, 6459 6460 .flags = 1*SD_LOAD_BALANCE 6461 | 1*SD_BALANCE_NEWIDLE 6462 | 1*SD_BALANCE_EXEC 6463 | 1*SD_BALANCE_FORK 6464 | 0*SD_BALANCE_WAKE 6465 | 1*SD_WAKE_AFFINE 6466 | 0*SD_SHARE_CPUCAPACITY 6467 | 0*SD_SHARE_PKG_RESOURCES 6468 | 0*SD_SERIALIZE 6469 | 0*SD_PREFER_SIBLING 6470 | 0*SD_NUMA 6471 | sd_flags 6472 , 6473 6474 .last_balance = jiffies, 6475 .balance_interval = sd_weight, 6476 .smt_gain = 0, 6477 .max_newidle_lb_cost = 0, 6478 .next_decay_max_lb_cost = jiffies, 6479 #ifdef CONFIG_SCHED_DEBUG 6480 .name = tl->name, 6481 #endif 6482 }; 6483 6484 /* 6485 * Convert topological properties into behaviour. 6486 */ 6487 6488 if (sd->flags & SD_SHARE_CPUCAPACITY) { 6489 sd->flags |= SD_PREFER_SIBLING; 6490 sd->imbalance_pct = 110; 6491 sd->smt_gain = 1178; /* ~15% */ 6492 6493 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { 6494 sd->imbalance_pct = 117; 6495 sd->cache_nice_tries = 1; 6496 sd->busy_idx = 2; 6497 6498 #ifdef CONFIG_NUMA 6499 } else if (sd->flags & SD_NUMA) { 6500 sd->cache_nice_tries = 2; 6501 sd->busy_idx = 3; 6502 sd->idle_idx = 2; 6503 6504 sd->flags |= SD_SERIALIZE; 6505 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { 6506 sd->flags &= ~(SD_BALANCE_EXEC | 6507 SD_BALANCE_FORK | 6508 SD_WAKE_AFFINE); 6509 } 6510 6511 #endif 6512 } else { 6513 sd->flags |= SD_PREFER_SIBLING; 6514 sd->cache_nice_tries = 1; 6515 sd->busy_idx = 2; 6516 sd->idle_idx = 1; 6517 } 6518 6519 sd->private = &tl->data; 6520 6521 return sd; 6522 } 6523 6524 /* 6525 * Topology list, bottom-up. 
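 * The default levels are SMT, MC and DIE; on NUMA machines sched_init_numa()
 * appends one further level per unique internode distance.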
6526 */ 6527 static struct sched_domain_topology_level default_topology[] = { 6528 #ifdef CONFIG_SCHED_SMT 6529 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 6530 #endif 6531 #ifdef CONFIG_SCHED_MC 6532 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 6533 #endif 6534 { cpu_cpu_mask, SD_INIT_NAME(DIE) }, 6535 { NULL, }, 6536 }; 6537 6538 static struct sched_domain_topology_level *sched_domain_topology = 6539 default_topology; 6540 6541 #define for_each_sd_topology(tl) \ 6542 for (tl = sched_domain_topology; tl->mask; tl++) 6543 6544 void set_sched_topology(struct sched_domain_topology_level *tl) 6545 { 6546 sched_domain_topology = tl; 6547 } 6548 6549 #ifdef CONFIG_NUMA 6550 6551 static const struct cpumask *sd_numa_mask(int cpu) 6552 { 6553 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 6554 } 6555 6556 static void sched_numa_warn(const char *str) 6557 { 6558 static int done = false; 6559 int i,j; 6560 6561 if (done) 6562 return; 6563 6564 done = true; 6565 6566 printk(KERN_WARNING "ERROR: %s\n\n", str); 6567 6568 for (i = 0; i < nr_node_ids; i++) { 6569 printk(KERN_WARNING " "); 6570 for (j = 0; j < nr_node_ids; j++) 6571 printk(KERN_CONT "%02d ", node_distance(i,j)); 6572 printk(KERN_CONT "\n"); 6573 } 6574 printk(KERN_WARNING "\n"); 6575 } 6576 6577 bool find_numa_distance(int distance) 6578 { 6579 int i; 6580 6581 if (distance == node_distance(0, 0)) 6582 return true; 6583 6584 for (i = 0; i < sched_domains_numa_levels; i++) { 6585 if (sched_domains_numa_distance[i] == distance) 6586 return true; 6587 } 6588 6589 return false; 6590 } 6591 6592 /* 6593 * A system can have three types of NUMA topology: 6594 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system 6595 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes 6596 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane 6597 * 6598 * The difference between a glueless mesh topology and a backplane 6599 * topology lies in whether communication between not directly 6600 * connected nodes goes through intermediary nodes (where programs 6601 * could run), or through backplane controllers. This affects 6602 * placement of programs. 6603 * 6604 * The type of topology can be discerned with the following tests: 6605 * - If the maximum distance between any nodes is 1 hop, the system 6606 * is directly connected. 6607 * - If for two nodes A and B, located N > 1 hops away from each other, 6608 * there is an intermediary node C, which is < N hops away from both 6609 * nodes A and B, the system is a glueless mesh. 6610 */ 6611 static void init_numa_topology_type(void) 6612 { 6613 int a, b, c, n; 6614 6615 n = sched_max_numa_distance; 6616 6617 if (sched_domains_numa_levels <= 1) { 6618 sched_numa_topology_type = NUMA_DIRECT; 6619 return; 6620 } 6621 6622 for_each_online_node(a) { 6623 for_each_online_node(b) { 6624 /* Find two nodes furthest removed from each other. */ 6625 if (node_distance(a, b) < n) 6626 continue; 6627 6628 /* Is there an intermediary node between a and b? 
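			 * If so, the topology is a glueless mesh; if no such
			 * node exists, nodes must talk through a backplane.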
*/ 6629 for_each_online_node(c) { 6630 if (node_distance(a, c) < n && 6631 node_distance(b, c) < n) { 6632 sched_numa_topology_type = 6633 NUMA_GLUELESS_MESH; 6634 return; 6635 } 6636 } 6637 6638 sched_numa_topology_type = NUMA_BACKPLANE; 6639 return; 6640 } 6641 } 6642 } 6643 6644 static void sched_init_numa(void) 6645 { 6646 int next_distance, curr_distance = node_distance(0, 0); 6647 struct sched_domain_topology_level *tl; 6648 int level = 0; 6649 int i, j, k; 6650 6651 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); 6652 if (!sched_domains_numa_distance) 6653 return; 6654 6655 /* 6656 * O(nr_nodes^2) deduplicating selection sort -- in order to find the 6657 * unique distances in the node_distance() table. 6658 * 6659 * Assumes node_distance(0,j) includes all distances in 6660 * node_distance(i,j) in order to avoid cubic time. 6661 */ 6662 next_distance = curr_distance; 6663 for (i = 0; i < nr_node_ids; i++) { 6664 for (j = 0; j < nr_node_ids; j++) { 6665 for (k = 0; k < nr_node_ids; k++) { 6666 int distance = node_distance(i, k); 6667 6668 if (distance > curr_distance && 6669 (distance < next_distance || 6670 next_distance == curr_distance)) 6671 next_distance = distance; 6672 6673 /* 6674 * While not a strong assumption it would be nice to know 6675 * about cases where if node A is connected to B, B is not 6676 * equally connected to A. 6677 */ 6678 if (sched_debug() && node_distance(k, i) != distance) 6679 sched_numa_warn("Node-distance not symmetric"); 6680 6681 if (sched_debug() && i && !find_numa_distance(distance)) 6682 sched_numa_warn("Node-0 not representative"); 6683 } 6684 if (next_distance != curr_distance) { 6685 sched_domains_numa_distance[level++] = next_distance; 6686 sched_domains_numa_levels = level; 6687 curr_distance = next_distance; 6688 } else break; 6689 } 6690 6691 /* 6692 * In case of sched_debug() we verify the above assumption. 6693 */ 6694 if (!sched_debug()) 6695 break; 6696 } 6697 6698 if (!level) 6699 return; 6700 6701 /* 6702 * 'level' contains the number of unique distances, excluding the 6703 * identity distance node_distance(i,i). 6704 * 6705 * The sched_domains_numa_distance[] array includes the actual distance 6706 * numbers. 6707 */ 6708 6709 /* 6710 * Here, we should temporarily reset sched_domains_numa_levels to 0. 6711 * If it fails to allocate memory for array sched_domains_numa_masks[][], 6712 * the array will contain less then 'level' members. This could be 6713 * dangerous when we use it to iterate array sched_domains_numa_masks[][] 6714 * in other functions. 6715 * 6716 * We reset it to 'level' at the end of this function. 6717 */ 6718 sched_domains_numa_levels = 0; 6719 6720 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); 6721 if (!sched_domains_numa_masks) 6722 return; 6723 6724 /* 6725 * Now for each level, construct a mask per node which contains all 6726 * cpus of nodes that are that many hops away from us. 
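 * That is, sched_domains_numa_masks[i][j] ends up covering the cpus of every
 * node whose distance from node j does not exceed sched_domains_numa_distance[i].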
6727 */ 6728 for (i = 0; i < level; i++) { 6729 sched_domains_numa_masks[i] = 6730 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 6731 if (!sched_domains_numa_masks[i]) 6732 return; 6733 6734 for (j = 0; j < nr_node_ids; j++) { 6735 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 6736 if (!mask) 6737 return; 6738 6739 sched_domains_numa_masks[i][j] = mask; 6740 6741 for (k = 0; k < nr_node_ids; k++) { 6742 if (node_distance(j, k) > sched_domains_numa_distance[i]) 6743 continue; 6744 6745 cpumask_or(mask, mask, cpumask_of_node(k)); 6746 } 6747 } 6748 } 6749 6750 /* Compute default topology size */ 6751 for (i = 0; sched_domain_topology[i].mask; i++); 6752 6753 tl = kzalloc((i + level + 1) * 6754 sizeof(struct sched_domain_topology_level), GFP_KERNEL); 6755 if (!tl) 6756 return; 6757 6758 /* 6759 * Copy the default topology bits.. 6760 */ 6761 for (i = 0; sched_domain_topology[i].mask; i++) 6762 tl[i] = sched_domain_topology[i]; 6763 6764 /* 6765 * .. and append 'j' levels of NUMA goodness. 6766 */ 6767 for (j = 0; j < level; i++, j++) { 6768 tl[i] = (struct sched_domain_topology_level){ 6769 .mask = sd_numa_mask, 6770 .sd_flags = cpu_numa_flags, 6771 .flags = SDTL_OVERLAP, 6772 .numa_level = j, 6773 SD_INIT_NAME(NUMA) 6774 }; 6775 } 6776 6777 sched_domain_topology = tl; 6778 6779 sched_domains_numa_levels = level; 6780 sched_max_numa_distance = sched_domains_numa_distance[level - 1]; 6781 6782 init_numa_topology_type(); 6783 } 6784 6785 static void sched_domains_numa_masks_set(int cpu) 6786 { 6787 int i, j; 6788 int node = cpu_to_node(cpu); 6789 6790 for (i = 0; i < sched_domains_numa_levels; i++) { 6791 for (j = 0; j < nr_node_ids; j++) { 6792 if (node_distance(j, node) <= sched_domains_numa_distance[i]) 6793 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 6794 } 6795 } 6796 } 6797 6798 static void sched_domains_numa_masks_clear(int cpu) 6799 { 6800 int i, j; 6801 for (i = 0; i < sched_domains_numa_levels; i++) { 6802 for (j = 0; j < nr_node_ids; j++) 6803 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 6804 } 6805 } 6806 6807 /* 6808 * Update sched_domains_numa_masks[level][node] array when new cpus 6809 * are onlined. 
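 * CPU_ONLINE adds the cpu to the mask of every node within reach of its own
 * node; CPU_DEAD clears it again (see the notifier below).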
6810 */ 6811 static int sched_domains_numa_masks_update(struct notifier_block *nfb, 6812 unsigned long action, 6813 void *hcpu) 6814 { 6815 int cpu = (long)hcpu; 6816 6817 switch (action & ~CPU_TASKS_FROZEN) { 6818 case CPU_ONLINE: 6819 sched_domains_numa_masks_set(cpu); 6820 break; 6821 6822 case CPU_DEAD: 6823 sched_domains_numa_masks_clear(cpu); 6824 break; 6825 6826 default: 6827 return NOTIFY_DONE; 6828 } 6829 6830 return NOTIFY_OK; 6831 } 6832 #else 6833 static inline void sched_init_numa(void) 6834 { 6835 } 6836 6837 static int sched_domains_numa_masks_update(struct notifier_block *nfb, 6838 unsigned long action, 6839 void *hcpu) 6840 { 6841 return 0; 6842 } 6843 #endif /* CONFIG_NUMA */ 6844 6845 static int __sdt_alloc(const struct cpumask *cpu_map) 6846 { 6847 struct sched_domain_topology_level *tl; 6848 int j; 6849 6850 for_each_sd_topology(tl) { 6851 struct sd_data *sdd = &tl->data; 6852 6853 sdd->sd = alloc_percpu(struct sched_domain *); 6854 if (!sdd->sd) 6855 return -ENOMEM; 6856 6857 sdd->sg = alloc_percpu(struct sched_group *); 6858 if (!sdd->sg) 6859 return -ENOMEM; 6860 6861 sdd->sgc = alloc_percpu(struct sched_group_capacity *); 6862 if (!sdd->sgc) 6863 return -ENOMEM; 6864 6865 for_each_cpu(j, cpu_map) { 6866 struct sched_domain *sd; 6867 struct sched_group *sg; 6868 struct sched_group_capacity *sgc; 6869 6870 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 6871 GFP_KERNEL, cpu_to_node(j)); 6872 if (!sd) 6873 return -ENOMEM; 6874 6875 *per_cpu_ptr(sdd->sd, j) = sd; 6876 6877 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6878 GFP_KERNEL, cpu_to_node(j)); 6879 if (!sg) 6880 return -ENOMEM; 6881 6882 sg->next = sg; 6883 6884 *per_cpu_ptr(sdd->sg, j) = sg; 6885 6886 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 6887 GFP_KERNEL, cpu_to_node(j)); 6888 if (!sgc) 6889 return -ENOMEM; 6890 6891 *per_cpu_ptr(sdd->sgc, j) = sgc; 6892 } 6893 } 6894 6895 return 0; 6896 } 6897 6898 static void __sdt_free(const struct cpumask *cpu_map) 6899 { 6900 struct sched_domain_topology_level *tl; 6901 int j; 6902 6903 for_each_sd_topology(tl) { 6904 struct sd_data *sdd = &tl->data; 6905 6906 for_each_cpu(j, cpu_map) { 6907 struct sched_domain *sd; 6908 6909 if (sdd->sd) { 6910 sd = *per_cpu_ptr(sdd->sd, j); 6911 if (sd && (sd->flags & SD_OVERLAP)) 6912 free_sched_groups(sd->groups, 0); 6913 kfree(*per_cpu_ptr(sdd->sd, j)); 6914 } 6915 6916 if (sdd->sg) 6917 kfree(*per_cpu_ptr(sdd->sg, j)); 6918 if (sdd->sgc) 6919 kfree(*per_cpu_ptr(sdd->sgc, j)); 6920 } 6921 free_percpu(sdd->sd); 6922 sdd->sd = NULL; 6923 free_percpu(sdd->sg); 6924 sdd->sg = NULL; 6925 free_percpu(sdd->sgc); 6926 sdd->sgc = NULL; 6927 } 6928 } 6929 6930 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 6931 const struct cpumask *cpu_map, struct sched_domain_attr *attr, 6932 struct sched_domain *child, int cpu) 6933 { 6934 struct sched_domain *sd = sd_init(tl, cpu); 6935 if (!sd) 6936 return child; 6937 6938 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); 6939 if (child) { 6940 sd->level = child->level + 1; 6941 sched_domain_level_max = max(sched_domain_level_max, sd->level); 6942 child->parent = sd; 6943 sd->child = child; 6944 6945 if (!cpumask_subset(sched_domain_span(child), 6946 sched_domain_span(sd))) { 6947 pr_err("BUG: arch topology borken\n"); 6948 #ifdef CONFIG_SCHED_DEBUG 6949 pr_err(" the %s domain not a subset of the %s domain\n", 6950 child->name, sd->name); 6951 #endif 6952 /* Fixup, ensure @sd has at least @child 
cpus. */ 6953 cpumask_or(sched_domain_span(sd), 6954 sched_domain_span(sd), 6955 sched_domain_span(child)); 6956 } 6957 6958 } 6959 set_domain_attribute(sd, attr); 6960 6961 return sd; 6962 } 6963 6964 /* 6965 * Build sched domains for a given set of cpus and attach the sched domains 6966 * to the individual cpus 6967 */ 6968 static int build_sched_domains(const struct cpumask *cpu_map, 6969 struct sched_domain_attr *attr) 6970 { 6971 enum s_alloc alloc_state; 6972 struct sched_domain *sd; 6973 struct s_data d; 6974 int i, ret = -ENOMEM; 6975 6976 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 6977 if (alloc_state != sa_rootdomain) 6978 goto error; 6979 6980 /* Set up domains for cpus specified by the cpu_map. */ 6981 for_each_cpu(i, cpu_map) { 6982 struct sched_domain_topology_level *tl; 6983 6984 sd = NULL; 6985 for_each_sd_topology(tl) { 6986 sd = build_sched_domain(tl, cpu_map, attr, sd, i); 6987 if (tl == sched_domain_topology) 6988 *per_cpu_ptr(d.sd, i) = sd; 6989 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) 6990 sd->flags |= SD_OVERLAP; 6991 if (cpumask_equal(cpu_map, sched_domain_span(sd))) 6992 break; 6993 } 6994 } 6995 6996 /* Build the groups for the domains */ 6997 for_each_cpu(i, cpu_map) { 6998 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 6999 sd->span_weight = cpumask_weight(sched_domain_span(sd)); 7000 if (sd->flags & SD_OVERLAP) { 7001 if (build_overlap_sched_groups(sd, i)) 7002 goto error; 7003 } else { 7004 if (build_sched_groups(sd, i)) 7005 goto error; 7006 } 7007 } 7008 } 7009 7010 /* Calculate CPU capacity for physical packages and nodes */ 7011 for (i = nr_cpumask_bits-1; i >= 0; i--) { 7012 if (!cpumask_test_cpu(i, cpu_map)) 7013 continue; 7014 7015 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 7016 claim_allocations(i, sd); 7017 init_sched_groups_capacity(i, sd); 7018 } 7019 } 7020 7021 /* Attach the domains */ 7022 rcu_read_lock(); 7023 for_each_cpu(i, cpu_map) { 7024 sd = *per_cpu_ptr(d.sd, i); 7025 cpu_attach_domain(sd, d.rd, i); 7026 } 7027 rcu_read_unlock(); 7028 7029 ret = 0; 7030 error: 7031 __free_domain_allocs(&d, alloc_state, cpu_map); 7032 return ret; 7033 } 7034 7035 static cpumask_var_t *doms_cur; /* current sched domains */ 7036 static int ndoms_cur; /* number of sched domains in 'doms_cur' */ 7037 static struct sched_domain_attr *dattr_cur; 7038 /* attribues of custom domains in 'doms_cur' */ 7039 7040 /* 7041 * Special case: If a kmalloc of a doms_cur partition (array of 7042 * cpumask) fails, then fallback to a single sched domain, 7043 * as determined by the single cpumask fallback_doms. 7044 */ 7045 static cpumask_var_t fallback_doms; 7046 7047 /* 7048 * arch_update_cpu_topology lets virtualized architectures update the 7049 * cpu core maps. It is supposed to return 1 if the topology changed 7050 * or 0 if it stayed the same. 
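 * The __weak stub below always reports an unchanged topology.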
7051 */ 7052 int __weak arch_update_cpu_topology(void) 7053 { 7054 return 0; 7055 } 7056 7057 cpumask_var_t *alloc_sched_domains(unsigned int ndoms) 7058 { 7059 int i; 7060 cpumask_var_t *doms; 7061 7062 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); 7063 if (!doms) 7064 return NULL; 7065 for (i = 0; i < ndoms; i++) { 7066 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { 7067 free_sched_domains(doms, i); 7068 return NULL; 7069 } 7070 } 7071 return doms; 7072 } 7073 7074 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) 7075 { 7076 unsigned int i; 7077 for (i = 0; i < ndoms; i++) 7078 free_cpumask_var(doms[i]); 7079 kfree(doms); 7080 } 7081 7082 /* 7083 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 7084 * For now this just excludes isolated cpus, but could be used to 7085 * exclude other special cases in the future. 7086 */ 7087 static int init_sched_domains(const struct cpumask *cpu_map) 7088 { 7089 int err; 7090 7091 arch_update_cpu_topology(); 7092 ndoms_cur = 1; 7093 doms_cur = alloc_sched_domains(ndoms_cur); 7094 if (!doms_cur) 7095 doms_cur = &fallback_doms; 7096 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); 7097 err = build_sched_domains(doms_cur[0], NULL); 7098 register_sched_domain_sysctl(); 7099 7100 return err; 7101 } 7102 7103 /* 7104 * Detach sched domains from a group of cpus specified in cpu_map 7105 * These cpus will now be attached to the NULL domain 7106 */ 7107 static void detach_destroy_domains(const struct cpumask *cpu_map) 7108 { 7109 int i; 7110 7111 rcu_read_lock(); 7112 for_each_cpu(i, cpu_map) 7113 cpu_attach_domain(NULL, &def_root_domain, i); 7114 rcu_read_unlock(); 7115 } 7116 7117 /* handle null as "default" */ 7118 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, 7119 struct sched_domain_attr *new, int idx_new) 7120 { 7121 struct sched_domain_attr tmp; 7122 7123 /* fast path */ 7124 if (!new && !cur) 7125 return 1; 7126 7127 tmp = SD_ATTR_INIT; 7128 return !memcmp(cur ? (cur + idx_cur) : &tmp, 7129 new ? (new + idx_new) : &tmp, 7130 sizeof(struct sched_domain_attr)); 7131 } 7132 7133 /* 7134 * Partition sched domains as specified by the 'ndoms_new' 7135 * cpumasks in the array doms_new[] of cpumasks. This compares 7136 * doms_new[] to the current sched domain partitioning, doms_cur[]. 7137 * It destroys each deleted domain and builds each new domain. 7138 * 7139 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. 7140 * The masks don't intersect (don't overlap.) We should setup one 7141 * sched domain for each mask. CPUs not in any of the cpumasks will 7142 * not be load balanced. If the same cpumask appears both in the 7143 * current 'doms_cur' domains and in the new 'doms_new', we can leave 7144 * it as it is. 7145 * 7146 * The passed in 'doms_new' should be allocated using 7147 * alloc_sched_domains. This routine takes ownership of it and will 7148 * free_sched_domains it when done with it. If the caller failed the 7149 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, 7150 * and partition_sched_domains() will fallback to the single partition 7151 * 'fallback_doms', it also forces the domains to be rebuilt. 7152 * 7153 * If doms_new == NULL it will be replaced with cpu_online_mask. 7154 * ndoms_new == 0 is a special case for destroying existing domains, 7155 * and it will not create the default domain. 
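 * A plain rebuild therefore looks like partition_sched_domains(1, NULL, NULL),
 * which falls back to a single domain spanning the active, non-isolated cpus.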
7156 * 7157 * Call with hotplug lock held 7158 */ 7159 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 7160 struct sched_domain_attr *dattr_new) 7161 { 7162 int i, j, n; 7163 int new_topology; 7164 7165 mutex_lock(&sched_domains_mutex); 7166 7167 /* always unregister in case we don't destroy any domains */ 7168 unregister_sched_domain_sysctl(); 7169 7170 /* Let architecture update cpu core mappings. */ 7171 new_topology = arch_update_cpu_topology(); 7172 7173 n = doms_new ? ndoms_new : 0; 7174 7175 /* Destroy deleted domains */ 7176 for (i = 0; i < ndoms_cur; i++) { 7177 for (j = 0; j < n && !new_topology; j++) { 7178 if (cpumask_equal(doms_cur[i], doms_new[j]) 7179 && dattrs_equal(dattr_cur, i, dattr_new, j)) 7180 goto match1; 7181 } 7182 /* no match - a current sched domain not in new doms_new[] */ 7183 detach_destroy_domains(doms_cur[i]); 7184 match1: 7185 ; 7186 } 7187 7188 n = ndoms_cur; 7189 if (doms_new == NULL) { 7190 n = 0; 7191 doms_new = &fallback_doms; 7192 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); 7193 WARN_ON_ONCE(dattr_new); 7194 } 7195 7196 /* Build new domains */ 7197 for (i = 0; i < ndoms_new; i++) { 7198 for (j = 0; j < n && !new_topology; j++) { 7199 if (cpumask_equal(doms_new[i], doms_cur[j]) 7200 && dattrs_equal(dattr_new, i, dattr_cur, j)) 7201 goto match2; 7202 } 7203 /* no match - add a new doms_new */ 7204 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); 7205 match2: 7206 ; 7207 } 7208 7209 /* Remember the new sched domains */ 7210 if (doms_cur != &fallback_doms) 7211 free_sched_domains(doms_cur, ndoms_cur); 7212 kfree(dattr_cur); /* kfree(NULL) is safe */ 7213 doms_cur = doms_new; 7214 dattr_cur = dattr_new; 7215 ndoms_cur = ndoms_new; 7216 7217 register_sched_domain_sysctl(); 7218 7219 mutex_unlock(&sched_domains_mutex); 7220 } 7221 7222 static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ 7223 7224 /* 7225 * Update cpusets according to cpu_active mask. If cpusets are 7226 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 7227 * around partition_sched_domains(). 7228 * 7229 * If we come here as part of a suspend/resume, don't touch cpusets because we 7230 * want to restore it back to its original state upon resume anyway. 7231 */ 7232 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, 7233 void *hcpu) 7234 { 7235 switch (action) { 7236 case CPU_ONLINE_FROZEN: 7237 case CPU_DOWN_FAILED_FROZEN: 7238 7239 /* 7240 * num_cpus_frozen tracks how many CPUs are involved in suspend 7241 * resume sequence. As long as this is not the last online 7242 * operation in the resume sequence, just build a single sched 7243 * domain, ignoring cpusets. 7244 */ 7245 num_cpus_frozen--; 7246 if (likely(num_cpus_frozen)) { 7247 partition_sched_domains(1, NULL, NULL); 7248 break; 7249 } 7250 7251 /* 7252 * This is the last CPU online operation. So fall through and 7253 * restore the original sched domains by considering the 7254 * cpuset configurations. 
7255 */ 7256 7257 case CPU_ONLINE: 7258 cpuset_update_active_cpus(true); 7259 break; 7260 default: 7261 return NOTIFY_DONE; 7262 } 7263 return NOTIFY_OK; 7264 } 7265 7266 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, 7267 void *hcpu) 7268 { 7269 unsigned long flags; 7270 long cpu = (long)hcpu; 7271 struct dl_bw *dl_b; 7272 bool overflow; 7273 int cpus; 7274 7275 switch (action) { 7276 case CPU_DOWN_PREPARE: 7277 rcu_read_lock_sched(); 7278 dl_b = dl_bw_of(cpu); 7279 7280 raw_spin_lock_irqsave(&dl_b->lock, flags); 7281 cpus = dl_bw_cpus(cpu); 7282 overflow = __dl_overflow(dl_b, cpus, 0, 0); 7283 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7284 7285 rcu_read_unlock_sched(); 7286 7287 if (overflow) 7288 return notifier_from_errno(-EBUSY); 7289 cpuset_update_active_cpus(false); 7290 break; 7291 case CPU_DOWN_PREPARE_FROZEN: 7292 num_cpus_frozen++; 7293 partition_sched_domains(1, NULL, NULL); 7294 break; 7295 default: 7296 return NOTIFY_DONE; 7297 } 7298 return NOTIFY_OK; 7299 } 7300 7301 void __init sched_init_smp(void) 7302 { 7303 cpumask_var_t non_isolated_cpus; 7304 7305 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); 7306 alloc_cpumask_var(&fallback_doms, GFP_KERNEL); 7307 7308 sched_init_numa(); 7309 7310 /* 7311 * There's no userspace yet to cause hotplug operations; hence all the 7312 * cpu masks are stable and all blatant races in the below code cannot 7313 * happen. 7314 */ 7315 mutex_lock(&sched_domains_mutex); 7316 init_sched_domains(cpu_active_mask); 7317 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); 7318 if (cpumask_empty(non_isolated_cpus)) 7319 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); 7320 mutex_unlock(&sched_domains_mutex); 7321 7322 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); 7323 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); 7324 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); 7325 7326 init_hrtick(); 7327 7328 /* Move init over to a non-isolated CPU */ 7329 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) 7330 BUG(); 7331 sched_init_granularity(); 7332 free_cpumask_var(non_isolated_cpus); 7333 7334 init_sched_rt_class(); 7335 init_sched_dl_class(); 7336 } 7337 #else 7338 void __init sched_init_smp(void) 7339 { 7340 sched_init_granularity(); 7341 } 7342 #endif /* CONFIG_SMP */ 7343 7344 int in_sched_functions(unsigned long addr) 7345 { 7346 return in_lock_functions(addr) || 7347 (addr >= (unsigned long)__sched_text_start 7348 && addr < (unsigned long)__sched_text_end); 7349 } 7350 7351 #ifdef CONFIG_CGROUP_SCHED 7352 /* 7353 * Default task group. 7354 * Every task in system belongs to this group at bootup. 
7355 */ 7356 struct task_group root_task_group; 7357 LIST_HEAD(task_groups); 7358 #endif 7359 7360 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 7361 7362 void __init sched_init(void) 7363 { 7364 int i, j; 7365 unsigned long alloc_size = 0, ptr; 7366 7367 #ifdef CONFIG_FAIR_GROUP_SCHED 7368 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7369 #endif 7370 #ifdef CONFIG_RT_GROUP_SCHED 7371 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7372 #endif 7373 if (alloc_size) { 7374 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); 7375 7376 #ifdef CONFIG_FAIR_GROUP_SCHED 7377 root_task_group.se = (struct sched_entity **)ptr; 7378 ptr += nr_cpu_ids * sizeof(void **); 7379 7380 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 7381 ptr += nr_cpu_ids * sizeof(void **); 7382 7383 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7384 #ifdef CONFIG_RT_GROUP_SCHED 7385 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 7386 ptr += nr_cpu_ids * sizeof(void **); 7387 7388 root_task_group.rt_rq = (struct rt_rq **)ptr; 7389 ptr += nr_cpu_ids * sizeof(void **); 7390 7391 #endif /* CONFIG_RT_GROUP_SCHED */ 7392 } 7393 #ifdef CONFIG_CPUMASK_OFFSTACK 7394 for_each_possible_cpu(i) { 7395 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 7396 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 7397 } 7398 #endif /* CONFIG_CPUMASK_OFFSTACK */ 7399 7400 init_rt_bandwidth(&def_rt_bandwidth, 7401 global_rt_period(), global_rt_runtime()); 7402 init_dl_bandwidth(&def_dl_bandwidth, 7403 global_rt_period(), global_rt_runtime()); 7404 7405 #ifdef CONFIG_SMP 7406 init_defrootdomain(); 7407 #endif 7408 7409 #ifdef CONFIG_RT_GROUP_SCHED 7410 init_rt_bandwidth(&root_task_group.rt_bandwidth, 7411 global_rt_period(), global_rt_runtime()); 7412 #endif /* CONFIG_RT_GROUP_SCHED */ 7413 7414 #ifdef CONFIG_CGROUP_SCHED 7415 list_add(&root_task_group.list, &task_groups); 7416 INIT_LIST_HEAD(&root_task_group.children); 7417 INIT_LIST_HEAD(&root_task_group.siblings); 7418 autogroup_init(&init_task); 7419 7420 #endif /* CONFIG_CGROUP_SCHED */ 7421 7422 for_each_possible_cpu(i) { 7423 struct rq *rq; 7424 7425 rq = cpu_rq(i); 7426 raw_spin_lock_init(&rq->lock); 7427 rq->nr_running = 0; 7428 rq->calc_load_active = 0; 7429 rq->calc_load_update = jiffies + LOAD_FREQ; 7430 init_cfs_rq(&rq->cfs); 7431 init_rt_rq(&rq->rt); 7432 init_dl_rq(&rq->dl); 7433 #ifdef CONFIG_FAIR_GROUP_SCHED 7434 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 7435 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 7436 /* 7437 * How much cpu bandwidth does root_task_group get? 7438 * 7439 * In case of task-groups formed thr' the cgroup filesystem, it 7440 * gets 100% of the cpu resources in the system. This overall 7441 * system cpu resource is divided among the tasks of 7442 * root_task_group and its child task-groups in a fair manner, 7443 * based on each entity's (task or task-group's) weight 7444 * (se->load.weight). 7445 * 7446 * In other words, if root_task_group has 10 tasks of weight 7447 * 1024) and two child groups A0 and A1 (of weight 1024 each), 7448 * then A0's share of the cpu resource is: 7449 * 7450 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 7451 * 7452 * We achieve this by letting root_task_group's tasks sit 7453 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 
7454 */ 7455 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 7456 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 7457 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7458 7459 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 7460 #ifdef CONFIG_RT_GROUP_SCHED 7461 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 7462 #endif 7463 7464 for (j = 0; j < CPU_LOAD_IDX_MAX; j++) 7465 rq->cpu_load[j] = 0; 7466 7467 rq->last_load_update_tick = jiffies; 7468 7469 #ifdef CONFIG_SMP 7470 rq->sd = NULL; 7471 rq->rd = NULL; 7472 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 7473 rq->balance_callback = NULL; 7474 rq->active_balance = 0; 7475 rq->next_balance = jiffies; 7476 rq->push_cpu = 0; 7477 rq->cpu = i; 7478 rq->online = 0; 7479 rq->idle_stamp = 0; 7480 rq->avg_idle = 2*sysctl_sched_migration_cost; 7481 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 7482 7483 INIT_LIST_HEAD(&rq->cfs_tasks); 7484 7485 rq_attach_root(rq, &def_root_domain); 7486 #ifdef CONFIG_NO_HZ_COMMON 7487 rq->nohz_flags = 0; 7488 #endif 7489 #ifdef CONFIG_NO_HZ_FULL 7490 rq->last_sched_tick = 0; 7491 #endif 7492 #endif 7493 init_rq_hrtick(rq); 7494 atomic_set(&rq->nr_iowait, 0); 7495 } 7496 7497 set_load_weight(&init_task); 7498 7499 #ifdef CONFIG_PREEMPT_NOTIFIERS 7500 INIT_HLIST_HEAD(&init_task.preempt_notifiers); 7501 #endif 7502 7503 /* 7504 * The boot idle thread does lazy MMU switching as well: 7505 */ 7506 atomic_inc(&init_mm.mm_count); 7507 enter_lazy_tlb(&init_mm, current); 7508 7509 /* 7510 * During early bootup we pretend to be a normal task: 7511 */ 7512 current->sched_class = &fair_sched_class; 7513 7514 /* 7515 * Make us the idle thread. Technically, schedule() should not be 7516 * called from this thread, however somewhere below it might be, 7517 * but because we are the idle thread, we just pick up running again 7518 * when this runqueue becomes "idle". 7519 */ 7520 init_idle(current, smp_processor_id()); 7521 7522 calc_load_update = jiffies + LOAD_FREQ; 7523 7524 #ifdef CONFIG_SMP 7525 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); 7526 /* May be allocated at isolcpus cmdline parse time */ 7527 if (cpu_isolated_map == NULL) 7528 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 7529 idle_thread_set_boot_cpu(); 7530 set_cpu_rq_start_time(); 7531 #endif 7532 init_sched_fair_class(); 7533 7534 scheduler_running = 1; 7535 } 7536 7537 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 7538 static inline int preempt_count_equals(int preempt_offset) 7539 { 7540 int nested = preempt_count() + rcu_preempt_depth(); 7541 7542 return (nested == preempt_offset); 7543 } 7544 7545 void __might_sleep(const char *file, int line, int preempt_offset) 7546 { 7547 /* 7548 * Blocking primitives will set (and therefore destroy) current->state, 7549 * since we will exit with TASK_RUNNING make sure we enter with it, 7550 * otherwise we will destroy state. 7551 */ 7552 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 7553 "do not call blocking ops when !TASK_RUNNING; " 7554 "state=%lx set at [<%p>] %pS\n", 7555 current->state, 7556 (void *)current->task_state_change, 7557 (void *)current->task_state_change); 7558 7559 ___might_sleep(file, line, preempt_offset); 7560 } 7561 EXPORT_SYMBOL(__might_sleep); 7562 7563 void ___might_sleep(const char *file, int line, int preempt_offset) 7564 { 7565 static unsigned long prev_jiffy; /* ratelimiting */ 7566 7567 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. 
*/ 7568 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 7569 !is_idle_task(current)) || 7570 system_state != SYSTEM_RUNNING || oops_in_progress) 7571 return; 7572 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 7573 return; 7574 prev_jiffy = jiffies; 7575 7576 printk(KERN_ERR 7577 "BUG: sleeping function called from invalid context at %s:%d\n", 7578 file, line); 7579 printk(KERN_ERR 7580 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 7581 in_atomic(), irqs_disabled(), 7582 current->pid, current->comm); 7583 7584 if (task_stack_end_corrupted(current)) 7585 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 7586 7587 debug_show_held_locks(current); 7588 if (irqs_disabled()) 7589 print_irqtrace_events(current); 7590 #ifdef CONFIG_DEBUG_PREEMPT 7591 if (!preempt_count_equals(preempt_offset)) { 7592 pr_err("Preemption disabled at:"); 7593 print_ip_sym(current->preempt_disable_ip); 7594 pr_cont("\n"); 7595 } 7596 #endif 7597 dump_stack(); 7598 } 7599 EXPORT_SYMBOL(___might_sleep); 7600 #endif 7601 7602 #ifdef CONFIG_MAGIC_SYSRQ 7603 void normalize_rt_tasks(void) 7604 { 7605 struct task_struct *g, *p; 7606 struct sched_attr attr = { 7607 .sched_policy = SCHED_NORMAL, 7608 }; 7609 7610 read_lock(&tasklist_lock); 7611 for_each_process_thread(g, p) { 7612 /* 7613 * Only normalize user tasks: 7614 */ 7615 if (p->flags & PF_KTHREAD) 7616 continue; 7617 7618 p->se.exec_start = 0; 7619 #ifdef CONFIG_SCHEDSTATS 7620 p->se.statistics.wait_start = 0; 7621 p->se.statistics.sleep_start = 0; 7622 p->se.statistics.block_start = 0; 7623 #endif 7624 7625 if (!dl_task(p) && !rt_task(p)) { 7626 /* 7627 * Renice negative nice level userspace 7628 * tasks back to 0: 7629 */ 7630 if (task_nice(p) < 0) 7631 set_user_nice(p, 0); 7632 continue; 7633 } 7634 7635 __sched_setscheduler(p, &attr, false, false); 7636 } 7637 read_unlock(&tasklist_lock); 7638 } 7639 7640 #endif /* CONFIG_MAGIC_SYSRQ */ 7641 7642 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 7643 /* 7644 * These functions are only useful for the IA64 MCA handling, or kdb. 7645 * 7646 * They can only be called when the whole system has been 7647 * stopped - every CPU needs to be quiescent, and no scheduling 7648 * activity can take place. Using them for anything else would 7649 * be a serious bug, and as a result, they aren't even visible 7650 * under any other configuration. 7651 */ 7652 7653 /** 7654 * curr_task - return the current task for a given cpu. 7655 * @cpu: the processor in question. 7656 * 7657 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 7658 * 7659 * Return: The current task for @cpu. 7660 */ 7661 struct task_struct *curr_task(int cpu) 7662 { 7663 return cpu_curr(cpu); 7664 } 7665 7666 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 7667 7668 #ifdef CONFIG_IA64 7669 /** 7670 * set_curr_task - set the current task for a given cpu. 7671 * @cpu: the processor in question. 7672 * @p: the task pointer to set. 7673 * 7674 * Description: This function must only be used when non-maskable interrupts 7675 * are serviced on a separate stack. It allows the architecture to switch the 7676 * notion of the current task on a cpu in a non-blocking manner. This function 7677 * must be called with all CPU's synchronized, and interrupts disabled, the 7678 * and caller must save the original value of the current task (see 7679 * curr_task() above) and restore that value before reenabling interrupts and 7680 * re-starting the system. 7681 * 7682 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 
7683 */ 7684 void set_curr_task(int cpu, struct task_struct *p) 7685 { 7686 cpu_curr(cpu) = p; 7687 } 7688 7689 #endif 7690 7691 #ifdef CONFIG_CGROUP_SCHED 7692 /* task_group_lock serializes the addition/removal of task groups */ 7693 static DEFINE_SPINLOCK(task_group_lock); 7694 7695 static void free_sched_group(struct task_group *tg) 7696 { 7697 free_fair_sched_group(tg); 7698 free_rt_sched_group(tg); 7699 autogroup_free(tg); 7700 kfree(tg); 7701 } 7702 7703 /* allocate runqueue etc for a new task group */ 7704 struct task_group *sched_create_group(struct task_group *parent) 7705 { 7706 struct task_group *tg; 7707 7708 tg = kzalloc(sizeof(*tg), GFP_KERNEL); 7709 if (!tg) 7710 return ERR_PTR(-ENOMEM); 7711 7712 if (!alloc_fair_sched_group(tg, parent)) 7713 goto err; 7714 7715 if (!alloc_rt_sched_group(tg, parent)) 7716 goto err; 7717 7718 return tg; 7719 7720 err: 7721 free_sched_group(tg); 7722 return ERR_PTR(-ENOMEM); 7723 } 7724 7725 void sched_online_group(struct task_group *tg, struct task_group *parent) 7726 { 7727 unsigned long flags; 7728 7729 spin_lock_irqsave(&task_group_lock, flags); 7730 list_add_rcu(&tg->list, &task_groups); 7731 7732 WARN_ON(!parent); /* root should already exist */ 7733 7734 tg->parent = parent; 7735 INIT_LIST_HEAD(&tg->children); 7736 list_add_rcu(&tg->siblings, &parent->children); 7737 spin_unlock_irqrestore(&task_group_lock, flags); 7738 } 7739 7740 /* rcu callback to free various structures associated with a task group */ 7741 static void free_sched_group_rcu(struct rcu_head *rhp) 7742 { 7743 /* now it should be safe to free those cfs_rqs */ 7744 free_sched_group(container_of(rhp, struct task_group, rcu)); 7745 } 7746 7747 /* Destroy runqueue etc associated with a task group */ 7748 void sched_destroy_group(struct task_group *tg) 7749 { 7750 /* wait for possible concurrent references to cfs_rqs complete */ 7751 call_rcu(&tg->rcu, free_sched_group_rcu); 7752 } 7753 7754 void sched_offline_group(struct task_group *tg) 7755 { 7756 unsigned long flags; 7757 int i; 7758 7759 /* end participation in shares distribution */ 7760 for_each_possible_cpu(i) 7761 unregister_fair_sched_group(tg, i); 7762 7763 spin_lock_irqsave(&task_group_lock, flags); 7764 list_del_rcu(&tg->list); 7765 list_del_rcu(&tg->siblings); 7766 spin_unlock_irqrestore(&task_group_lock, flags); 7767 } 7768 7769 /* change task's runqueue when it moves between groups. 7770 * The caller of this function should have put the task in its new group 7771 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to 7772 * reflect its new group. 7773 */ 7774 void sched_move_task(struct task_struct *tsk) 7775 { 7776 struct task_group *tg; 7777 int queued, running; 7778 unsigned long flags; 7779 struct rq *rq; 7780 7781 rq = task_rq_lock(tsk, &flags); 7782 7783 running = task_current(rq, tsk); 7784 queued = task_on_rq_queued(tsk); 7785 7786 if (queued) 7787 dequeue_task(rq, tsk, DEQUEUE_SAVE); 7788 if (unlikely(running)) 7789 put_prev_task(rq, tsk); 7790 7791 /* 7792 * All callers are synchronized by task_rq_lock(); we do not use RCU 7793 * which is pointless here. Thus, we pass "true" to task_css_check() 7794 * to prevent lockdep warnings. 
7795 */ 7796 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 7797 struct task_group, css); 7798 tg = autogroup_task_group(tsk, tg); 7799 tsk->sched_task_group = tg; 7800 7801 #ifdef CONFIG_FAIR_GROUP_SCHED 7802 if (tsk->sched_class->task_move_group) 7803 tsk->sched_class->task_move_group(tsk); 7804 else 7805 #endif 7806 set_task_rq(tsk, task_cpu(tsk)); 7807 7808 if (unlikely(running)) 7809 tsk->sched_class->set_curr_task(rq); 7810 if (queued) 7811 enqueue_task(rq, tsk, ENQUEUE_RESTORE); 7812 7813 task_rq_unlock(rq, tsk, &flags); 7814 } 7815 #endif /* CONFIG_CGROUP_SCHED */ 7816 7817 #ifdef CONFIG_RT_GROUP_SCHED 7818 /* 7819 * Ensure that the real time constraints are schedulable. 7820 */ 7821 static DEFINE_MUTEX(rt_constraints_mutex); 7822 7823 /* Must be called with tasklist_lock held */ 7824 static inline int tg_has_rt_tasks(struct task_group *tg) 7825 { 7826 struct task_struct *g, *p; 7827 7828 /* 7829 * Autogroups do not have RT tasks; see autogroup_create(). 7830 */ 7831 if (task_group_is_autogroup(tg)) 7832 return 0; 7833 7834 for_each_process_thread(g, p) { 7835 if (rt_task(p) && task_group(p) == tg) 7836 return 1; 7837 } 7838 7839 return 0; 7840 } 7841 7842 struct rt_schedulable_data { 7843 struct task_group *tg; 7844 u64 rt_period; 7845 u64 rt_runtime; 7846 }; 7847 7848 static int tg_rt_schedulable(struct task_group *tg, void *data) 7849 { 7850 struct rt_schedulable_data *d = data; 7851 struct task_group *child; 7852 unsigned long total, sum = 0; 7853 u64 period, runtime; 7854 7855 period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7856 runtime = tg->rt_bandwidth.rt_runtime; 7857 7858 if (tg == d->tg) { 7859 period = d->rt_period; 7860 runtime = d->rt_runtime; 7861 } 7862 7863 /* 7864 * Cannot have more runtime than the period. 7865 */ 7866 if (runtime > period && runtime != RUNTIME_INF) 7867 return -EINVAL; 7868 7869 /* 7870 * Ensure we don't starve existing RT tasks. 7871 */ 7872 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) 7873 return -EBUSY; 7874 7875 total = to_ratio(period, runtime); 7876 7877 /* 7878 * Nobody can have more than the global setting allows. 7879 */ 7880 if (total > to_ratio(global_rt_period(), global_rt_runtime())) 7881 return -EINVAL; 7882 7883 /* 7884 * The sum of our children's runtime should not exceed our own. 7885 */ 7886 list_for_each_entry_rcu(child, &tg->children, siblings) { 7887 period = ktime_to_ns(child->rt_bandwidth.rt_period); 7888 runtime = child->rt_bandwidth.rt_runtime; 7889 7890 if (child == d->tg) { 7891 period = d->rt_period; 7892 runtime = d->rt_runtime; 7893 } 7894 7895 sum += to_ratio(period, runtime); 7896 } 7897 7898 if (sum > total) 7899 return -EINVAL; 7900 7901 return 0; 7902 } 7903 7904 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 7905 { 7906 int ret; 7907 7908 struct rt_schedulable_data data = { 7909 .tg = tg, 7910 .rt_period = period, 7911 .rt_runtime = runtime, 7912 }; 7913 7914 rcu_read_lock(); 7915 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); 7916 rcu_read_unlock(); 7917 7918 return ret; 7919 } 7920 7921 static int tg_set_rt_bandwidth(struct task_group *tg, 7922 u64 rt_period, u64 rt_runtime) 7923 { 7924 int i, err = 0; 7925 7926 /* 7927 * Disallowing the root group RT runtime is BAD, it would disallow the 7928 * kernel creating (and or operating) RT threads. 7929 */ 7930 if (tg == &root_task_group && rt_runtime == 0) 7931 return -EINVAL; 7932 7933 /* No period doesn't make any sense. 
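	 * A zero period would also make the runtime/period ratio checked by
	 * __rt_schedulable() meaningless.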
*/ 7934 if (rt_period == 0) 7935 return -EINVAL; 7936 7937 mutex_lock(&rt_constraints_mutex); 7938 read_lock(&tasklist_lock); 7939 err = __rt_schedulable(tg, rt_period, rt_runtime); 7940 if (err) 7941 goto unlock; 7942 7943 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 7944 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 7945 tg->rt_bandwidth.rt_runtime = rt_runtime; 7946 7947 for_each_possible_cpu(i) { 7948 struct rt_rq *rt_rq = tg->rt_rq[i]; 7949 7950 raw_spin_lock(&rt_rq->rt_runtime_lock); 7951 rt_rq->rt_runtime = rt_runtime; 7952 raw_spin_unlock(&rt_rq->rt_runtime_lock); 7953 } 7954 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 7955 unlock: 7956 read_unlock(&tasklist_lock); 7957 mutex_unlock(&rt_constraints_mutex); 7958 7959 return err; 7960 } 7961 7962 static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) 7963 { 7964 u64 rt_runtime, rt_period; 7965 7966 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7967 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; 7968 if (rt_runtime_us < 0) 7969 rt_runtime = RUNTIME_INF; 7970 7971 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7972 } 7973 7974 static long sched_group_rt_runtime(struct task_group *tg) 7975 { 7976 u64 rt_runtime_us; 7977 7978 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) 7979 return -1; 7980 7981 rt_runtime_us = tg->rt_bandwidth.rt_runtime; 7982 do_div(rt_runtime_us, NSEC_PER_USEC); 7983 return rt_runtime_us; 7984 } 7985 7986 static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) 7987 { 7988 u64 rt_runtime, rt_period; 7989 7990 rt_period = rt_period_us * NSEC_PER_USEC; 7991 rt_runtime = tg->rt_bandwidth.rt_runtime; 7992 7993 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7994 } 7995 7996 static long sched_group_rt_period(struct task_group *tg) 7997 { 7998 u64 rt_period_us; 7999 8000 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); 8001 do_div(rt_period_us, NSEC_PER_USEC); 8002 return rt_period_us; 8003 } 8004 #endif /* CONFIG_RT_GROUP_SCHED */ 8005 8006 #ifdef CONFIG_RT_GROUP_SCHED 8007 static int sched_rt_global_constraints(void) 8008 { 8009 int ret = 0; 8010 8011 mutex_lock(&rt_constraints_mutex); 8012 read_lock(&tasklist_lock); 8013 ret = __rt_schedulable(NULL, 0, 0); 8014 read_unlock(&tasklist_lock); 8015 mutex_unlock(&rt_constraints_mutex); 8016 8017 return ret; 8018 } 8019 8020 static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) 8021 { 8022 /* Don't accept realtime tasks when there is no way for them to run */ 8023 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) 8024 return 0; 8025 8026 return 1; 8027 } 8028 8029 #else /* !CONFIG_RT_GROUP_SCHED */ 8030 static int sched_rt_global_constraints(void) 8031 { 8032 unsigned long flags; 8033 int i, ret = 0; 8034 8035 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 8036 for_each_possible_cpu(i) { 8037 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 8038 8039 raw_spin_lock(&rt_rq->rt_runtime_lock); 8040 rt_rq->rt_runtime = global_rt_runtime(); 8041 raw_spin_unlock(&rt_rq->rt_runtime_lock); 8042 } 8043 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 8044 8045 return ret; 8046 } 8047 #endif /* CONFIG_RT_GROUP_SCHED */ 8048 8049 static int sched_dl_global_validate(void) 8050 { 8051 u64 runtime = global_rt_runtime(); 8052 u64 period = global_rt_period(); 8053 u64 new_bw = to_ratio(period, runtime); 8054 struct dl_bw *dl_b; 8055 int cpu, ret = 0; 8056 unsigned long flags; 8057 8058 /* 8059 * Here we want to check the bandwidth 
not being set to some 8060 * value smaller than the currently allocated bandwidth in 8061 * any of the root_domains. 8062 * 8063 * FIXME: Cycling on all the CPUs is overdoing, but simpler than 8064 * cycling on root_domains... Discussion on different/better 8065 * solutions is welcome! 8066 */ 8067 for_each_possible_cpu(cpu) { 8068 rcu_read_lock_sched(); 8069 dl_b = dl_bw_of(cpu); 8070 8071 raw_spin_lock_irqsave(&dl_b->lock, flags); 8072 if (new_bw < dl_b->total_bw) 8073 ret = -EBUSY; 8074 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 8075 8076 rcu_read_unlock_sched(); 8077 8078 if (ret) 8079 break; 8080 } 8081 8082 return ret; 8083 } 8084 8085 static void sched_dl_do_global(void) 8086 { 8087 u64 new_bw = -1; 8088 struct dl_bw *dl_b; 8089 int cpu; 8090 unsigned long flags; 8091 8092 def_dl_bandwidth.dl_period = global_rt_period(); 8093 def_dl_bandwidth.dl_runtime = global_rt_runtime(); 8094 8095 if (global_rt_runtime() != RUNTIME_INF) 8096 new_bw = to_ratio(global_rt_period(), global_rt_runtime()); 8097 8098 /* 8099 * FIXME: As above... 8100 */ 8101 for_each_possible_cpu(cpu) { 8102 rcu_read_lock_sched(); 8103 dl_b = dl_bw_of(cpu); 8104 8105 raw_spin_lock_irqsave(&dl_b->lock, flags); 8106 dl_b->bw = new_bw; 8107 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 8108 8109 rcu_read_unlock_sched(); 8110 } 8111 } 8112 8113 static int sched_rt_global_validate(void) 8114 { 8115 if (sysctl_sched_rt_period <= 0) 8116 return -EINVAL; 8117 8118 if ((sysctl_sched_rt_runtime != RUNTIME_INF) && 8119 (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) 8120 return -EINVAL; 8121 8122 return 0; 8123 } 8124 8125 static void sched_rt_do_global(void) 8126 { 8127 def_rt_bandwidth.rt_runtime = global_rt_runtime(); 8128 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); 8129 } 8130 8131 int sched_rt_handler(struct ctl_table *table, int write, 8132 void __user *buffer, size_t *lenp, 8133 loff_t *ppos) 8134 { 8135 int old_period, old_runtime; 8136 static DEFINE_MUTEX(mutex); 8137 int ret; 8138 8139 mutex_lock(&mutex); 8140 old_period = sysctl_sched_rt_period; 8141 old_runtime = sysctl_sched_rt_runtime; 8142 8143 ret = proc_dointvec(table, write, buffer, lenp, ppos); 8144 8145 if (!ret && write) { 8146 ret = sched_rt_global_validate(); 8147 if (ret) 8148 goto undo; 8149 8150 ret = sched_dl_global_validate(); 8151 if (ret) 8152 goto undo; 8153 8154 ret = sched_rt_global_constraints(); 8155 if (ret) 8156 goto undo; 8157 8158 sched_rt_do_global(); 8159 sched_dl_do_global(); 8160 } 8161 if (0) { 8162 undo: 8163 sysctl_sched_rt_period = old_period; 8164 sysctl_sched_rt_runtime = old_runtime; 8165 } 8166 mutex_unlock(&mutex); 8167 8168 return ret; 8169 } 8170 8171 int sched_rr_handler(struct ctl_table *table, int write, 8172 void __user *buffer, size_t *lenp, 8173 loff_t *ppos) 8174 { 8175 int ret; 8176 static DEFINE_MUTEX(mutex); 8177 8178 mutex_lock(&mutex); 8179 ret = proc_dointvec(table, write, buffer, lenp, ppos); 8180 /* make sure that internally we keep jiffies */ 8181 /* also, writing zero resets timeslice to default */ 8182 if (!ret && write) { 8183 sched_rr_timeslice = sched_rr_timeslice <= 0 ? 8184 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice); 8185 } 8186 mutex_unlock(&mutex); 8187 return ret; 8188 } 8189 8190 #ifdef CONFIG_CGROUP_SCHED 8191 8192 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 8193 { 8194 return css ? 
container_of(css, struct task_group, css) : NULL; 8195 } 8196 8197 static struct cgroup_subsys_state * 8198 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 8199 { 8200 struct task_group *parent = css_tg(parent_css); 8201 struct task_group *tg; 8202 8203 if (!parent) { 8204 /* This is early initialization for the top cgroup */ 8205 return &root_task_group.css; 8206 } 8207 8208 tg = sched_create_group(parent); 8209 if (IS_ERR(tg)) 8210 return ERR_PTR(-ENOMEM); 8211 8212 return &tg->css; 8213 } 8214 8215 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 8216 { 8217 struct task_group *tg = css_tg(css); 8218 struct task_group *parent = css_tg(css->parent); 8219 8220 if (parent) 8221 sched_online_group(tg, parent); 8222 return 0; 8223 } 8224 8225 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 8226 { 8227 struct task_group *tg = css_tg(css); 8228 8229 sched_destroy_group(tg); 8230 } 8231 8232 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css) 8233 { 8234 struct task_group *tg = css_tg(css); 8235 8236 sched_offline_group(tg); 8237 } 8238 8239 static void cpu_cgroup_fork(struct task_struct *task, void *private) 8240 { 8241 sched_move_task(task); 8242 } 8243 8244 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 8245 { 8246 struct task_struct *task; 8247 struct cgroup_subsys_state *css; 8248 8249 cgroup_taskset_for_each(task, css, tset) { 8250 #ifdef CONFIG_RT_GROUP_SCHED 8251 if (!sched_rt_can_attach(css_tg(css), task)) 8252 return -EINVAL; 8253 #else 8254 /* We don't support RT-tasks being in separate groups */ 8255 if (task->sched_class != &fair_sched_class) 8256 return -EINVAL; 8257 #endif 8258 } 8259 return 0; 8260 } 8261 8262 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 8263 { 8264 struct task_struct *task; 8265 struct cgroup_subsys_state *css; 8266 8267 cgroup_taskset_for_each(task, css, tset) 8268 sched_move_task(task); 8269 } 8270 8271 #ifdef CONFIG_FAIR_GROUP_SCHED 8272 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 8273 struct cftype *cftype, u64 shareval) 8274 { 8275 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 8276 } 8277 8278 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 8279 struct cftype *cft) 8280 { 8281 struct task_group *tg = css_tg(css); 8282 8283 return (u64) scale_load_down(tg->shares); 8284 } 8285 8286 #ifdef CONFIG_CFS_BANDWIDTH 8287 static DEFINE_MUTEX(cfs_constraints_mutex); 8288 8289 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 8290 const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 8291 8292 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 8293 8294 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 8295 { 8296 int i, ret = 0, runtime_enabled, runtime_was_enabled; 8297 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8298 8299 if (tg == &root_task_group) 8300 return -EINVAL; 8301 8302 /* 8303 * Ensure we have at some amount of bandwidth every period. This is 8304 * to prevent reaching a state of large arrears when throttled via 8305 * entity_tick() resulting in prolonged exit starvation. 8306 */ 8307 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 8308 return -EINVAL; 8309 8310 /* 8311 * Likewise, bound things on the otherside by preventing insane quota 8312 * periods. This also allows us to normalize in computing quota 8313 * feasibility. 
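	 * Together with the minimum check above, this keeps period within
	 * [1ms, 1s] and quota, unless RUNTIME_INF, at or above 1ms.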
8314 */ 8315 if (period > max_cfs_quota_period) 8316 return -EINVAL; 8317 8318 /* 8319 * Prevent race between setting of cfs_rq->runtime_enabled and 8320 * unthrottle_offline_cfs_rqs(). 8321 */ 8322 get_online_cpus(); 8323 mutex_lock(&cfs_constraints_mutex); 8324 ret = __cfs_schedulable(tg, period, quota); 8325 if (ret) 8326 goto out_unlock; 8327 8328 runtime_enabled = quota != RUNTIME_INF; 8329 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 8330 /* 8331 * If we need to toggle cfs_bandwidth_used, off->on must occur 8332 * before making related changes, and on->off must occur afterwards 8333 */ 8334 if (runtime_enabled && !runtime_was_enabled) 8335 cfs_bandwidth_usage_inc(); 8336 raw_spin_lock_irq(&cfs_b->lock); 8337 cfs_b->period = ns_to_ktime(period); 8338 cfs_b->quota = quota; 8339 8340 __refill_cfs_bandwidth_runtime(cfs_b); 8341 /* restart the period timer (if active) to handle new period expiry */ 8342 if (runtime_enabled) 8343 start_cfs_bandwidth(cfs_b); 8344 raw_spin_unlock_irq(&cfs_b->lock); 8345 8346 for_each_online_cpu(i) { 8347 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 8348 struct rq *rq = cfs_rq->rq; 8349 8350 raw_spin_lock_irq(&rq->lock); 8351 cfs_rq->runtime_enabled = runtime_enabled; 8352 cfs_rq->runtime_remaining = 0; 8353 8354 if (cfs_rq->throttled) 8355 unthrottle_cfs_rq(cfs_rq); 8356 raw_spin_unlock_irq(&rq->lock); 8357 } 8358 if (runtime_was_enabled && !runtime_enabled) 8359 cfs_bandwidth_usage_dec(); 8360 out_unlock: 8361 mutex_unlock(&cfs_constraints_mutex); 8362 put_online_cpus(); 8363 8364 return ret; 8365 } 8366 8367 int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 8368 { 8369 u64 quota, period; 8370 8371 period = ktime_to_ns(tg->cfs_bandwidth.period); 8372 if (cfs_quota_us < 0) 8373 quota = RUNTIME_INF; 8374 else 8375 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 8376 8377 return tg_set_cfs_bandwidth(tg, period, quota); 8378 } 8379 8380 long tg_get_cfs_quota(struct task_group *tg) 8381 { 8382 u64 quota_us; 8383 8384 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 8385 return -1; 8386 8387 quota_us = tg->cfs_bandwidth.quota; 8388 do_div(quota_us, NSEC_PER_USEC); 8389 8390 return quota_us; 8391 } 8392 8393 int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 8394 { 8395 u64 quota, period; 8396 8397 period = (u64)cfs_period_us * NSEC_PER_USEC; 8398 quota = tg->cfs_bandwidth.quota; 8399 8400 return tg_set_cfs_bandwidth(tg, period, quota); 8401 } 8402 8403 long tg_get_cfs_period(struct task_group *tg) 8404 { 8405 u64 cfs_period_us; 8406 8407 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 8408 do_div(cfs_period_us, NSEC_PER_USEC); 8409 8410 return cfs_period_us; 8411 } 8412 8413 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 8414 struct cftype *cft) 8415 { 8416 return tg_get_cfs_quota(css_tg(css)); 8417 } 8418 8419 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 8420 struct cftype *cftype, s64 cfs_quota_us) 8421 { 8422 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 8423 } 8424 8425 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 8426 struct cftype *cft) 8427 { 8428 return tg_get_cfs_period(css_tg(css)); 8429 } 8430 8431 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 8432 struct cftype *cftype, u64 cfs_period_us) 8433 { 8434 return tg_set_cfs_period(css_tg(css), cfs_period_us); 8435 } 8436 8437 struct cfs_schedulable_data { 8438 struct task_group *tg; 8439 u64 period, quota; 8440 }; 8441 8442 /* 8443 * normalize group quota/period to be quota/max_period 8444 
struct cfs_schedulable_data {
	struct task_group *tg;
	u64 period, quota;
};

/*
 * normalize group quota/period to be quota/max_period
 * note: units are usecs
 */
static u64 normalize_cfs_quota(struct task_group *tg,
			       struct cfs_schedulable_data *d)
{
	u64 quota, period;

	if (tg == d->tg) {
		period = d->period;
		quota = d->quota;
	} else {
		period = tg_get_cfs_period(tg);
		quota = tg_get_cfs_quota(tg);
	}

	/* note: these should typically be equivalent */
	if (quota == RUNTIME_INF || quota == -1)
		return RUNTIME_INF;

	return to_ratio(period, quota);
}

static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
	struct cfs_schedulable_data *d = data;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	s64 quota = 0, parent_quota = -1;

	if (!tg->parent) {
		quota = RUNTIME_INF;
	} else {
		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

		quota = normalize_cfs_quota(tg, d);
		parent_quota = parent_b->hierarchical_quota;

		/*
		 * ensure max(child_quota) <= parent_quota, inherit when no
		 * limit is set
		 */
		if (quota == RUNTIME_INF)
			quota = parent_quota;
		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
			return -EINVAL;
	}
	cfs_b->hierarchical_quota = quota;

	return 0;
}

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
	int ret;
	struct cfs_schedulable_data data = {
		.tg = tg,
		.period = period,
		.quota = quota,
	};

	if (quota != RUNTIME_INF) {
		do_div(data.period, NSEC_PER_USEC);
		do_div(data.quota, NSEC_PER_USEC);
	}

	rcu_read_lock();
	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}

static int cpu_stats_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);

	return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
				struct cftype *cft, s64 val)
{
	return sched_group_set_rt_runtime(css_tg(css), val);
}

static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return sched_group_rt_runtime(css_tg(css));
}

static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 rt_period_us)
{
	return sched_group_set_rt_period(css_tg(css), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */
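/*
 * Usage sketch, not part of the original source: the cftype entries below
 * are exposed through the legacy cgroup hierarchy, prefixed with the
 * controller name (cpu.shares, cpu.cfs_quota_us, cpu.cfs_period_us,
 * cpu.stat, cpu.rt_runtime_us, cpu.rt_period_us).  Assuming the cpu
 * controller is mounted at /sys/fs/cgroup/cpu, a group could be capped at
 * half of one CPU with:
 *
 *	echo 100000 > /sys/fs/cgroup/cpu/<group>/cpu.cfs_period_us
 *	echo  50000 > /sys/fs/cgroup/cpu/<group>/cpu.cfs_quota_us
 *
 * cpu.stat then reports nr_periods, nr_throttled and throttled_time for
 * that group, as printed by cpu_stats_show() above.
 */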
static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "cfs_quota_us",
		.read_s64 = cpu_cfs_quota_read_s64,
		.write_s64 = cpu_cfs_quota_write_s64,
	},
	{
		.name = "cfs_period_us",
		.read_u64 = cpu_cfs_period_read_u64,
		.write_u64 = cpu_cfs_period_write_u64,
	},
	{
		.name = "stat",
		.seq_show = cpu_stats_show,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
	{ }	/* terminate */
};

struct cgroup_subsys cpu_cgrp_subsys = {
	.css_alloc	= cpu_cgroup_css_alloc,
	.css_free	= cpu_cgroup_css_free,
	.css_online	= cpu_cgroup_css_online,
	.css_offline	= cpu_cgroup_css_offline,
	.fork		= cpu_cgroup_fork,
	.can_attach	= cpu_cgroup_can_attach,
	.attach		= cpu_cgroup_attach,
	.legacy_cftypes	= cpu_files,
	.early_init	= 1,
};

#endif	/* CONFIG_CGROUP_SCHED */

void dump_cpu_task(int cpu)
{
	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}
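/*
 * Note, not part of the original source: dump_cpu_task() above is a small
 * diagnostic helper intended for callers such as the RCU CPU stall
 * detector, which use it to print the task currently running on a given
 * CPU via sched_show_task().
 */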