/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_skip_update & RQCF_ACT_SKIP)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	mutex_lock(&inode->i_mutex);
	i = sched_feat_set(cmp);
	mutex_unlock(&inode->i_mutex);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/* cpus with isolated domains */
cpumask_var_t cpu_isolated_map;

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;

	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	__hrtick_restart(rq);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		__hrtick_restart(rq);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, val)						\
({	typeof(*(ptr)) __old, __val = *(ptr);				\
	for (;;) {							\
		__old = cmpxchg((ptr), __val, __val | (val));		\
		if (__old == __val)					\
			break;						\
		__val = __old;						\
	}								\
	__old;								\
})

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * This cmpxchg() implies a full barrier, which pairs with the write
	 * barrier implied by the wakeup in wake_up_list().
	 */
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

	get_task_struct(task);

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* task can safely be re-inserted now */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() implies a wmb() to pair with the queueing
		 * in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id();
	struct sched_domain *sd;

	if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
		return cpu;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (!is_housekeeping_cpu(cpu))
		cpu = housekeeping_any_cpu();
unlock:
	rcu_read_unlock();
	return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
	return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(void)
{
	/*
	 * FIFO realtime policy runs the highest priority task. Other runnable
	 * tasks are of a lower priority. The scheduler tick does nothing.
	 */
	if (current->policy == SCHED_FIFO)
		return true;

	/*
	 * Round-robin realtime tasks time slice with other tasks at the same
	 * realtime priority. Is this task the only one at this priority?
	 */
	if (current->policy == SCHED_RR) {
		struct sched_rt_entity *rt_se = &current->rt;

		return rt_se->run_list.prev == rt_se->run_list.next;
	}

	/*
	 * More than one running task need preemption.
	 * nr_running update is assumed to be visible
	 * after IPI is sent from wakers.
	 */
	if (this_rq()->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */

void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (p->policy == SCHED_IDLE) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(prio_to_weight[prio]);
	load->inv_weight = prio_to_wmult[prio];
}

static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_queued(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_dequeued(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_curr(rq);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq, true);
}

#ifdef CONFIG_SMP
/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
{
	lockdep_assert_held(&rq->lock);

	dequeue_task(rq, p, 0);
	p->on_rq = TASK_ON_RQ_MIGRATING;
	set_task_cpu(p, new_cpu);
	raw_spin_unlock(&rq->lock);

	rq = cpu_rq(new_cpu);

	raw_spin_lock(&rq->lock);
	BUG_ON(task_cpu(p) != new_cpu);
	p->on_rq = TASK_ON_RQ_QUEUED;
	enqueue_task(rq, p, 0);
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
{
	if (unlikely(!cpu_active(dest_cpu)))
		return rq;

	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		return rq;

	rq = move_queued_task(rq, p, dest_cpu);

	return rq;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_allowed
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	sched_ttwu_pending();

	raw_spin_lock(&p->pi_lock);
	raw_spin_lock(&rq->lock);
	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq && task_on_rq_queued(p))
		rq = __migrate_task(rq, p, arg->dest_cpu);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock(&p->pi_lock);

	local_irq_enable();
	return 0;
}

/*
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 */
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	struct rq *rq = task_rq(p);
	bool queued, running;

	lockdep_assert_held(&p->pi_lock);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued) {
		/*
		 * Because __kthread_bind() calls this on blocked tasks without
		 * holding rq->lock.
		 */
		lockdep_assert_held(&rq->lock);
		dequeue_task(rq, p, 0);
	}
	if (running)
		put_prev_task(rq, p);

	p->sched_class->set_cpus_allowed(p, new_mask);

	if (running)
		p->sched_class->set_curr_task(rq);
	if (queued)
		enqueue_task(rq, p, 0);
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, bool check)
{
	unsigned long flags;
	struct rq *rq;
	unsigned int dest_cpu;
	int ret = 0;

	rq = task_rq_lock(p, &flags);

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if (check && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	} else if (task_on_rq_queued(p)) {
		/*
		 * OK, since we're going to drop the lock immediately
		 * afterwards anyway.
		 */
		lockdep_unpin_lock(&rq->lock);
		rq = move_queued_task(rq, p, dest_cpu);
		lockdep_pin_lock(&rq->lock);
	}
out:
	task_rq_unlock(rq, p, &flags);

	return ret;
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!p->on_rq);

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		perf_event_task_migrate(p);
	}

	__set_task_cpu(p, new_cpu);
}

static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (task_on_rq_queued(p)) {
		struct rq *src_rq, *dst_rq;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		check_preempt_curr(dst_rq, p, 0);
	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous cpu our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);
	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);

	return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = task_cpu(cur),
		.dst_task = p,
		.dst_cpu = task_cpu(p),
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, queued;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);

/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		switch (state) {
		case cpuset:
			/* No more Mr. Nice Guy. */
			cpuset_cpus_allowed_fallback(p);
			state = possible;
			break;

		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
	lockdep_assert_held(&p->pi_lock);

	if (p->nr_cpus_allowed > 1)
		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}

#else

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
					 const struct cpumask *new_mask, bool check)
{
	return set_cpus_allowed_ptr(p, new_mask);
}

#endif /* CONFIG_SMP */

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
	schedstat_inc(p, se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = TASK_ON_RQ_QUEUED;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	check_preempt_curr(rq, p, wake_flags);
	p->state = TASK_RUNNING;
	trace_sched_wakeup(p);

#ifdef CONFIG_SMP
	if (p->sched_class->task_woken) {
		/*
		 * Our task @p is fully woken up and running; so it's safe to
		 * drop the rq->lock, hereafter rq is only used for statistics.
		 */
		lockdep_unpin_lock(&rq->lock);
		p->sched_class->task_woken(rq, p);
		lockdep_pin_lock(&rq->lock);
	}

	if (rq->idle_stamp) {
		u64 delta = rq_clock(rq) - rq->idle_stamp;
		u64 max = 2*rq->max_idle_balance_cost;

		update_avg(&rq->avg_idle, delta);

		if (rq->avg_idle > max)
			rq->avg_idle = max;

		rq->idle_stamp = 0;
	}
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
	lockdep_assert_held(&rq->lock);

#ifdef CONFIG_SMP
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}

/*
 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, since
 * the task is still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (task_on_rq_queued(p)) {
		/* check_preempt_curr() may use rq clock */
		update_rq_clock(rq);
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;
}

#ifdef CONFIG_SMP
void sched_ttwu_pending(void)
{
	struct rq *rq = this_rq();
	struct llist_node *llist = llist_del_all(&rq->wake_list);
	struct task_struct *p;
	unsigned long flags;

	if (!llist)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	lockdep_pin_lock(&rq->lock);

	while (llist) {
		p = llist_entry(llist, struct task_struct, wake_entry);
		llist = llist_next(llist);
		ttwu_do_activate(rq, p, 0);
	}

	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

void scheduler_ipi(void)
{
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
	preempt_fold_need_resched();

	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
		return;

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
	 * path. Now that we actually do some work, we need to make sure
	 * we do call them.
	 *
	 * Some archs already do call them, luckily irq_enter/exit nest
	 * properly.
	 *
	 * Arguably we should visit all archs and update all handlers,
	 * however a fair share of IPIs are still resched only so this would
	 * somewhat pessimize the simple resched case.
	 */
	irq_enter();
	sched_ttwu_pending();

	/*
	 * Check if someone kicked us for doing the nohz idle load balance.
	 */
	if (unlikely(got_nohz_idle_kick())) {
		this_rq()->idle_balance = 1;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
	irq_exit();
}

static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
		if (!set_nr_if_polling(rq->idle))
			smp_send_reschedule(cpu);
		else
			trace_sched_wake_idle_without_ipi(cpu);
	}
}

void wake_up_if_idle(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	rcu_read_lock();

	if (!is_idle_task(rcu_dereference(rq->curr)))
		goto out;

	if (set_nr_if_polling(rq->idle)) {
		trace_sched_wake_idle_without_ipi(cpu);
	} else {
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (is_idle_task(rq->curr))
			smp_send_reschedule(cpu);
		/* Else cpu is not in idle, do nothing here */
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

out:
	rcu_read_unlock();
}

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif /* CONFIG_SMP */

static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

	raw_spin_lock(&rq->lock);
	lockdep_pin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock(&rq->lock);
}

/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Return: %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int cpu, success = 0;

	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller can not be
	 * reordered with p->state check below. This pairs with mb() in
	 * set_current_state() the waiting thread does.
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	if (!(p->state & state))
		goto out;

	trace_sched_waking(p);

	success = 1; /* we're going to change ->state */
	cpu = task_cpu(p);

	if (p->on_rq && ttwu_remote(p, wake_flags))
		goto stat;

#ifdef CONFIG_SMP
	/*
	 * If the owning (remote) cpu is still in the middle of schedule() with
	 * this task as prev, wait until it's done referencing the task.
	 */
	while (p->on_cpu)
		cpu_relax();
	/*
	 * Pairs with the smp_wmb() in finish_lock_switch().
	 */
	smp_rmb();

	p->sched_contributes_to_load = !!task_contributes_to_load(p);
	p->state = TASK_WAKING;

	if (p->sched_class->task_waking)
		p->sched_class->task_waking(p);

	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
	if (task_cpu(p) != cpu) {
		wake_flags |= WF_MIGRATED;
		set_task_cpu(p, cpu);
	}
#endif /* CONFIG_SMP */

	ttwu_queue(p, cpu);
stat:
	ttwu_stat(p, cpu, wake_flags);
out:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return success;
}

/**
 * try_to_wake_up_local - try to wake up a local task with rq lock held
 * @p: the thread to be awakened
 *
 * Put @p on the run-queue if it's not already there. The caller must
 * ensure that this_rq() is locked, @p is bound to this_rq() and not
 * the current task.
 */
static void try_to_wake_up_local(struct task_struct *p)
{
	struct rq *rq = task_rq(p);

	if (WARN_ON_ONCE(rq != this_rq()) ||
	    WARN_ON_ONCE(p == current))
		return;

	lockdep_assert_held(&rq->lock);

	if (!raw_spin_trylock(&p->pi_lock)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we've
		 * not yet picked a replacement task.
		 */
		lockdep_unpin_lock(&rq->lock);
		raw_spin_unlock(&rq->lock);
		raw_spin_lock(&p->pi_lock);
		raw_spin_lock(&rq->lock);
		lockdep_pin_lock(&rq->lock);
	}

	if (!(p->state & TASK_NORMAL))
		goto out;

	trace_sched_waking(p);

	if (!task_on_rq_queued(p))
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);

	ttwu_do_wakeup(rq, p, 0);
	ttwu_stat(p, smp_processor_id(), 0);
out:
	raw_spin_unlock(&p->pi_lock);
}

/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes.
 *
 * Return: 1 if the process was woken up, 0 if it was already running.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
int wake_up_process(struct task_struct *p)
{
	WARN_ON(task_is_stopped_or_traced(p));
	return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);

int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}

/*
 * This function clears the sched_dl_entity static params.
 */
void __dl_clear_params(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;

	dl_se->dl_runtime = 0;
	dl_se->dl_deadline = 0;
	dl_se->dl_period = 0;
	dl_se->flags = 0;
	dl_se->dl_bw = 0;

	dl_se->dl_throttled = 0;
	dl_se->dl_new = 1;
	dl_se->dl_yielded = 0;
}

/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	p->on_rq = 0;

	p->se.on_rq = 0;
	p->se.exec_start = 0;
	p->se.sum_exec_runtime = 0;
	p->se.prev_sum_exec_runtime = 0;
	p->se.nr_migrations = 0;
	p->se.vruntime = 0;
	INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif

	RB_CLEAR_NODE(&p->dl.rb_node);
	init_dl_task_timer(&p->dl);
	__dl_clear_params(p);

	INIT_LIST_HEAD(&p->rt.run_list);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif

#ifdef CONFIG_NUMA_BALANCING
	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
		p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
		p->mm->numa_scan_seq = 0;
	}

	if (clone_flags & CLONE_VM)
		p->numa_preferred_nid = current->numa_preferred_nid;
	else
		p->numa_preferred_nid = -1;

	p->node_stamp = 0ULL;
	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
	p->numa_work.next = &p->numa_work;
	p->numa_faults = NULL;
	p->last_task_numa_placement = 0;
	p->last_sum_exec_runtime = 0;

	p->numa_group = NULL;
#endif /* CONFIG_NUMA_BALANCING */
}

#ifdef CONFIG_NUMA_BALANCING
#ifdef CONFIG_SCHED_DEBUG
void set_numabalancing_state(bool enabled)
{
	if (enabled)
		sched_feat_set("NUMA");
	else
		sched_feat_set("NO_NUMA");
}
#else
__read_mostly bool numabalancing_enabled;

void set_numabalancing_state(bool enabled)
{
	numabalancing_enabled = enabled;
}
#endif /* CONFIG_SCHED_DEBUG */

#ifdef CONFIG_PROC_SYSCTL
int sysctl_numa_balancing(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int err;
	int state = numabalancing_enabled;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	t = *table;
	t.data = &state;
	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;
	if (write)
		set_numabalancing_state(state);
	return err;
}
#endif
#endif

/*
 * fork()/clone()-time setup:
 */
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long flags;
	int cpu = get_cpu();

	__sched_fork(clone_flags, p);
	/*
	 * We mark the process as running here. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;

	/*
	 * Revert to default priority/policy on fork if requested.
2181 */ 2182 if (unlikely(p->sched_reset_on_fork)) { 2183 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 2184 p->policy = SCHED_NORMAL; 2185 p->static_prio = NICE_TO_PRIO(0); 2186 p->rt_priority = 0; 2187 } else if (PRIO_TO_NICE(p->static_prio) < 0) 2188 p->static_prio = NICE_TO_PRIO(0); 2189 2190 p->prio = p->normal_prio = __normal_prio(p); 2191 set_load_weight(p); 2192 2193 /* 2194 * We don't need the reset flag anymore after the fork. It has 2195 * fulfilled its duty: 2196 */ 2197 p->sched_reset_on_fork = 0; 2198 } 2199 2200 if (dl_prio(p->prio)) { 2201 put_cpu(); 2202 return -EAGAIN; 2203 } else if (rt_prio(p->prio)) { 2204 p->sched_class = &rt_sched_class; 2205 } else { 2206 p->sched_class = &fair_sched_class; 2207 } 2208 2209 if (p->sched_class->task_fork) 2210 p->sched_class->task_fork(p); 2211 2212 /* 2213 * The child is not yet in the pid-hash so no cgroup attach races, 2214 * and the cgroup is pinned to this child due to cgroup_fork() 2215 * is ran before sched_fork(). 2216 * 2217 * Silence PROVE_RCU. 2218 */ 2219 raw_spin_lock_irqsave(&p->pi_lock, flags); 2220 set_task_cpu(p, cpu); 2221 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2222 2223 #ifdef CONFIG_SCHED_INFO 2224 if (likely(sched_info_on())) 2225 memset(&p->sched_info, 0, sizeof(p->sched_info)); 2226 #endif 2227 #if defined(CONFIG_SMP) 2228 p->on_cpu = 0; 2229 #endif 2230 init_task_preempt_count(p); 2231 #ifdef CONFIG_SMP 2232 plist_node_init(&p->pushable_tasks, MAX_PRIO); 2233 RB_CLEAR_NODE(&p->pushable_dl_tasks); 2234 #endif 2235 2236 put_cpu(); 2237 return 0; 2238 } 2239 2240 unsigned long to_ratio(u64 period, u64 runtime) 2241 { 2242 if (runtime == RUNTIME_INF) 2243 return 1ULL << 20; 2244 2245 /* 2246 * Doing this here saves a lot of checks in all 2247 * the calling paths, and returning zero seems 2248 * safe for them anyway. 2249 */ 2250 if (period == 0) 2251 return 0; 2252 2253 return div64_u64(runtime << 20, period); 2254 } 2255 2256 #ifdef CONFIG_SMP 2257 inline struct dl_bw *dl_bw_of(int i) 2258 { 2259 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2260 "sched RCU must be held"); 2261 return &cpu_rq(i)->rd->dl_bw; 2262 } 2263 2264 static inline int dl_bw_cpus(int i) 2265 { 2266 struct root_domain *rd = cpu_rq(i)->rd; 2267 int cpus = 0; 2268 2269 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2270 "sched RCU must be held"); 2271 for_each_cpu_and(i, rd->span, cpu_active_mask) 2272 cpus++; 2273 2274 return cpus; 2275 } 2276 #else 2277 inline struct dl_bw *dl_bw_of(int i) 2278 { 2279 return &cpu_rq(i)->dl.dl_bw; 2280 } 2281 2282 static inline int dl_bw_cpus(int i) 2283 { 2284 return 1; 2285 } 2286 #endif 2287 2288 /* 2289 * We must be sure that accepting a new task (or allowing changing the 2290 * parameters of an existing one) is consistent with the bandwidth 2291 * constraints. If yes, this function also accordingly updates the currently 2292 * allocated bandwidth to reflect the new situation. 2293 * 2294 * This function is called while holding p's rq->lock. 2295 * 2296 * XXX we should delay bw change until the task's 0-lag point, see 2297 * __setparam_dl(). 2298 */ 2299 static int dl_overflow(struct task_struct *p, int policy, 2300 const struct sched_attr *attr) 2301 { 2302 2303 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 2304 u64 period = attr->sched_period ?: attr->sched_deadline; 2305 u64 runtime = attr->sched_runtime; 2306 u64 new_bw = dl_policy(policy) ? 
to_ratio(period, runtime) : 0; 2307 int cpus, err = -1; 2308 2309 if (new_bw == p->dl.dl_bw) 2310 return 0; 2311 2312 /* 2313 * Whether a task enters, leaves, or stays -deadline but changes 2314 * its parameters, we may need to update the total allocated 2315 * bandwidth of the container accordingly. 2316 */ 2317 raw_spin_lock(&dl_b->lock); 2318 cpus = dl_bw_cpus(task_cpu(p)); 2319 if (dl_policy(policy) && !task_has_dl_policy(p) && 2320 !__dl_overflow(dl_b, cpus, 0, new_bw)) { 2321 __dl_add(dl_b, new_bw); 2322 err = 0; 2323 } else if (dl_policy(policy) && task_has_dl_policy(p) && 2324 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) { 2325 __dl_clear(dl_b, p->dl.dl_bw); 2326 __dl_add(dl_b, new_bw); 2327 err = 0; 2328 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { 2329 __dl_clear(dl_b, p->dl.dl_bw); 2330 err = 0; 2331 } 2332 raw_spin_unlock(&dl_b->lock); 2333 2334 return err; 2335 } 2336 2337 extern void init_dl_bw(struct dl_bw *dl_b); 2338 2339 /* 2340 * wake_up_new_task - wake up a newly created task for the first time. 2341 * 2342 * This function will do some initial scheduler statistics housekeeping 2343 * that must be done for every newly created context, then puts the task 2344 * on the runqueue and wakes it. 2345 */ 2346 void wake_up_new_task(struct task_struct *p) 2347 { 2348 unsigned long flags; 2349 struct rq *rq; 2350 2351 raw_spin_lock_irqsave(&p->pi_lock, flags); 2352 #ifdef CONFIG_SMP 2353 /* 2354 * Fork balancing, do it here and not earlier because: 2355 * - cpus_allowed can change in the fork path 2356 * - any previously selected cpu might disappear through hotplug 2357 */ 2358 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 2359 #endif 2360 2361 /* Initialize new task's runnable average */ 2362 init_entity_runnable_average(&p->se); 2363 rq = __task_rq_lock(p); 2364 activate_task(rq, p, 0); 2365 p->on_rq = TASK_ON_RQ_QUEUED; 2366 trace_sched_wakeup_new(p); 2367 check_preempt_curr(rq, p, WF_FORK); 2368 #ifdef CONFIG_SMP 2369 if (p->sched_class->task_woken) { 2370 /* 2371 * Nothing relies on rq->lock after this, so it's fine to 2372 * drop it. 2373 */ 2374 lockdep_unpin_lock(&rq->lock); 2375 p->sched_class->task_woken(rq, p); 2376 lockdep_pin_lock(&rq->lock); 2377 } 2378 #endif 2379 task_rq_unlock(rq, p, &flags); 2380 } 2381 2382 #ifdef CONFIG_PREEMPT_NOTIFIERS 2383 2384 static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE; 2385 2386 void preempt_notifier_inc(void) 2387 { 2388 static_key_slow_inc(&preempt_notifier_key); 2389 } 2390 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 2391 2392 void preempt_notifier_dec(void) 2393 { 2394 static_key_slow_dec(&preempt_notifier_key); 2395 } 2396 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 2397 2398 /** 2399 * preempt_notifier_register - tell me when current is being preempted & rescheduled 2400 * @notifier: notifier struct to register 2401 */ 2402 void preempt_notifier_register(struct preempt_notifier *notifier) 2403 { 2404 if (!static_key_false(&preempt_notifier_key)) 2405 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 2406 2407 hlist_add_head(&notifier->link, &current->preempt_notifiers); 2408 } 2409 EXPORT_SYMBOL_GPL(preempt_notifier_register); 2410 2411 /** 2412 * preempt_notifier_unregister - no longer interested in preemption notifications 2413 * @notifier: notifier struct to unregister 2414 * 2415 * This is *not* safe to call from within a preemption notifier.
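 *
 * Typical usage, as an illustrative sketch ("my_ops", "my_sched_in" and
 * "my_sched_out" are made-up names; preempt_notifier_init() and
 * struct preempt_ops come from <linux/preempt.h>):
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *	struct preempt_notifier pn;
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&pn, &my_ops);
 *	preempt_notifier_register(&pn);		- registers for current
 *	...
 *	preempt_notifier_unregister(&pn);	- but not from a notifier
 *	preempt_notifier_dec();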
2416 */ 2417 void preempt_notifier_unregister(struct preempt_notifier *notifier) 2418 { 2419 hlist_del(&notifier->link); 2420 } 2421 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 2422 2423 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 2424 { 2425 struct preempt_notifier *notifier; 2426 2427 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2428 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 2429 } 2430 2431 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2432 { 2433 if (static_key_false(&preempt_notifier_key)) 2434 __fire_sched_in_preempt_notifiers(curr); 2435 } 2436 2437 static void 2438 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 2439 struct task_struct *next) 2440 { 2441 struct preempt_notifier *notifier; 2442 2443 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2444 notifier->ops->sched_out(notifier, next); 2445 } 2446 2447 static __always_inline void 2448 fire_sched_out_preempt_notifiers(struct task_struct *curr, 2449 struct task_struct *next) 2450 { 2451 if (static_key_false(&preempt_notifier_key)) 2452 __fire_sched_out_preempt_notifiers(curr, next); 2453 } 2454 2455 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 2456 2457 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2458 { 2459 } 2460 2461 static inline void 2462 fire_sched_out_preempt_notifiers(struct task_struct *curr, 2463 struct task_struct *next) 2464 { 2465 } 2466 2467 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 2468 2469 /** 2470 * prepare_task_switch - prepare to switch tasks 2471 * @rq: the runqueue preparing to switch 2472 * @prev: the current task that is being switched out 2473 * @next: the task we are going to switch to. 2474 * 2475 * This is called with the rq lock held and interrupts off. It must 2476 * be paired with a subsequent finish_task_switch after the context 2477 * switch. 2478 * 2479 * prepare_task_switch sets up locking and calls architecture-specific 2480 * hooks. 2481 */ 2482 static inline void 2483 prepare_task_switch(struct rq *rq, struct task_struct *prev, 2484 struct task_struct *next) 2485 { 2486 trace_sched_switch(prev, next); 2487 sched_info_switch(rq, prev, next); 2488 perf_event_task_sched_out(prev, next); 2489 fire_sched_out_preempt_notifiers(prev, next); 2490 prepare_lock_switch(rq, next); 2491 prepare_arch_switch(next); 2492 } 2493 2494 /** 2495 * finish_task_switch - clean up after a task-switch 2496 * @prev: the thread we just switched away from. 2497 * 2498 * finish_task_switch must be called after the context switch, paired 2499 * with a prepare_task_switch call before the context switch. 2500 * finish_task_switch will reconcile locking set up by prepare_task_switch, 2501 * and do any other architecture-specific cleanup actions. 2502 * 2503 * Note that we may have delayed dropping an mm in context_switch(). If 2504 * so, we finish that here outside of the runqueue lock. (Doing it 2505 * with the lock held can cause deadlocks; see schedule() for 2506 * details.) 2507 * 2508 * The context switch has flipped the stack from under us and restored the 2509 * local variables which were saved when this task called schedule() in the 2510 * past. prev == current is still correct but we need to recalculate this_rq 2511 * because prev may have moved to another CPU.
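 *
 * The pairing as it appears in context_switch() below, as a sketch:
 *
 *	prepare_task_switch(rq, prev, next);
 *	switch_to(prev, next, prev);
 *	barrier();
 *	return finish_task_switch(prev);	- runs on the new task's stack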
2512 */ 2513 static struct rq *finish_task_switch(struct task_struct *prev) 2514 __releases(rq->lock) 2515 { 2516 struct rq *rq = this_rq(); 2517 struct mm_struct *mm = rq->prev_mm; 2518 long prev_state; 2519 2520 rq->prev_mm = NULL; 2521 2522 /* 2523 * A task struct has one reference for the use as "current". 2524 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 2525 * schedule one last time. The schedule call will never return, and 2526 * the scheduled task must drop that reference. 2527 * 2528 * We must observe prev->state before clearing prev->on_cpu (in 2529 * finish_lock_switch), otherwise a concurrent wakeup can get prev 2530 * running on another CPU and we could race with its RUNNING -> DEAD 2531 * transition, resulting in a double drop. 2532 */ 2533 prev_state = prev->state; 2534 vtime_task_switch(prev); 2535 perf_event_task_sched_in(prev, current); 2536 finish_lock_switch(rq, prev); 2537 finish_arch_post_lock_switch(); 2538 2539 fire_sched_in_preempt_notifiers(current); 2540 if (mm) 2541 mmdrop(mm); 2542 if (unlikely(prev_state == TASK_DEAD)) { 2543 if (prev->sched_class->task_dead) 2544 prev->sched_class->task_dead(prev); 2545 2546 /* 2547 * Remove function-return probe instances associated with this 2548 * task and put them back on the free list. 2549 */ 2550 kprobe_flush_task(prev); 2551 put_task_struct(prev); 2552 } 2553 2554 tick_nohz_task_switch(); 2555 return rq; 2556 } 2557 2558 #ifdef CONFIG_SMP 2559 2560 /* rq->lock is NOT held, but preemption is disabled */ 2561 static void __balance_callback(struct rq *rq) 2562 { 2563 struct callback_head *head, *next; 2564 void (*func)(struct rq *rq); 2565 unsigned long flags; 2566 2567 raw_spin_lock_irqsave(&rq->lock, flags); 2568 head = rq->balance_callback; 2569 rq->balance_callback = NULL; 2570 while (head) { 2571 func = (void (*)(struct rq *))head->func; 2572 next = head->next; 2573 head->next = NULL; 2574 head = next; 2575 2576 func(rq); 2577 } 2578 raw_spin_unlock_irqrestore(&rq->lock, flags); 2579 } 2580 2581 static inline void balance_callback(struct rq *rq) 2582 { 2583 if (unlikely(rq->balance_callback)) 2584 __balance_callback(rq); 2585 } 2586 2587 #else 2588 2589 static inline void balance_callback(struct rq *rq) 2590 { 2591 } 2592 2593 #endif 2594 2595 /** 2596 * schedule_tail - first thing a freshly forked thread must call. 2597 * @prev: the thread we just switched away from. 2598 */ 2599 asmlinkage __visible void schedule_tail(struct task_struct *prev) 2600 __releases(rq->lock) 2601 { 2602 struct rq *rq; 2603 2604 /* finish_task_switch() drops rq->lock and enables preemption */ 2605 preempt_disable(); 2606 rq = finish_task_switch(prev); 2607 balance_callback(rq); 2608 preempt_enable(); 2609 2610 if (current->set_child_tid) 2611 put_user(task_pid_vnr(current), current->set_child_tid); 2612 } 2613 2614 /* 2615 * context_switch - switch to the new MM and the new thread's register state. 2616 */ 2617 static inline struct rq * 2618 context_switch(struct rq *rq, struct task_struct *prev, 2619 struct task_struct *next) 2620 { 2621 struct mm_struct *mm, *oldmm; 2622 2623 prepare_task_switch(rq, prev, next); 2624 2625 mm = next->mm; 2626 oldmm = prev->active_mm; 2627 /* 2628 * For paravirt, this is coupled with an exit in switch_to to 2629 * combine the page table reload and the switch backend into 2630 * one hypercall.
2631 */ 2632 arch_start_context_switch(prev); 2633 2634 if (!mm) { 2635 next->active_mm = oldmm; 2636 atomic_inc(&oldmm->mm_count); 2637 enter_lazy_tlb(oldmm, next); 2638 } else 2639 switch_mm(oldmm, mm, next); 2640 2641 if (!prev->mm) { 2642 prev->active_mm = NULL; 2643 rq->prev_mm = oldmm; 2644 } 2645 /* 2646 * Since the runqueue lock will be released by the next 2647 * task (which is an invalid locking op but in the case 2648 * of the scheduler it's an obvious special-case), so we 2649 * do an early lockdep release here: 2650 */ 2651 lockdep_unpin_lock(&rq->lock); 2652 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 2653 2654 /* Here we just switch the register state and the stack. */ 2655 switch_to(prev, next, prev); 2656 barrier(); 2657 2658 return finish_task_switch(prev); 2659 } 2660 2661 /* 2662 * nr_running and nr_context_switches: 2663 * 2664 * externally visible scheduler statistics: current number of runnable 2665 * threads, total number of context switches performed since bootup. 2666 */ 2667 unsigned long nr_running(void) 2668 { 2669 unsigned long i, sum = 0; 2670 2671 for_each_online_cpu(i) 2672 sum += cpu_rq(i)->nr_running; 2673 2674 return sum; 2675 } 2676 2677 /* 2678 * Check if only the current task is running on the cpu. 2679 * 2680 * Caution: this function does not check that the caller has disabled 2681 * preemption, thus the result might have a time-of-check-to-time-of-use 2682 * race. The caller is responsible to use it correctly, for example: 2683 * 2684 * - from a non-preemptable section (of course) 2685 * 2686 * - from a thread that is bound to a single CPU 2687 * 2688 * - in a loop with very short iterations (e.g. a polling loop) 2689 */ 2690 bool single_task_running(void) 2691 { 2692 return raw_rq()->nr_running == 1; 2693 } 2694 EXPORT_SYMBOL(single_task_running); 2695 2696 unsigned long long nr_context_switches(void) 2697 { 2698 int i; 2699 unsigned long long sum = 0; 2700 2701 for_each_possible_cpu(i) 2702 sum += cpu_rq(i)->nr_switches; 2703 2704 return sum; 2705 } 2706 2707 unsigned long nr_iowait(void) 2708 { 2709 unsigned long i, sum = 0; 2710 2711 for_each_possible_cpu(i) 2712 sum += atomic_read(&cpu_rq(i)->nr_iowait); 2713 2714 return sum; 2715 } 2716 2717 unsigned long nr_iowait_cpu(int cpu) 2718 { 2719 struct rq *this = cpu_rq(cpu); 2720 return atomic_read(&this->nr_iowait); 2721 } 2722 2723 void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) 2724 { 2725 struct rq *rq = this_rq(); 2726 *nr_waiters = atomic_read(&rq->nr_iowait); 2727 *load = rq->load.weight; 2728 } 2729 2730 #ifdef CONFIG_SMP 2731 2732 /* 2733 * sched_exec - execve() is a valuable balancing opportunity, because at 2734 * this point the task has the smallest effective memory and cache footprint. 
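 *
 * Mechanically (see the body below): pick a CPU with
 * select_task_rq(..., SD_BALANCE_EXEC, 0) and, if it differs from the
 * current CPU and is active, hand a struct migration_arg to
 * stop_one_cpu() to carry out the move.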
2735 */ 2736 void sched_exec(void) 2737 { 2738 struct task_struct *p = current; 2739 unsigned long flags; 2740 int dest_cpu; 2741 2742 raw_spin_lock_irqsave(&p->pi_lock, flags); 2743 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); 2744 if (dest_cpu == smp_processor_id()) 2745 goto unlock; 2746 2747 if (likely(cpu_active(dest_cpu))) { 2748 struct migration_arg arg = { p, dest_cpu }; 2749 2750 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2751 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 2752 return; 2753 } 2754 unlock: 2755 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2756 } 2757 2758 #endif 2759 2760 DEFINE_PER_CPU(struct kernel_stat, kstat); 2761 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 2762 2763 EXPORT_PER_CPU_SYMBOL(kstat); 2764 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 2765 2766 /* 2767 * Return accounted runtime for the task. 2768 * In case the task is currently running, return the runtime plus current's 2769 * pending runtime that has not been accounted yet. 2770 */ 2771 unsigned long long task_sched_runtime(struct task_struct *p) 2772 { 2773 unsigned long flags; 2774 struct rq *rq; 2775 u64 ns; 2776 2777 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 2778 /* 2779 * 64-bit doesn't need locks to atomically read a 64-bit value. 2780 * So we have an optimization chance when the task's delta_exec is 0. 2781 * Reading ->on_cpu is racy, but this is ok. 2782 * 2783 * If we race with it leaving cpu, we'll take a lock. So we're correct. 2784 * If we race with it entering cpu, unaccounted time is 0. This is 2785 * indistinguishable from the read occurring a few cycles earlier. 2786 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 2787 * been accounted, so we're correct here as well. 2788 */ 2789 if (!p->on_cpu || !task_on_rq_queued(p)) 2790 return p->se.sum_exec_runtime; 2791 #endif 2792 2793 rq = task_rq_lock(p, &flags); 2794 /* 2795 * Must be ->curr _and_ ->on_rq. If dequeued, we would 2796 * project cycles that may never be accounted to this 2797 * thread, breaking clock_gettime(). 2798 */ 2799 if (task_current(rq, p) && task_on_rq_queued(p)) { 2800 update_rq_clock(rq); 2801 p->sched_class->update_curr(rq); 2802 } 2803 ns = p->se.sum_exec_runtime; 2804 task_rq_unlock(rq, p, &flags); 2805 2806 return ns; 2807 } 2808 2809 /* 2810 * This function gets called by the timer code, with HZ frequency. 2811 * We call it with interrupts disabled. 2812 */ 2813 void scheduler_tick(void) 2814 { 2815 int cpu = smp_processor_id(); 2816 struct rq *rq = cpu_rq(cpu); 2817 struct task_struct *curr = rq->curr; 2818 2819 sched_clock_tick(); 2820 2821 raw_spin_lock(&rq->lock); 2822 update_rq_clock(rq); 2823 curr->sched_class->task_tick(rq, curr, 0); 2824 update_cpu_load_active(rq); 2825 calc_global_load_tick(rq); 2826 raw_spin_unlock(&rq->lock); 2827 2828 perf_event_task_tick(); 2829 2830 #ifdef CONFIG_SMP 2831 rq->idle_balance = idle_cpu(cpu); 2832 trigger_load_balance(rq); 2833 #endif 2834 rq_last_tick_reset(rq); 2835 } 2836 2837 #ifdef CONFIG_NO_HZ_FULL 2838 /** 2839 * scheduler_tick_max_deferment 2840 * 2841 * Keep at least one tick per second when a single 2842 * active task is running because the scheduler doesn't 2843 * yet completely support a full dynticks environment. 2844 * 2845 * This makes sure that uptime, CFS vruntime, load 2846 * balancing, etc... continue to move forward, even 2847 * with a very low granularity. 2848 * 2849 * Return: Maximum deferment in nanoseconds.
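 *
 * E.g. with the last tick seen at jiffy J, the next forced tick is at
 * J + HZ, so the returned deferment never exceeds one second's worth
 * of nanoseconds, independent of the HZ setting.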
2850 */ 2851 u64 scheduler_tick_max_deferment(void) 2852 { 2853 struct rq *rq = this_rq(); 2854 unsigned long next, now = READ_ONCE(jiffies); 2855 2856 next = rq->last_sched_tick + HZ; 2857 2858 if (time_before_eq(next, now)) 2859 return 0; 2860 2861 return jiffies_to_nsecs(next - now); 2862 } 2863 #endif 2864 2865 notrace unsigned long get_parent_ip(unsigned long addr) 2866 { 2867 if (in_lock_functions(addr)) { 2868 addr = CALLER_ADDR2; 2869 if (in_lock_functions(addr)) 2870 addr = CALLER_ADDR3; 2871 } 2872 return addr; 2873 } 2874 2875 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ 2876 defined(CONFIG_PREEMPT_TRACER)) 2877 2878 void preempt_count_add(int val) 2879 { 2880 #ifdef CONFIG_DEBUG_PREEMPT 2881 /* 2882 * Underflow? 2883 */ 2884 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 2885 return; 2886 #endif 2887 __preempt_count_add(val); 2888 #ifdef CONFIG_DEBUG_PREEMPT 2889 /* 2890 * Spinlock count overflowing soon? 2891 */ 2892 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 2893 PREEMPT_MASK - 10); 2894 #endif 2895 if (preempt_count() == val) { 2896 unsigned long ip = get_parent_ip(CALLER_ADDR1); 2897 #ifdef CONFIG_DEBUG_PREEMPT 2898 current->preempt_disable_ip = ip; 2899 #endif 2900 trace_preempt_off(CALLER_ADDR0, ip); 2901 } 2902 } 2903 EXPORT_SYMBOL(preempt_count_add); 2904 NOKPROBE_SYMBOL(preempt_count_add); 2905 2906 void preempt_count_sub(int val) 2907 { 2908 #ifdef CONFIG_DEBUG_PREEMPT 2909 /* 2910 * Underflow? 2911 */ 2912 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 2913 return; 2914 /* 2915 * Is the spinlock portion underflowing? 2916 */ 2917 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 2918 !(preempt_count() & PREEMPT_MASK))) 2919 return; 2920 #endif 2921 2922 if (preempt_count() == val) 2923 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); 2924 __preempt_count_sub(val); 2925 } 2926 EXPORT_SYMBOL(preempt_count_sub); 2927 NOKPROBE_SYMBOL(preempt_count_sub); 2928 2929 #endif 2930 2931 /* 2932 * Print scheduling while atomic bug: 2933 */ 2934 static noinline void __schedule_bug(struct task_struct *prev) 2935 { 2936 if (oops_in_progress) 2937 return; 2938 2939 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 2940 prev->comm, prev->pid, preempt_count()); 2941 2942 debug_show_held_locks(prev); 2943 print_modules(); 2944 if (irqs_disabled()) 2945 print_irqtrace_events(prev); 2946 #ifdef CONFIG_DEBUG_PREEMPT 2947 if (in_atomic_preempt_off()) { 2948 pr_err("Preemption disabled at:"); 2949 print_ip_sym(current->preempt_disable_ip); 2950 pr_cont("\n"); 2951 } 2952 #endif 2953 dump_stack(); 2954 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 2955 } 2956 2957 /* 2958 * Various schedule()-time debugging checks and statistics: 2959 */ 2960 static inline void schedule_debug(struct task_struct *prev) 2961 { 2962 #ifdef CONFIG_SCHED_STACK_END_CHECK 2963 BUG_ON(unlikely(task_stack_end_corrupted(prev))); 2964 #endif 2965 /* 2966 * Test if we are atomic. Since do_exit() needs to call into 2967 * schedule() atomically, we ignore that path. Otherwise whine 2968 * if we are scheduling when we should not. 
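 *
 * A typical way to trip this check is sleeping under a spinlock
 * (illustrative sketch, "lock" is made up):
 *
 *	spin_lock(&lock);
 *	msleep(10);		- schedules with preemption disabled
 *	spin_unlock(&lock);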
2969 */ 2970 if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD)) 2971 __schedule_bug(prev); 2972 rcu_sleep_check(); 2973 2974 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 2975 2976 schedstat_inc(this_rq(), sched_count); 2977 } 2978 2979 /* 2980 * Pick up the highest-prio task: 2981 */ 2982 static inline struct task_struct * 2983 pick_next_task(struct rq *rq, struct task_struct *prev) 2984 { 2985 const struct sched_class *class = &fair_sched_class; 2986 struct task_struct *p; 2987 2988 /* 2989 * Optimization: we know that if all tasks are in 2990 * the fair class we can call that function directly: 2991 */ 2992 if (likely(prev->sched_class == class && 2993 rq->nr_running == rq->cfs.h_nr_running)) { 2994 p = fair_sched_class.pick_next_task(rq, prev); 2995 if (unlikely(p == RETRY_TASK)) 2996 goto again; 2997 2998 /* assumes fair_sched_class->next == idle_sched_class */ 2999 if (unlikely(!p)) 3000 p = idle_sched_class.pick_next_task(rq, prev); 3001 3002 return p; 3003 } 3004 3005 again: 3006 for_each_class(class) { 3007 p = class->pick_next_task(rq, prev); 3008 if (p) { 3009 if (unlikely(p == RETRY_TASK)) 3010 goto again; 3011 return p; 3012 } 3013 } 3014 3015 BUG(); /* the idle class will always have a runnable task */ 3016 } 3017 3018 /* 3019 * __schedule() is the main scheduler function. 3020 * 3021 * The main means of driving the scheduler and thus entering this function are: 3022 * 3023 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 3024 * 3025 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 3026 * paths. For example, see arch/x86/entry_64.S. 3027 * 3028 * To drive preemption between tasks, the scheduler sets the flag in timer 3029 * interrupt handler scheduler_tick(). 3030 * 3031 * 3. Wakeups don't really cause entry into schedule(). They add a 3032 * task to the run-queue and that's it. 3033 * 3034 * Now, if the new task added to the run-queue preempts the current 3035 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 3036 * called on the nearest possible occasion: 3037 * 3038 * - If the kernel is preemptible (CONFIG_PREEMPT=y): 3039 * 3040 * - in syscall or exception context, at the next outmost 3041 * preempt_enable(). (this might be as soon as the wake_up()'s 3042 * spin_unlock()!) 3043 * 3044 * - in IRQ context, return from interrupt-handler to 3045 * preemptible context 3046 * 3047 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) 3048 * then at the next: 3049 * 3050 * - cond_resched() call 3051 * - explicit schedule() call 3052 * - return from syscall or exception to user-space 3053 * - return from interrupt-handler to user-space 3054 * 3055 * WARNING: must be called with preemption disabled! 3056 */ 3057 static void __sched __schedule(void) 3058 { 3059 struct task_struct *prev, *next; 3060 unsigned long *switch_count; 3061 struct rq *rq; 3062 int cpu; 3063 3064 cpu = smp_processor_id(); 3065 rq = cpu_rq(cpu); 3066 rcu_note_context_switch(); 3067 prev = rq->curr; 3068 3069 schedule_debug(prev); 3070 3071 if (sched_feat(HRTICK)) 3072 hrtick_clear(rq); 3073 3074 /* 3075 * Make sure that signal_pending_state()->signal_pending() below 3076 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 3077 * done by the caller to avoid the race with signal_wake_up(). 
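 *
 * The caller-side pattern this pairs with is the usual wait loop
 * (sketch; wait-queue handling elided):
 *
 *	for (;;) {
 *		__set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition || signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);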
3078 */ 3079 smp_mb__before_spinlock(); 3080 raw_spin_lock_irq(&rq->lock); 3081 lockdep_pin_lock(&rq->lock); 3082 3083 rq->clock_skip_update <<= 1; /* promote REQ to ACT */ 3084 3085 switch_count = &prev->nivcsw; 3086 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { 3087 if (unlikely(signal_pending_state(prev->state, prev))) { 3088 prev->state = TASK_RUNNING; 3089 } else { 3090 deactivate_task(rq, prev, DEQUEUE_SLEEP); 3091 prev->on_rq = 0; 3092 3093 /* 3094 * If a worker went to sleep, notify and ask workqueue 3095 * whether it wants to wake up a task to maintain 3096 * concurrency. 3097 */ 3098 if (prev->flags & PF_WQ_WORKER) { 3099 struct task_struct *to_wakeup; 3100 3101 to_wakeup = wq_worker_sleeping(prev, cpu); 3102 if (to_wakeup) 3103 try_to_wake_up_local(to_wakeup); 3104 } 3105 } 3106 switch_count = &prev->nvcsw; 3107 } 3108 3109 if (task_on_rq_queued(prev)) 3110 update_rq_clock(rq); 3111 3112 next = pick_next_task(rq, prev); 3113 clear_tsk_need_resched(prev); 3114 clear_preempt_need_resched(); 3115 rq->clock_skip_update = 0; 3116 3117 if (likely(prev != next)) { 3118 rq->nr_switches++; 3119 rq->curr = next; 3120 ++*switch_count; 3121 3122 rq = context_switch(rq, prev, next); /* unlocks the rq */ 3123 cpu = cpu_of(rq); 3124 } else { 3125 lockdep_unpin_lock(&rq->lock); 3126 raw_spin_unlock_irq(&rq->lock); 3127 } 3128 3129 balance_callback(rq); 3130 } 3131 3132 static inline void sched_submit_work(struct task_struct *tsk) 3133 { 3134 if (!tsk->state || tsk_is_pi_blocked(tsk)) 3135 return; 3136 /* 3137 * If we are going to sleep and we have plugged IO queued, 3138 * make sure to submit it to avoid deadlocks. 3139 */ 3140 if (blk_needs_flush_plug(tsk)) 3141 blk_schedule_flush_plug(tsk); 3142 } 3143 3144 asmlinkage __visible void __sched schedule(void) 3145 { 3146 struct task_struct *tsk = current; 3147 3148 sched_submit_work(tsk); 3149 do { 3150 preempt_disable(); 3151 __schedule(); 3152 sched_preempt_enable_no_resched(); 3153 } while (need_resched()); 3154 } 3155 EXPORT_SYMBOL(schedule); 3156 3157 #ifdef CONFIG_CONTEXT_TRACKING 3158 asmlinkage __visible void __sched schedule_user(void) 3159 { 3160 /* 3161 * If we come here after a random call to set_need_resched(), 3162 * or we have been woken up remotely but the IPI has not yet arrived, 3163 * we haven't yet exited the RCU idle mode. Do it here manually until 3164 * we find a better solution. 3165 * 3166 * NB: There are buggy callers of this function. Ideally we 3167 * should warn if prev_state != CONTEXT_USER, but that will trigger 3168 * too frequently to make sense yet. 3169 */ 3170 enum ctx_state prev_state = exception_enter(); 3171 schedule(); 3172 exception_exit(prev_state); 3173 } 3174 #endif 3175 3176 /** 3177 * schedule_preempt_disabled - called with preemption disabled 3178 * 3179 * Returns with preemption disabled. Note: preempt_count must be 1 3180 */ 3181 void __sched schedule_preempt_disabled(void) 3182 { 3183 sched_preempt_enable_no_resched(); 3184 schedule(); 3185 preempt_disable(); 3186 } 3187 3188 static void __sched notrace preempt_schedule_common(void) 3189 { 3190 do { 3191 preempt_active_enter(); 3192 __schedule(); 3193 preempt_active_exit(); 3194 3195 /* 3196 * Check again in case we missed a preemption opportunity 3197 * between schedule and now. 3198 */ 3199 } while (need_resched()); 3200 } 3201 3202 #ifdef CONFIG_PREEMPT 3203 /* 3204 * this is the entry point to schedule() from in-kernel preemption 3205 * off of preempt_enable. 
Kernel preemptions off return from interrupt 3206 * occur there and call schedule directly. 3207 */ 3208 asmlinkage __visible void __sched notrace preempt_schedule(void) 3209 { 3210 /* 3211 * If there is a non-zero preempt_count or interrupts are disabled, 3212 * we do not want to preempt the current task. Just return. 3213 */ 3214 if (likely(!preemptible())) 3215 return; 3216 3217 preempt_schedule_common(); 3218 } 3219 NOKPROBE_SYMBOL(preempt_schedule); 3220 EXPORT_SYMBOL(preempt_schedule); 3221 3222 /** 3223 * preempt_schedule_notrace - preempt_schedule called by tracing 3224 * 3225 * The tracing infrastructure uses preempt_enable_notrace to prevent 3226 * recursion and tracing preempt enabling caused by the tracing 3227 * infrastructure itself. But as tracing can happen in areas coming 3228 * from userspace or just about to enter userspace, a preempt enable 3229 * can occur before user_exit() is called. This will cause the scheduler 3230 * to be called when the system is still in usermode. 3231 * 3232 * To prevent this, preempt_enable_notrace() will use this function 3233 * instead of preempt_schedule() to exit user context if needed before 3234 * calling the scheduler. 3235 */ 3236 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 3237 { 3238 enum ctx_state prev_ctx; 3239 3240 if (likely(!preemptible())) 3241 return; 3242 3243 do { 3244 /* 3245 * Use raw __preempt_count() ops that don't call functions. 3246 * We can't call functions before disabling preemption, which 3247 * is what disarms preemption tracing recursion. 3248 */ 3249 __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); 3250 barrier(); 3251 /* 3252 * Needs preempt disabled in case user_exit() is traced 3253 * and the tracer calls preempt_enable_notrace() causing 3254 * an infinite recursion. 3255 */ 3256 prev_ctx = exception_enter(); 3257 __schedule(); 3258 exception_exit(prev_ctx); 3259 3260 barrier(); 3261 __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); 3262 } while (need_resched()); 3263 } 3264 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 3265 3266 #endif /* CONFIG_PREEMPT */ 3267 3268 /* 3269 * this is the entry point to schedule() from kernel preemption 3270 * off of irq context. 3271 * Note that this is called and returns with irqs disabled. This will 3272 * protect us against recursive calls from irq context. 3273 */ 3274 asmlinkage __visible void __sched preempt_schedule_irq(void) 3275 { 3276 enum ctx_state prev_state; 3277 3278 /* Catch callers which need to be fixed */ 3279 BUG_ON(preempt_count() || !irqs_disabled()); 3280 3281 prev_state = exception_enter(); 3282 3283 do { 3284 preempt_active_enter(); 3285 local_irq_enable(); 3286 __schedule(); 3287 local_irq_disable(); 3288 preempt_active_exit(); 3289 } while (need_resched()); 3290 3291 exception_exit(prev_state); 3292 } 3293 3294 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, 3295 void *key) 3296 { 3297 return try_to_wake_up(curr->private, mode, wake_flags); 3298 } 3299 EXPORT_SYMBOL(default_wake_function); 3300 3301 #ifdef CONFIG_RT_MUTEXES 3302 3303 /* 3304 * rt_mutex_setprio - set the current priority of a task 3305 * @p: task 3306 * @prio: prio value (kernel-internal form) 3307 * 3308 * This function changes the 'effective' priority of a task. It does 3309 * not touch ->normal_prio like __setscheduler(). 3310 * 3311 * Used by the rt_mutex code to implement priority inheritance 3312 * logic. Call site only calls if the priority of the task changed.
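 *
 * Example, as a rough sketch: a SCHED_NORMAL task owns an rt_mutex and
 * a SCHED_FIFO rt_priority-80 waiter blocks on it. The rt_mutex code
 * then boosts the owner with roughly
 *
 *	rt_mutex_setprio(owner, MAX_RT_PRIO - 1 - 80);
 *
 * and calls it again with the owner's normal_prio once the lock is
 * released, dropping the boost.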
3313 */ 3314 void rt_mutex_setprio(struct task_struct *p, int prio) 3315 { 3316 int oldprio, queued, running, enqueue_flag = 0; 3317 struct rq *rq; 3318 const struct sched_class *prev_class; 3319 3320 BUG_ON(prio > MAX_PRIO); 3321 3322 rq = __task_rq_lock(p); 3323 3324 /* 3325 * Idle task boosting is a no-no in general. There is one 3326 * exception, when PREEMPT_RT and NOHZ are active: 3327 * 3328 * The idle task calls get_next_timer_interrupt() and holds 3329 * the timer wheel base->lock on the CPU and another CPU wants 3330 * to access the timer (probably to cancel it). We can safely 3331 * ignore the boosting request, as the idle CPU runs this code 3332 * with interrupts disabled and will complete the lock 3333 * protected section without being interrupted. So there is no 3334 * real need to boost. 3335 */ 3336 if (unlikely(p == rq->idle)) { 3337 WARN_ON(p != rq->curr); 3338 WARN_ON(p->pi_blocked_on); 3339 goto out_unlock; 3340 } 3341 3342 trace_sched_pi_setprio(p, prio); 3343 oldprio = p->prio; 3344 prev_class = p->sched_class; 3345 queued = task_on_rq_queued(p); 3346 running = task_current(rq, p); 3347 if (queued) 3348 dequeue_task(rq, p, 0); 3349 if (running) 3350 put_prev_task(rq, p); 3351 3352 /* 3353 * Boosting conditions are: 3354 * 1. -rt task is running and holds mutex A 3355 * --> -dl task blocks on mutex A 3356 * 3357 * 2. -dl task is running and holds mutex A 3358 * --> -dl task blocks on mutex A and could preempt the 3359 * running task 3360 */ 3361 if (dl_prio(prio)) { 3362 struct task_struct *pi_task = rt_mutex_get_top_task(p); 3363 if (!dl_prio(p->normal_prio) || 3364 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { 3365 p->dl.dl_boosted = 1; 3366 enqueue_flag = ENQUEUE_REPLENISH; 3367 } else 3368 p->dl.dl_boosted = 0; 3369 p->sched_class = &dl_sched_class; 3370 } else if (rt_prio(prio)) { 3371 if (dl_prio(oldprio)) 3372 p->dl.dl_boosted = 0; 3373 if (oldprio < prio) 3374 enqueue_flag = ENQUEUE_HEAD; 3375 p->sched_class = &rt_sched_class; 3376 } else { 3377 if (dl_prio(oldprio)) 3378 p->dl.dl_boosted = 0; 3379 if (rt_prio(oldprio)) 3380 p->rt.timeout = 0; 3381 p->sched_class = &fair_sched_class; 3382 } 3383 3384 p->prio = prio; 3385 3386 if (running) 3387 p->sched_class->set_curr_task(rq); 3388 if (queued) 3389 enqueue_task(rq, p, enqueue_flag); 3390 3391 check_class_changed(rq, p, prev_class, oldprio); 3392 out_unlock: 3393 preempt_disable(); /* avoid rq from going away on us */ 3394 __task_rq_unlock(rq); 3395 3396 balance_callback(rq); 3397 preempt_enable(); 3398 } 3399 #endif 3400 3401 void set_user_nice(struct task_struct *p, long nice) 3402 { 3403 int old_prio, delta, queued; 3404 unsigned long flags; 3405 struct rq *rq; 3406 3407 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 3408 return; 3409 /* 3410 * We have to be careful, if called from sys_setpriority(), 3411 * the task might be in the middle of scheduling on another CPU.
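 *
 * For reference, NICE_TO_PRIO() maps nice -20..19 onto static_prio
 * 100..139 (nice + 120), so e.g. set_user_nice(p, -5) results in a
 * static_prio of 115.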
3412 */ 3413 rq = task_rq_lock(p, &flags); 3414 /* 3415 * The RT priorities are set via sched_setscheduler(), but we still 3416 * allow the 'normal' nice value to be set - but as expected 3417 * it won't have any effect on scheduling for as long as the task is 3418 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 3419 */ 3420 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 3421 p->static_prio = NICE_TO_PRIO(nice); 3422 goto out_unlock; 3423 } 3424 queued = task_on_rq_queued(p); 3425 if (queued) 3426 dequeue_task(rq, p, 0); 3427 3428 p->static_prio = NICE_TO_PRIO(nice); 3429 set_load_weight(p); 3430 old_prio = p->prio; 3431 p->prio = effective_prio(p); 3432 delta = p->prio - old_prio; 3433 3434 if (queued) { 3435 enqueue_task(rq, p, 0); 3436 /* 3437 * If the task increased its priority or is running and 3438 * lowered its priority, then reschedule its CPU: 3439 */ 3440 if (delta < 0 || (delta > 0 && task_running(rq, p))) 3441 resched_curr(rq); 3442 } 3443 out_unlock: 3444 task_rq_unlock(rq, p, &flags); 3445 } 3446 EXPORT_SYMBOL(set_user_nice); 3447 3448 /* 3449 * can_nice - check if a task can reduce its nice value 3450 * @p: task 3451 * @nice: nice value 3452 */ 3453 int can_nice(const struct task_struct *p, const int nice) 3454 { 3455 /* convert nice value [19,-20] to rlimit style value [1,40] */ 3456 int nice_rlim = nice_to_rlimit(nice); 3457 3458 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 3459 capable(CAP_SYS_NICE)); 3460 } 3461 3462 #ifdef __ARCH_WANT_SYS_NICE 3463 3464 /* 3465 * sys_nice - change the priority of the current process. 3466 * @increment: priority increment 3467 * 3468 * sys_setpriority is a more generic, but much slower function that 3469 * does similar things. 3470 */ 3471 SYSCALL_DEFINE1(nice, int, increment) 3472 { 3473 long nice, retval; 3474 3475 /* 3476 * Setpriority might change our priority at the same moment. 3477 * We don't have to worry. Conceptually one call occurs first 3478 * and we have a single winner. 3479 */ 3480 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 3481 nice = task_nice(current) + increment; 3482 3483 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 3484 if (increment < 0 && !can_nice(current, nice)) 3485 return -EPERM; 3486 3487 retval = security_task_setnice(current, nice); 3488 if (retval) 3489 return retval; 3490 3491 set_user_nice(current, nice); 3492 return 0; 3493 } 3494 3495 #endif 3496 3497 /** 3498 * task_prio - return the priority value of a given task. 3499 * @p: the task in question. 3500 * 3501 * Return: The priority value as seen by users in /proc. 3502 * Normal tasks map to 0..39 (nice -20..19 plus 20), SCHED_FIFO/RR 3503 * tasks map to -2..-100 and SCHED_DEADLINE tasks report -101. 3504 */ 3505 int task_prio(const struct task_struct *p) 3506 { 3507 return p->prio - MAX_RT_PRIO; 3508 } 3509 3510 /** 3511 * idle_cpu - is a given cpu idle currently? 3512 * @cpu: the processor in question. 3513 * 3514 * Return: 1 if the CPU is currently idle. 0 otherwise. 3515 */ 3516 int idle_cpu(int cpu) 3517 { 3518 struct rq *rq = cpu_rq(cpu); 3519 3520 if (rq->curr != rq->idle) 3521 return 0; 3522 3523 if (rq->nr_running) 3524 return 0; 3525 3526 #ifdef CONFIG_SMP 3527 if (!llist_empty(&rq->wake_list)) 3528 return 0; 3529 #endif 3530 3531 return 1; 3532 } 3533 3534 /** 3535 * idle_task - return the idle task for a given cpu. 3536 * @cpu: the processor in question. 3537 * 3538 * Return: The idle task for the cpu @cpu.
3539 */ 3540 struct task_struct *idle_task(int cpu) 3541 { 3542 return cpu_rq(cpu)->idle; 3543 } 3544 3545 /** 3546 * find_process_by_pid - find a process with a matching PID value. 3547 * @pid: the pid in question. 3548 * 3549 * The task of @pid, if found. %NULL otherwise. 3550 */ 3551 static struct task_struct *find_process_by_pid(pid_t pid) 3552 { 3553 return pid ? find_task_by_vpid(pid) : current; 3554 } 3555 3556 /* 3557 * This function initializes the sched_dl_entity of a newly becoming 3558 * SCHED_DEADLINE task. 3559 * 3560 * Only the static values are considered here, the actual runtime and the 3561 * absolute deadline will be properly calculated when the task is enqueued 3562 * for the first time with its new policy. 3563 */ 3564 static void 3565 __setparam_dl(struct task_struct *p, const struct sched_attr *attr) 3566 { 3567 struct sched_dl_entity *dl_se = &p->dl; 3568 3569 dl_se->dl_runtime = attr->sched_runtime; 3570 dl_se->dl_deadline = attr->sched_deadline; 3571 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 3572 dl_se->flags = attr->sched_flags; 3573 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 3574 3575 /* 3576 * Changing the parameters of a task is 'tricky' and we're not doing 3577 * the correct thing -- also see task_dead_dl() and switched_from_dl(). 3578 * 3579 * What we SHOULD do is delay the bandwidth release until the 0-lag 3580 * point. This would include retaining the task_struct until that time 3581 * and change dl_overflow() to not immediately decrement the current 3582 * amount. 3583 * 3584 * Instead we retain the current runtime/deadline and let the new 3585 * parameters take effect after the current reservation period lapses. 3586 * This is safe (albeit pessimistic) because the 0-lag point is always 3587 * before the current scheduling deadline. 3588 * 3589 * We can still have temporary overloads because we do not delay the 3590 * change in bandwidth until that time; so admission control is 3591 * not on the safe side. It does however guarantee tasks will never 3592 * consume more than promised. 3593 */ 3594 } 3595 3596 /* 3597 * sched_setparam() passes in -1 for its policy, to let the functions 3598 * it calls know not to change it. 3599 */ 3600 #define SETPARAM_POLICY -1 3601 3602 static void __setscheduler_params(struct task_struct *p, 3603 const struct sched_attr *attr) 3604 { 3605 int policy = attr->sched_policy; 3606 3607 if (policy == SETPARAM_POLICY) 3608 policy = p->policy; 3609 3610 p->policy = policy; 3611 3612 if (dl_policy(policy)) 3613 __setparam_dl(p, attr); 3614 else if (fair_policy(policy)) 3615 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 3616 3617 /* 3618 * __sched_setscheduler() ensures attr->sched_priority == 0 when 3619 * !rt_policy. Always setting this ensures that things like 3620 * getparam()/getattr() don't report silly values for !rt tasks. 3621 */ 3622 p->rt_priority = attr->sched_priority; 3623 p->normal_prio = normal_prio(p); 3624 set_load_weight(p); 3625 } 3626 3627 /* Actually do priority change: must hold pi & rq lock. */ 3628 static void __setscheduler(struct rq *rq, struct task_struct *p, 3629 const struct sched_attr *attr, bool keep_boost) 3630 { 3631 __setscheduler_params(p, attr); 3632 3633 /* 3634 * Keep a potential priority boosting if called from 3635 * sched_setscheduler(). 
3636 */ 3637 if (keep_boost) 3638 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p)); 3639 else 3640 p->prio = normal_prio(p); 3641 3642 if (dl_prio(p->prio)) 3643 p->sched_class = &dl_sched_class; 3644 else if (rt_prio(p->prio)) 3645 p->sched_class = &rt_sched_class; 3646 else 3647 p->sched_class = &fair_sched_class; 3648 } 3649 3650 static void 3651 __getparam_dl(struct task_struct *p, struct sched_attr *attr) 3652 { 3653 struct sched_dl_entity *dl_se = &p->dl; 3654 3655 attr->sched_priority = p->rt_priority; 3656 attr->sched_runtime = dl_se->dl_runtime; 3657 attr->sched_deadline = dl_se->dl_deadline; 3658 attr->sched_period = dl_se->dl_period; 3659 attr->sched_flags = dl_se->flags; 3660 } 3661 3662 /* 3663 * This function validates the new parameters of a -deadline task. 3664 * We ask for the deadline not being zero, and greater or equal 3665 * than the runtime, as well as the period of being zero or 3666 * greater than deadline. Furthermore, we have to be sure that 3667 * user parameters are above the internal resolution of 1us (we 3668 * check sched_runtime only since it is always the smaller one) and 3669 * below 2^63 ns (we have to check both sched_deadline and 3670 * sched_period, as the latter can be zero). 3671 */ 3672 static bool 3673 __checkparam_dl(const struct sched_attr *attr) 3674 { 3675 /* deadline != 0 */ 3676 if (attr->sched_deadline == 0) 3677 return false; 3678 3679 /* 3680 * Since we truncate DL_SCALE bits, make sure we're at least 3681 * that big. 3682 */ 3683 if (attr->sched_runtime < (1ULL << DL_SCALE)) 3684 return false; 3685 3686 /* 3687 * Since we use the MSB for wrap-around and sign issues, make 3688 * sure it's not set (mind that period can be equal to zero). 3689 */ 3690 if (attr->sched_deadline & (1ULL << 63) || 3691 attr->sched_period & (1ULL << 63)) 3692 return false; 3693 3694 /* runtime <= deadline <= period (if period != 0) */ 3695 if ((attr->sched_period != 0 && 3696 attr->sched_period < attr->sched_deadline) || 3697 attr->sched_deadline < attr->sched_runtime) 3698 return false; 3699 3700 return true; 3701 } 3702 3703 /* 3704 * check the target process has a UID that matches the current process's 3705 */ 3706 static bool check_same_owner(struct task_struct *p) 3707 { 3708 const struct cred *cred = current_cred(), *pcred; 3709 bool match; 3710 3711 rcu_read_lock(); 3712 pcred = __task_cred(p); 3713 match = (uid_eq(cred->euid, pcred->euid) || 3714 uid_eq(cred->euid, pcred->uid)); 3715 rcu_read_unlock(); 3716 return match; 3717 } 3718 3719 static bool dl_param_changed(struct task_struct *p, 3720 const struct sched_attr *attr) 3721 { 3722 struct sched_dl_entity *dl_se = &p->dl; 3723 3724 if (dl_se->dl_runtime != attr->sched_runtime || 3725 dl_se->dl_deadline != attr->sched_deadline || 3726 dl_se->dl_period != attr->sched_period || 3727 dl_se->flags != attr->sched_flags) 3728 return true; 3729 3730 return false; 3731 } 3732 3733 static int __sched_setscheduler(struct task_struct *p, 3734 const struct sched_attr *attr, 3735 bool user, bool pi) 3736 { 3737 int newprio = dl_policy(attr->sched_policy) ? 
MAX_DL_PRIO - 1 : 3738 MAX_RT_PRIO - 1 - attr->sched_priority; 3739 int retval, oldprio, oldpolicy = -1, queued, running; 3740 int new_effective_prio, policy = attr->sched_policy; 3741 unsigned long flags; 3742 const struct sched_class *prev_class; 3743 struct rq *rq; 3744 int reset_on_fork; 3745 3746 /* may grab non-irq protected spin_locks */ 3747 BUG_ON(in_interrupt()); 3748 recheck: 3749 /* double check policy once rq lock held */ 3750 if (policy < 0) { 3751 reset_on_fork = p->sched_reset_on_fork; 3752 policy = oldpolicy = p->policy; 3753 } else { 3754 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 3755 3756 if (policy != SCHED_DEADLINE && 3757 policy != SCHED_FIFO && policy != SCHED_RR && 3758 policy != SCHED_NORMAL && policy != SCHED_BATCH && 3759 policy != SCHED_IDLE) 3760 return -EINVAL; 3761 } 3762 3763 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK)) 3764 return -EINVAL; 3765 3766 /* 3767 * Valid priorities for SCHED_FIFO and SCHED_RR are 3768 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 3769 * SCHED_BATCH and SCHED_IDLE is 0. 3770 */ 3771 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || 3772 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) 3773 return -EINVAL; 3774 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 3775 (rt_policy(policy) != (attr->sched_priority != 0))) 3776 return -EINVAL; 3777 3778 /* 3779 * Allow unprivileged RT tasks to decrease priority: 3780 */ 3781 if (user && !capable(CAP_SYS_NICE)) { 3782 if (fair_policy(policy)) { 3783 if (attr->sched_nice < task_nice(p) && 3784 !can_nice(p, attr->sched_nice)) 3785 return -EPERM; 3786 } 3787 3788 if (rt_policy(policy)) { 3789 unsigned long rlim_rtprio = 3790 task_rlimit(p, RLIMIT_RTPRIO); 3791 3792 /* can't set/change the rt policy */ 3793 if (policy != p->policy && !rlim_rtprio) 3794 return -EPERM; 3795 3796 /* can't increase priority */ 3797 if (attr->sched_priority > p->rt_priority && 3798 attr->sched_priority > rlim_rtprio) 3799 return -EPERM; 3800 } 3801 3802 /* 3803 * Can't set/change SCHED_DEADLINE policy at all for now 3804 * (safest behavior); in the future we would like to allow 3805 * unprivileged DL tasks to increase their relative deadline 3806 * or reduce their runtime (both ways reducing utilization) 3807 */ 3808 if (dl_policy(policy)) 3809 return -EPERM; 3810 3811 /* 3812 * Treat SCHED_IDLE as nice 20. Only allow a switch to 3813 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 3814 */ 3815 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) { 3816 if (!can_nice(p, task_nice(p))) 3817 return -EPERM; 3818 } 3819 3820 /* can't change other user's priorities */ 3821 if (!check_same_owner(p)) 3822 return -EPERM; 3823 3824 /* Normal users shall not reset the sched_reset_on_fork flag */ 3825 if (p->sched_reset_on_fork && !reset_on_fork) 3826 return -EPERM; 3827 } 3828 3829 if (user) { 3830 retval = security_task_setscheduler(p); 3831 if (retval) 3832 return retval; 3833 } 3834 3835 /* 3836 * make sure no PI-waiters arrive (or leave) while we are 3837 * changing the priority of the task: 3838 * 3839 * To be able to change p->policy safely, the appropriate 3840 * runqueue lock must be held. 3841 */ 3842 rq = task_rq_lock(p, &flags); 3843 3844 /* 3845 * Changing the policy of the stop threads its a very bad idea 3846 */ 3847 if (p == rq->stop) { 3848 task_rq_unlock(rq, p, &flags); 3849 return -EINVAL; 3850 } 3851 3852 /* 3853 * If not changing anything there's no need to proceed further, 3854 * but store a possible modification of reset_on_fork. 
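 *
 * (E.g. a SCHED_FIFO task "changed" to SCHED_FIFO at its current
 *  rt_priority only has sched_reset_on_fork updated and returns 0
 *  without touching the runqueue.)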
3855 */ 3856 if (unlikely(policy == p->policy)) { 3857 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 3858 goto change; 3859 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 3860 goto change; 3861 if (dl_policy(policy) && dl_param_changed(p, attr)) 3862 goto change; 3863 3864 p->sched_reset_on_fork = reset_on_fork; 3865 task_rq_unlock(rq, p, &flags); 3866 return 0; 3867 } 3868 change: 3869 3870 if (user) { 3871 #ifdef CONFIG_RT_GROUP_SCHED 3872 /* 3873 * Do not allow realtime tasks into groups that have no runtime 3874 * assigned. 3875 */ 3876 if (rt_bandwidth_enabled() && rt_policy(policy) && 3877 task_group(p)->rt_bandwidth.rt_runtime == 0 && 3878 !task_group_is_autogroup(task_group(p))) { 3879 task_rq_unlock(rq, p, &flags); 3880 return -EPERM; 3881 } 3882 #endif 3883 #ifdef CONFIG_SMP 3884 if (dl_bandwidth_enabled() && dl_policy(policy)) { 3885 cpumask_t *span = rq->rd->span; 3886 3887 /* 3888 * Don't allow tasks with an affinity mask smaller than 3889 * the entire root_domain to become SCHED_DEADLINE. We 3890 * will also fail if there's no bandwidth available. 3891 */ 3892 if (!cpumask_subset(span, &p->cpus_allowed) || 3893 rq->rd->dl_bw.bw == 0) { 3894 task_rq_unlock(rq, p, &flags); 3895 return -EPERM; 3896 } 3897 } 3898 #endif 3899 } 3900 3901 /* recheck policy now with rq lock held */ 3902 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 3903 policy = oldpolicy = -1; 3904 task_rq_unlock(rq, p, &flags); 3905 goto recheck; 3906 } 3907 3908 /* 3909 * If setscheduling to SCHED_DEADLINE (or changing the parameters 3910 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 3911 * is available. 3912 */ 3913 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) { 3914 task_rq_unlock(rq, p, &flags); 3915 return -EBUSY; 3916 } 3917 3918 p->sched_reset_on_fork = reset_on_fork; 3919 oldprio = p->prio; 3920 3921 if (pi) { 3922 /* 3923 * Take priority boosted tasks into account. If the new 3924 * effective priority is unchanged, we just store the new 3925 * normal parameters and do not touch the scheduler class and 3926 * the runqueue. This will be done when the task deboost 3927 * itself. 3928 */ 3929 new_effective_prio = rt_mutex_get_effective_prio(p, newprio); 3930 if (new_effective_prio == oldprio) { 3931 __setscheduler_params(p, attr); 3932 task_rq_unlock(rq, p, &flags); 3933 return 0; 3934 } 3935 } 3936 3937 queued = task_on_rq_queued(p); 3938 running = task_current(rq, p); 3939 if (queued) 3940 dequeue_task(rq, p, 0); 3941 if (running) 3942 put_prev_task(rq, p); 3943 3944 prev_class = p->sched_class; 3945 __setscheduler(rq, p, attr, pi); 3946 3947 if (running) 3948 p->sched_class->set_curr_task(rq); 3949 if (queued) { 3950 /* 3951 * We enqueue to tail when the priority of a task is 3952 * increased (user space view). 3953 */ 3954 enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); 3955 } 3956 3957 check_class_changed(rq, p, prev_class, oldprio); 3958 preempt_disable(); /* avoid rq from going away on us */ 3959 task_rq_unlock(rq, p, &flags); 3960 3961 if (pi) 3962 rt_mutex_adjust_pi(p); 3963 3964 /* 3965 * Run balance callbacks after we've adjusted the PI chain. 
3966 */ 3967 balance_callback(rq); 3968 preempt_enable(); 3969 3970 return 0; 3971 } 3972 3973 static int _sched_setscheduler(struct task_struct *p, int policy, 3974 const struct sched_param *param, bool check) 3975 { 3976 struct sched_attr attr = { 3977 .sched_policy = policy, 3978 .sched_priority = param->sched_priority, 3979 .sched_nice = PRIO_TO_NICE(p->static_prio), 3980 }; 3981 3982 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 3983 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 3984 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 3985 policy &= ~SCHED_RESET_ON_FORK; 3986 attr.sched_policy = policy; 3987 } 3988 3989 return __sched_setscheduler(p, &attr, check, true); 3990 } 3991 /** 3992 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 3993 * @p: the task in question. 3994 * @policy: new policy. 3995 * @param: structure containing the new RT priority. 3996 * 3997 * Return: 0 on success. An error code otherwise. 3998 * 3999 * NOTE that the task may be already dead. 4000 */ 4001 int sched_setscheduler(struct task_struct *p, int policy, 4002 const struct sched_param *param) 4003 { 4004 return _sched_setscheduler(p, policy, param, true); 4005 } 4006 EXPORT_SYMBOL_GPL(sched_setscheduler); 4007 4008 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 4009 { 4010 return __sched_setscheduler(p, attr, true, true); 4011 } 4012 EXPORT_SYMBOL_GPL(sched_setattr); 4013 4014 /** 4015 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 4016 * @p: the task in question. 4017 * @policy: new policy. 4018 * @param: structure containing the new RT priority. 4019 * 4020 * Just like sched_setscheduler, only don't bother checking if the 4021 * current context has permission. For example, this is needed in 4022 * stop_machine(): we create temporary high priority worker threads, 4023 * but our caller might not have that capability. 4024 * 4025 * Return: 0 on success. An error code otherwise. 4026 */ 4027 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 4028 const struct sched_param *param) 4029 { 4030 return _sched_setscheduler(p, policy, param, false); 4031 } 4032 4033 static int 4034 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 4035 { 4036 struct sched_param lparam; 4037 struct task_struct *p; 4038 int retval; 4039 4040 if (!param || pid < 0) 4041 return -EINVAL; 4042 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 4043 return -EFAULT; 4044 4045 rcu_read_lock(); 4046 retval = -ESRCH; 4047 p = find_process_by_pid(pid); 4048 if (p != NULL) 4049 retval = sched_setscheduler(p, policy, &lparam); 4050 rcu_read_unlock(); 4051 4052 return retval; 4053 } 4054 4055 /* 4056 * Mimics kernel/events/core.c perf_copy_attr(). 4057 */ 4058 static int sched_copy_attr(struct sched_attr __user *uattr, 4059 struct sched_attr *attr) 4060 { 4061 u32 size; 4062 int ret; 4063 4064 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0)) 4065 return -EFAULT; 4066 4067 /* 4068 * zero the full structure, so that a short copy will be nice. 
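 *
 * (E.g. SCHED_ATTR_SIZE_VER0 is 48 bytes: an old binary passing
 *  size == 48 to a newer kernel gets the trailing fields zeroed by the
 *  memset below, while a binary passing a size larger than the kernel's
 *  sizeof(*attr) must have every excess byte zeroed or it gets -E2BIG.)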
4069 */ 4070 memset(attr, 0, sizeof(*attr)); 4071 4072 ret = get_user(size, &uattr->size); 4073 if (ret) 4074 return ret; 4075 4076 if (size > PAGE_SIZE) /* silly large */ 4077 goto err_size; 4078 4079 if (!size) /* abi compat */ 4080 size = SCHED_ATTR_SIZE_VER0; 4081 4082 if (size < SCHED_ATTR_SIZE_VER0) 4083 goto err_size; 4084 4085 /* 4086 * If we're handed a bigger struct than we know of, 4087 * ensure all the unknown bits are 0 - i.e. new 4088 * user-space does not rely on any kernel feature 4089 * extensions we dont know about yet. 4090 */ 4091 if (size > sizeof(*attr)) { 4092 unsigned char __user *addr; 4093 unsigned char __user *end; 4094 unsigned char val; 4095 4096 addr = (void __user *)uattr + sizeof(*attr); 4097 end = (void __user *)uattr + size; 4098 4099 for (; addr < end; addr++) { 4100 ret = get_user(val, addr); 4101 if (ret) 4102 return ret; 4103 if (val) 4104 goto err_size; 4105 } 4106 size = sizeof(*attr); 4107 } 4108 4109 ret = copy_from_user(attr, uattr, size); 4110 if (ret) 4111 return -EFAULT; 4112 4113 /* 4114 * XXX: do we want to be lenient like existing syscalls; or do we want 4115 * to be strict and return an error on out-of-bounds values? 4116 */ 4117 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 4118 4119 return 0; 4120 4121 err_size: 4122 put_user(sizeof(*attr), &uattr->size); 4123 return -E2BIG; 4124 } 4125 4126 /** 4127 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 4128 * @pid: the pid in question. 4129 * @policy: new policy. 4130 * @param: structure containing the new RT priority. 4131 * 4132 * Return: 0 on success. An error code otherwise. 4133 */ 4134 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, 4135 struct sched_param __user *, param) 4136 { 4137 /* negative values for policy are not valid */ 4138 if (policy < 0) 4139 return -EINVAL; 4140 4141 return do_sched_setscheduler(pid, policy, param); 4142 } 4143 4144 /** 4145 * sys_sched_setparam - set/change the RT priority of a thread 4146 * @pid: the pid in question. 4147 * @param: structure containing the new RT priority. 4148 * 4149 * Return: 0 on success. An error code otherwise. 4150 */ 4151 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 4152 { 4153 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 4154 } 4155 4156 /** 4157 * sys_sched_setattr - same as above, but with extended sched_attr 4158 * @pid: the pid in question. 4159 * @uattr: structure containing the extended parameters. 4160 * @flags: for future extension. 4161 */ 4162 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 4163 unsigned int, flags) 4164 { 4165 struct sched_attr attr; 4166 struct task_struct *p; 4167 int retval; 4168 4169 if (!uattr || pid < 0 || flags) 4170 return -EINVAL; 4171 4172 retval = sched_copy_attr(uattr, &attr); 4173 if (retval) 4174 return retval; 4175 4176 if ((int)attr.sched_policy < 0) 4177 return -EINVAL; 4178 4179 rcu_read_lock(); 4180 retval = -ESRCH; 4181 p = find_process_by_pid(pid); 4182 if (p != NULL) 4183 retval = sched_setattr(p, &attr); 4184 rcu_read_unlock(); 4185 4186 return retval; 4187 } 4188 4189 /** 4190 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 4191 * @pid: the pid in question. 4192 * 4193 * Return: On success, the policy of the thread. Otherwise, a negative error 4194 * code. 
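 *
 * Note that the returned value has SCHED_RESET_ON_FORK ORed in when
 * that flag is set; e.g. a reset-on-fork SCHED_FIFO task reports
 * SCHED_FIFO | SCHED_RESET_ON_FORK, so mask the flag off before
 * comparing against plain policy values.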
4195 */ 4196 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 4197 { 4198 struct task_struct *p; 4199 int retval; 4200 4201 if (pid < 0) 4202 return -EINVAL; 4203 4204 retval = -ESRCH; 4205 rcu_read_lock(); 4206 p = find_process_by_pid(pid); 4207 if (p) { 4208 retval = security_task_getscheduler(p); 4209 if (!retval) 4210 retval = p->policy 4211 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 4212 } 4213 rcu_read_unlock(); 4214 return retval; 4215 } 4216 4217 /** 4218 * sys_sched_getparam - get the RT priority of a thread 4219 * @pid: the pid in question. 4220 * @param: structure containing the RT priority. 4221 * 4222 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 4223 * code. 4224 */ 4225 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 4226 { 4227 struct sched_param lp = { .sched_priority = 0 }; 4228 struct task_struct *p; 4229 int retval; 4230 4231 if (!param || pid < 0) 4232 return -EINVAL; 4233 4234 rcu_read_lock(); 4235 p = find_process_by_pid(pid); 4236 retval = -ESRCH; 4237 if (!p) 4238 goto out_unlock; 4239 4240 retval = security_task_getscheduler(p); 4241 if (retval) 4242 goto out_unlock; 4243 4244 if (task_has_rt_policy(p)) 4245 lp.sched_priority = p->rt_priority; 4246 rcu_read_unlock(); 4247 4248 /* 4249 * This one might sleep, we cannot do it with a spinlock held ... 4250 */ 4251 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 4252 4253 return retval; 4254 4255 out_unlock: 4256 rcu_read_unlock(); 4257 return retval; 4258 } 4259 4260 static int sched_read_attr(struct sched_attr __user *uattr, 4261 struct sched_attr *attr, 4262 unsigned int usize) 4263 { 4264 int ret; 4265 4266 if (!access_ok(VERIFY_WRITE, uattr, usize)) 4267 return -EFAULT; 4268 4269 /* 4270 * If we're handed a smaller struct than we know of, 4271 * ensure all the unknown bits are 0 - i.e. old 4272 * user-space does not get uncomplete information. 4273 */ 4274 if (usize < sizeof(*attr)) { 4275 unsigned char *addr; 4276 unsigned char *end; 4277 4278 addr = (void *)attr + usize; 4279 end = (void *)attr + sizeof(*attr); 4280 4281 for (; addr < end; addr++) { 4282 if (*addr) 4283 return -EFBIG; 4284 } 4285 4286 attr->size = usize; 4287 } 4288 4289 ret = copy_to_user(uattr, attr, attr->size); 4290 if (ret) 4291 return -EFAULT; 4292 4293 return 0; 4294 } 4295 4296 /** 4297 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 4298 * @pid: the pid in question. 4299 * @uattr: structure containing the extended parameters. 4300 * @size: sizeof(attr) for fwd/bwd comp. 4301 * @flags: for future extension. 
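 *
 * Illustrative only (not part of the original source): glibc has typically
 * not provided wrappers for sched_setattr()/sched_getattr(), so callers
 * declare the structure themselves (layout as documented for the syscall)
 * and go through syscall(2). A rough sketch, assuming the syscall numbers
 * are available via <sys/syscall.h>:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sched.h>
 *	#include <sys/syscall.h>
 *
 *	struct my_sched_attr {		local mirror of struct sched_attr
 *		uint32_t size;
 *		uint32_t sched_policy;
 *		uint64_t sched_flags;
 *		int32_t  sched_nice;
 *		uint32_t sched_priority;
 *		uint64_t sched_runtime;
 *		uint64_t sched_deadline;
 *		uint64_t sched_period;
 *	};
 *
 *	int main(void)
 *	{
 *		struct my_sched_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size = sizeof(attr);
 *		attr.sched_policy = SCHED_FIFO;
 *		attr.sched_priority = 10;
 *
 *		if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *			perror("sched_setattr");
 *
 *		if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0))
 *			perror("sched_getattr");
 *		else
 *			printf("policy=%u prio=%u\n",
 *			       attr.sched_policy, attr.sched_priority);
 *		return 0;
 *	}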
4302 */ 4303 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 4304 unsigned int, size, unsigned int, flags) 4305 { 4306 struct sched_attr attr = { 4307 .size = sizeof(struct sched_attr), 4308 }; 4309 struct task_struct *p; 4310 int retval; 4311 4312 if (!uattr || pid < 0 || size > PAGE_SIZE || 4313 size < SCHED_ATTR_SIZE_VER0 || flags) 4314 return -EINVAL; 4315 4316 rcu_read_lock(); 4317 p = find_process_by_pid(pid); 4318 retval = -ESRCH; 4319 if (!p) 4320 goto out_unlock; 4321 4322 retval = security_task_getscheduler(p); 4323 if (retval) 4324 goto out_unlock; 4325 4326 attr.sched_policy = p->policy; 4327 if (p->sched_reset_on_fork) 4328 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 4329 if (task_has_dl_policy(p)) 4330 __getparam_dl(p, &attr); 4331 else if (task_has_rt_policy(p)) 4332 attr.sched_priority = p->rt_priority; 4333 else 4334 attr.sched_nice = task_nice(p); 4335 4336 rcu_read_unlock(); 4337 4338 retval = sched_read_attr(uattr, &attr, size); 4339 return retval; 4340 4341 out_unlock: 4342 rcu_read_unlock(); 4343 return retval; 4344 } 4345 4346 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 4347 { 4348 cpumask_var_t cpus_allowed, new_mask; 4349 struct task_struct *p; 4350 int retval; 4351 4352 rcu_read_lock(); 4353 4354 p = find_process_by_pid(pid); 4355 if (!p) { 4356 rcu_read_unlock(); 4357 return -ESRCH; 4358 } 4359 4360 /* Prevent p going away */ 4361 get_task_struct(p); 4362 rcu_read_unlock(); 4363 4364 if (p->flags & PF_NO_SETAFFINITY) { 4365 retval = -EINVAL; 4366 goto out_put_task; 4367 } 4368 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 4369 retval = -ENOMEM; 4370 goto out_put_task; 4371 } 4372 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 4373 retval = -ENOMEM; 4374 goto out_free_cpus_allowed; 4375 } 4376 retval = -EPERM; 4377 if (!check_same_owner(p)) { 4378 rcu_read_lock(); 4379 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 4380 rcu_read_unlock(); 4381 goto out_free_new_mask; 4382 } 4383 rcu_read_unlock(); 4384 } 4385 4386 retval = security_task_setscheduler(p); 4387 if (retval) 4388 goto out_free_new_mask; 4389 4390 4391 cpuset_cpus_allowed(p, cpus_allowed); 4392 cpumask_and(new_mask, in_mask, cpus_allowed); 4393 4394 /* 4395 * Since bandwidth control happens on root_domain basis, 4396 * if admission test is enabled, we only admit -deadline 4397 * tasks allowed to run on all the CPUs in the task's 4398 * root_domain. 4399 */ 4400 #ifdef CONFIG_SMP 4401 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 4402 rcu_read_lock(); 4403 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { 4404 retval = -EBUSY; 4405 rcu_read_unlock(); 4406 goto out_free_new_mask; 4407 } 4408 rcu_read_unlock(); 4409 } 4410 #endif 4411 again: 4412 retval = __set_cpus_allowed_ptr(p, new_mask, true); 4413 4414 if (!retval) { 4415 cpuset_cpus_allowed(p, cpus_allowed); 4416 if (!cpumask_subset(new_mask, cpus_allowed)) { 4417 /* 4418 * We must have raced with a concurrent cpuset 4419 * update. 
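 * (Nothing above holds the cpuset stable, so a concurrent cpuset change
 * may have shrunk the set of permitted CPUs after __set_cpus_allowed_ptr()
 * succeeded, leaving new_mask outside what the cpuset now allows.)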
Just reset the cpus_allowed to the 4420 * cpuset's cpus_allowed 4421 */ 4422 cpumask_copy(new_mask, cpus_allowed); 4423 goto again; 4424 } 4425 } 4426 out_free_new_mask: 4427 free_cpumask_var(new_mask); 4428 out_free_cpus_allowed: 4429 free_cpumask_var(cpus_allowed); 4430 out_put_task: 4431 put_task_struct(p); 4432 return retval; 4433 } 4434 4435 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 4436 struct cpumask *new_mask) 4437 { 4438 if (len < cpumask_size()) 4439 cpumask_clear(new_mask); 4440 else if (len > cpumask_size()) 4441 len = cpumask_size(); 4442 4443 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 4444 } 4445 4446 /** 4447 * sys_sched_setaffinity - set the cpu affinity of a process 4448 * @pid: pid of the process 4449 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4450 * @user_mask_ptr: user-space pointer to the new cpu mask 4451 * 4452 * Return: 0 on success. An error code otherwise. 4453 */ 4454 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 4455 unsigned long __user *, user_mask_ptr) 4456 { 4457 cpumask_var_t new_mask; 4458 int retval; 4459 4460 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 4461 return -ENOMEM; 4462 4463 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 4464 if (retval == 0) 4465 retval = sched_setaffinity(pid, new_mask); 4466 free_cpumask_var(new_mask); 4467 return retval; 4468 } 4469 4470 long sched_getaffinity(pid_t pid, struct cpumask *mask) 4471 { 4472 struct task_struct *p; 4473 unsigned long flags; 4474 int retval; 4475 4476 rcu_read_lock(); 4477 4478 retval = -ESRCH; 4479 p = find_process_by_pid(pid); 4480 if (!p) 4481 goto out_unlock; 4482 4483 retval = security_task_getscheduler(p); 4484 if (retval) 4485 goto out_unlock; 4486 4487 raw_spin_lock_irqsave(&p->pi_lock, flags); 4488 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); 4489 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4490 4491 out_unlock: 4492 rcu_read_unlock(); 4493 4494 return retval; 4495 } 4496 4497 /** 4498 * sys_sched_getaffinity - get the cpu affinity of a process 4499 * @pid: pid of the process 4500 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4501 * @user_mask_ptr: user-space pointer to hold the current cpu mask 4502 * 4503 * Return: 0 on success. An error code otherwise. 4504 */ 4505 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 4506 unsigned long __user *, user_mask_ptr) 4507 { 4508 int ret; 4509 cpumask_var_t mask; 4510 4511 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 4512 return -EINVAL; 4513 if (len & (sizeof(unsigned long)-1)) 4514 return -EINVAL; 4515 4516 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 4517 return -ENOMEM; 4518 4519 ret = sched_getaffinity(pid, mask); 4520 if (ret == 0) { 4521 size_t retlen = min_t(size_t, len, cpumask_size()); 4522 4523 if (copy_to_user(user_mask_ptr, mask, retlen)) 4524 ret = -EFAULT; 4525 else 4526 ret = retlen; 4527 } 4528 free_cpumask_var(mask); 4529 4530 return ret; 4531 } 4532 4533 /** 4534 * sys_sched_yield - yield the current processor to other threads. 4535 * 4536 * This function yields the current CPU to other tasks. If there are no 4537 * other threads running on this CPU then this function will return. 4538 * 4539 * Return: 0. 
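 *
 * Illustrative only (not part of the original source): from userspace this
 * is reached through the sched_yield() wrapper declared in <sched.h>, e.g.
 *
 *	#include <sched.h>
 *
 *	int main(void)
 *	{
 *		sched_yield();
 *		return 0;
 *	}
 *
 * For normal (SCHED_OTHER) tasks the effect is intentionally modest; see
 * the comment above yield() further down for why yield-based busy waiting
 * is usually a mistake.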
4540 */ 4541 SYSCALL_DEFINE0(sched_yield) 4542 { 4543 struct rq *rq = this_rq_lock(); 4544 4545 schedstat_inc(rq, yld_count); 4546 current->sched_class->yield_task(rq); 4547 4548 /* 4549 * Since we are going to call schedule() anyway, there's 4550 * no need to preempt or enable interrupts: 4551 */ 4552 __release(rq->lock); 4553 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 4554 do_raw_spin_unlock(&rq->lock); 4555 sched_preempt_enable_no_resched(); 4556 4557 schedule(); 4558 4559 return 0; 4560 } 4561 4562 int __sched _cond_resched(void) 4563 { 4564 if (should_resched(0)) { 4565 preempt_schedule_common(); 4566 return 1; 4567 } 4568 return 0; 4569 } 4570 EXPORT_SYMBOL(_cond_resched); 4571 4572 /* 4573 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 4574 * call schedule, and on return reacquire the lock. 4575 * 4576 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level 4577 * operations here to prevent schedule() from being called twice (once via 4578 * spin_unlock(), once by hand). 4579 */ 4580 int __cond_resched_lock(spinlock_t *lock) 4581 { 4582 int resched = should_resched(PREEMPT_LOCK_OFFSET); 4583 int ret = 0; 4584 4585 lockdep_assert_held(lock); 4586 4587 if (spin_needbreak(lock) || resched) { 4588 spin_unlock(lock); 4589 if (resched) 4590 preempt_schedule_common(); 4591 else 4592 cpu_relax(); 4593 ret = 1; 4594 spin_lock(lock); 4595 } 4596 return ret; 4597 } 4598 EXPORT_SYMBOL(__cond_resched_lock); 4599 4600 int __sched __cond_resched_softirq(void) 4601 { 4602 BUG_ON(!in_softirq()); 4603 4604 if (should_resched(SOFTIRQ_DISABLE_OFFSET)) { 4605 local_bh_enable(); 4606 preempt_schedule_common(); 4607 local_bh_disable(); 4608 return 1; 4609 } 4610 return 0; 4611 } 4612 EXPORT_SYMBOL(__cond_resched_softirq); 4613 4614 /** 4615 * yield - yield the current processor to other threads. 4616 * 4617 * Do not ever use this function, there's a 99% chance you're doing it wrong. 4618 * 4619 * The scheduler is at all times free to pick the calling task as the most 4620 * eligible task to run, if removing the yield() call from your code breaks 4621 * it, its already broken. 4622 * 4623 * Typical broken usage is: 4624 * 4625 * while (!event) 4626 * yield(); 4627 * 4628 * where one assumes that yield() will let 'the other' process run that will 4629 * make event true. If the current task is a SCHED_FIFO task that will never 4630 * happen. Never use yield() as a progress guarantee!! 4631 * 4632 * If you want to use yield() to wait for something, use wait_event(). 4633 * If you want to use yield() to be 'nice' for others, use cond_resched(). 4634 * If you still want to use yield(), do not! 4635 */ 4636 void __sched yield(void) 4637 { 4638 set_current_state(TASK_RUNNING); 4639 sys_sched_yield(); 4640 } 4641 EXPORT_SYMBOL(yield); 4642 4643 /** 4644 * yield_to - yield the current processor to another thread in 4645 * your thread group, or accelerate that thread toward the 4646 * processor it's on. 4647 * @p: target task 4648 * @preempt: whether task preemption is allowed or not 4649 * 4650 * It's the caller's job to ensure that the target task struct 4651 * can't go away on us before we can do any checks. 4652 * 4653 * Return: 4654 * true (>0) if we indeed boosted the target task. 4655 * false (0) if we failed to boost the target. 4656 * -ESRCH if there's no task to yield to. 
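 *
 * (Note, not in the original: the typical in-tree user of this directed
 * yield is KVM, which uses it on pause-loop/spin exits to push the CPU
 * towards the vCPU believed to hold the contended lock.)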
4657 */ 4658 int __sched yield_to(struct task_struct *p, bool preempt) 4659 { 4660 struct task_struct *curr = current; 4661 struct rq *rq, *p_rq; 4662 unsigned long flags; 4663 int yielded = 0; 4664 4665 local_irq_save(flags); 4666 rq = this_rq(); 4667 4668 again: 4669 p_rq = task_rq(p); 4670 /* 4671 * If we're the only runnable task on the rq and target rq also 4672 * has only one task, there's absolutely no point in yielding. 4673 */ 4674 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 4675 yielded = -ESRCH; 4676 goto out_irq; 4677 } 4678 4679 double_rq_lock(rq, p_rq); 4680 if (task_rq(p) != p_rq) { 4681 double_rq_unlock(rq, p_rq); 4682 goto again; 4683 } 4684 4685 if (!curr->sched_class->yield_to_task) 4686 goto out_unlock; 4687 4688 if (curr->sched_class != p->sched_class) 4689 goto out_unlock; 4690 4691 if (task_running(p_rq, p) || p->state) 4692 goto out_unlock; 4693 4694 yielded = curr->sched_class->yield_to_task(rq, p, preempt); 4695 if (yielded) { 4696 schedstat_inc(rq, yld_count); 4697 /* 4698 * Make p's CPU reschedule; pick_next_entity takes care of 4699 * fairness. 4700 */ 4701 if (preempt && rq != p_rq) 4702 resched_curr(p_rq); 4703 } 4704 4705 out_unlock: 4706 double_rq_unlock(rq, p_rq); 4707 out_irq: 4708 local_irq_restore(flags); 4709 4710 if (yielded > 0) 4711 schedule(); 4712 4713 return yielded; 4714 } 4715 EXPORT_SYMBOL_GPL(yield_to); 4716 4717 /* 4718 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 4719 * that process accounting knows that this is a task in IO wait state. 4720 */ 4721 long __sched io_schedule_timeout(long timeout) 4722 { 4723 int old_iowait = current->in_iowait; 4724 struct rq *rq; 4725 long ret; 4726 4727 current->in_iowait = 1; 4728 blk_schedule_flush_plug(current); 4729 4730 delayacct_blkio_start(); 4731 rq = raw_rq(); 4732 atomic_inc(&rq->nr_iowait); 4733 ret = schedule_timeout(timeout); 4734 current->in_iowait = old_iowait; 4735 atomic_dec(&rq->nr_iowait); 4736 delayacct_blkio_end(); 4737 4738 return ret; 4739 } 4740 EXPORT_SYMBOL(io_schedule_timeout); 4741 4742 /** 4743 * sys_sched_get_priority_max - return maximum RT priority. 4744 * @policy: scheduling class. 4745 * 4746 * Return: On success, this syscall returns the maximum 4747 * rt_priority that can be used by a given scheduling class. 4748 * On failure, a negative error code is returned. 4749 */ 4750 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 4751 { 4752 int ret = -EINVAL; 4753 4754 switch (policy) { 4755 case SCHED_FIFO: 4756 case SCHED_RR: 4757 ret = MAX_USER_RT_PRIO-1; 4758 break; 4759 case SCHED_DEADLINE: 4760 case SCHED_NORMAL: 4761 case SCHED_BATCH: 4762 case SCHED_IDLE: 4763 ret = 0; 4764 break; 4765 } 4766 return ret; 4767 } 4768 4769 /** 4770 * sys_sched_get_priority_min - return minimum RT priority. 4771 * @policy: scheduling class. 4772 * 4773 * Return: On success, this syscall returns the minimum 4774 * rt_priority that can be used by a given scheduling class. 4775 * On failure, a negative error code is returned. 4776 */ 4777 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 4778 { 4779 int ret = -EINVAL; 4780 4781 switch (policy) { 4782 case SCHED_FIFO: 4783 case SCHED_RR: 4784 ret = 1; 4785 break; 4786 case SCHED_DEADLINE: 4787 case SCHED_NORMAL: 4788 case SCHED_BATCH: 4789 case SCHED_IDLE: 4790 ret = 0; 4791 } 4792 return ret; 4793 } 4794 4795 /** 4796 * sys_sched_rr_get_interval - return the default timeslice of a process. 4797 * @pid: pid of the process. 4798 * @interval: userspace pointer to the timeslice value. 
4799 * 4800 * this syscall writes the default timeslice value of a given process 4801 * into the user-space timespec buffer. A value of '0' means infinity. 4802 * 4803 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 4804 * an error code. 4805 */ 4806 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 4807 struct timespec __user *, interval) 4808 { 4809 struct task_struct *p; 4810 unsigned int time_slice; 4811 unsigned long flags; 4812 struct rq *rq; 4813 int retval; 4814 struct timespec t; 4815 4816 if (pid < 0) 4817 return -EINVAL; 4818 4819 retval = -ESRCH; 4820 rcu_read_lock(); 4821 p = find_process_by_pid(pid); 4822 if (!p) 4823 goto out_unlock; 4824 4825 retval = security_task_getscheduler(p); 4826 if (retval) 4827 goto out_unlock; 4828 4829 rq = task_rq_lock(p, &flags); 4830 time_slice = 0; 4831 if (p->sched_class->get_rr_interval) 4832 time_slice = p->sched_class->get_rr_interval(rq, p); 4833 task_rq_unlock(rq, p, &flags); 4834 4835 rcu_read_unlock(); 4836 jiffies_to_timespec(time_slice, &t); 4837 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 4838 return retval; 4839 4840 out_unlock: 4841 rcu_read_unlock(); 4842 return retval; 4843 } 4844 4845 static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; 4846 4847 void sched_show_task(struct task_struct *p) 4848 { 4849 unsigned long free = 0; 4850 int ppid; 4851 unsigned long state = p->state; 4852 4853 if (state) 4854 state = __ffs(state) + 1; 4855 printk(KERN_INFO "%-15.15s %c", p->comm, 4856 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); 4857 #if BITS_PER_LONG == 32 4858 if (state == TASK_RUNNING) 4859 printk(KERN_CONT " running "); 4860 else 4861 printk(KERN_CONT " %08lx ", thread_saved_pc(p)); 4862 #else 4863 if (state == TASK_RUNNING) 4864 printk(KERN_CONT " running task "); 4865 else 4866 printk(KERN_CONT " %016lx ", thread_saved_pc(p)); 4867 #endif 4868 #ifdef CONFIG_DEBUG_STACK_USAGE 4869 free = stack_not_used(p); 4870 #endif 4871 ppid = 0; 4872 rcu_read_lock(); 4873 if (pid_alive(p)) 4874 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 4875 rcu_read_unlock(); 4876 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 4877 task_pid_nr(p), ppid, 4878 (unsigned long)task_thread_info(p)->flags); 4879 4880 print_worker_info(KERN_INFO, p); 4881 show_stack(p, NULL); 4882 } 4883 4884 void show_state_filter(unsigned long state_filter) 4885 { 4886 struct task_struct *g, *p; 4887 4888 #if BITS_PER_LONG == 32 4889 printk(KERN_INFO 4890 " task PC stack pid father\n"); 4891 #else 4892 printk(KERN_INFO 4893 " task PC stack pid father\n"); 4894 #endif 4895 rcu_read_lock(); 4896 for_each_process_thread(g, p) { 4897 /* 4898 * reset the NMI-timeout, listing all files on a slow 4899 * console might take a lot of time: 4900 */ 4901 touch_nmi_watchdog(); 4902 if (!state_filter || (p->state & state_filter)) 4903 sched_show_task(p); 4904 } 4905 4906 touch_all_softlockup_watchdogs(); 4907 4908 #ifdef CONFIG_SCHED_DEBUG 4909 sysrq_sched_debug_show(); 4910 #endif 4911 rcu_read_unlock(); 4912 /* 4913 * Only show locks if all tasks are dumped: 4914 */ 4915 if (!state_filter) 4916 debug_show_all_locks(); 4917 } 4918 4919 void init_idle_bootup_task(struct task_struct *idle) 4920 { 4921 idle->sched_class = &idle_sched_class; 4922 } 4923 4924 /** 4925 * init_idle - set up an idle thread for a given CPU 4926 * @idle: task in question 4927 * @cpu: cpu the idle task belongs to 4928 * 4929 * NOTE: this function does not set the idle thread's NEED_RESCHED 4930 * flag, to make booting more robust. 
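 *
 * (Note, not in the original: this runs for the boot CPU from sched_init()
 * and, typically via fork_idle(), once more for every secondary CPU's idle
 * thread, which is why the body below tolerates being called repeatedly on
 * the same task.)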
4931 */ 4932 void init_idle(struct task_struct *idle, int cpu) 4933 { 4934 struct rq *rq = cpu_rq(cpu); 4935 unsigned long flags; 4936 4937 raw_spin_lock_irqsave(&idle->pi_lock, flags); 4938 raw_spin_lock(&rq->lock); 4939 4940 __sched_fork(0, idle); 4941 idle->state = TASK_RUNNING; 4942 idle->se.exec_start = sched_clock(); 4943 4944 #ifdef CONFIG_SMP 4945 /* 4946 * Its possible that init_idle() gets called multiple times on a task, 4947 * in that case do_set_cpus_allowed() will not do the right thing. 4948 * 4949 * And since this is boot we can forgo the serialization. 4950 */ 4951 set_cpus_allowed_common(idle, cpumask_of(cpu)); 4952 #endif 4953 /* 4954 * We're having a chicken and egg problem, even though we are 4955 * holding rq->lock, the cpu isn't yet set to this cpu so the 4956 * lockdep check in task_group() will fail. 4957 * 4958 * Similar case to sched_fork(). / Alternatively we could 4959 * use task_rq_lock() here and obtain the other rq->lock. 4960 * 4961 * Silence PROVE_RCU 4962 */ 4963 rcu_read_lock(); 4964 __set_task_cpu(idle, cpu); 4965 rcu_read_unlock(); 4966 4967 rq->curr = rq->idle = idle; 4968 idle->on_rq = TASK_ON_RQ_QUEUED; 4969 #ifdef CONFIG_SMP 4970 idle->on_cpu = 1; 4971 #endif 4972 raw_spin_unlock(&rq->lock); 4973 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 4974 4975 /* Set the preempt count _outside_ the spinlocks! */ 4976 init_idle_preempt_count(idle, cpu); 4977 4978 /* 4979 * The idle tasks have their own, simple scheduling class: 4980 */ 4981 idle->sched_class = &idle_sched_class; 4982 ftrace_graph_init_idle_task(idle, cpu); 4983 vtime_init_idle(idle, cpu); 4984 #ifdef CONFIG_SMP 4985 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 4986 #endif 4987 } 4988 4989 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 4990 const struct cpumask *trial) 4991 { 4992 int ret = 1, trial_cpus; 4993 struct dl_bw *cur_dl_b; 4994 unsigned long flags; 4995 4996 if (!cpumask_weight(cur)) 4997 return ret; 4998 4999 rcu_read_lock_sched(); 5000 cur_dl_b = dl_bw_of(cpumask_any(cur)); 5001 trial_cpus = cpumask_weight(trial); 5002 5003 raw_spin_lock_irqsave(&cur_dl_b->lock, flags); 5004 if (cur_dl_b->bw != -1 && 5005 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw) 5006 ret = 0; 5007 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); 5008 rcu_read_unlock_sched(); 5009 5010 return ret; 5011 } 5012 5013 int task_can_attach(struct task_struct *p, 5014 const struct cpumask *cs_cpus_allowed) 5015 { 5016 int ret = 0; 5017 5018 /* 5019 * Kthreads which disallow setaffinity shouldn't be moved 5020 * to a new cpuset; we don't want to change their cpu 5021 * affinity and isolating such threads by their set of 5022 * allowed nodes is unnecessary. Thus, cpusets are not 5023 * applicable for such threads. This prevents checking for 5024 * success of set_cpus_allowed_ptr() on all attached tasks 5025 * before cpus_allowed may be changed. 
5026 */ 5027 if (p->flags & PF_NO_SETAFFINITY) { 5028 ret = -EINVAL; 5029 goto out; 5030 } 5031 5032 #ifdef CONFIG_SMP 5033 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 5034 cs_cpus_allowed)) { 5035 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask, 5036 cs_cpus_allowed); 5037 struct dl_bw *dl_b; 5038 bool overflow; 5039 int cpus; 5040 unsigned long flags; 5041 5042 rcu_read_lock_sched(); 5043 dl_b = dl_bw_of(dest_cpu); 5044 raw_spin_lock_irqsave(&dl_b->lock, flags); 5045 cpus = dl_bw_cpus(dest_cpu); 5046 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw); 5047 if (overflow) 5048 ret = -EBUSY; 5049 else { 5050 /* 5051 * We reserve space for this task in the destination 5052 * root_domain, as we can't fail after this point. 5053 * We will free resources in the source root_domain 5054 * later on (see set_cpus_allowed_dl()). 5055 */ 5056 __dl_add(dl_b, p->dl.dl_bw); 5057 } 5058 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 5059 rcu_read_unlock_sched(); 5060 5061 } 5062 #endif 5063 out: 5064 return ret; 5065 } 5066 5067 #ifdef CONFIG_SMP 5068 5069 #ifdef CONFIG_NUMA_BALANCING 5070 /* Migrate current task p to target_cpu */ 5071 int migrate_task_to(struct task_struct *p, int target_cpu) 5072 { 5073 struct migration_arg arg = { p, target_cpu }; 5074 int curr_cpu = task_cpu(p); 5075 5076 if (curr_cpu == target_cpu) 5077 return 0; 5078 5079 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) 5080 return -EINVAL; 5081 5082 /* TODO: This is not properly updating schedstats */ 5083 5084 trace_sched_move_numa(p, curr_cpu, target_cpu); 5085 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 5086 } 5087 5088 /* 5089 * Requeue a task on a given node and accurately track the number of NUMA 5090 * tasks on the runqueues 5091 */ 5092 void sched_setnuma(struct task_struct *p, int nid) 5093 { 5094 struct rq *rq; 5095 unsigned long flags; 5096 bool queued, running; 5097 5098 rq = task_rq_lock(p, &flags); 5099 queued = task_on_rq_queued(p); 5100 running = task_current(rq, p); 5101 5102 if (queued) 5103 dequeue_task(rq, p, 0); 5104 if (running) 5105 put_prev_task(rq, p); 5106 5107 p->numa_preferred_nid = nid; 5108 5109 if (running) 5110 p->sched_class->set_curr_task(rq); 5111 if (queued) 5112 enqueue_task(rq, p, 0); 5113 task_rq_unlock(rq, p, &flags); 5114 } 5115 #endif /* CONFIG_NUMA_BALANCING */ 5116 5117 #ifdef CONFIG_HOTPLUG_CPU 5118 /* 5119 * Ensures that the idle task is using init_mm right before its cpu goes 5120 * offline. 5121 */ 5122 void idle_task_exit(void) 5123 { 5124 struct mm_struct *mm = current->active_mm; 5125 5126 BUG_ON(cpu_online(smp_processor_id())); 5127 5128 if (mm != &init_mm) { 5129 switch_mm(mm, &init_mm, current); 5130 finish_arch_post_lock_switch(); 5131 } 5132 mmdrop(mm); 5133 } 5134 5135 /* 5136 * Since this CPU is going 'away' for a while, fold any nr_active delta 5137 * we might have. Assumes we're called after migrate_tasks() so that the 5138 * nr_active count is stable. 5139 * 5140 * Also see the comment "Global load-average calculations". 
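 *
 * (Clarification, not in the original: calc_load_fold_active() returns the
 * difference between this rq's current nr_running + nr_uninterruptible and
 * the value it last reported to the load-average code; folding that delta
 * into calc_load_tasks keeps the global loadavg consistent once this CPU
 * stops contributing samples.)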
5141 */ 5142 static void calc_load_migrate(struct rq *rq) 5143 { 5144 long delta = calc_load_fold_active(rq); 5145 if (delta) 5146 atomic_long_add(delta, &calc_load_tasks); 5147 } 5148 5149 static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) 5150 { 5151 } 5152 5153 static const struct sched_class fake_sched_class = { 5154 .put_prev_task = put_prev_task_fake, 5155 }; 5156 5157 static struct task_struct fake_task = { 5158 /* 5159 * Avoid pull_{rt,dl}_task() 5160 */ 5161 .prio = MAX_PRIO + 1, 5162 .sched_class = &fake_sched_class, 5163 }; 5164 5165 /* 5166 * Migrate all tasks from the rq, sleeping tasks will be migrated by 5167 * try_to_wake_up()->select_task_rq(). 5168 * 5169 * Called with rq->lock held even though we'er in stop_machine() and 5170 * there's no concurrency possible, we hold the required locks anyway 5171 * because of lock validation efforts. 5172 */ 5173 static void migrate_tasks(struct rq *dead_rq) 5174 { 5175 struct rq *rq = dead_rq; 5176 struct task_struct *next, *stop = rq->stop; 5177 int dest_cpu; 5178 5179 /* 5180 * Fudge the rq selection such that the below task selection loop 5181 * doesn't get stuck on the currently eligible stop task. 5182 * 5183 * We're currently inside stop_machine() and the rq is either stuck 5184 * in the stop_machine_cpu_stop() loop, or we're executing this code, 5185 * either way we should never end up calling schedule() until we're 5186 * done here. 5187 */ 5188 rq->stop = NULL; 5189 5190 /* 5191 * put_prev_task() and pick_next_task() sched 5192 * class method both need to have an up-to-date 5193 * value of rq->clock[_task] 5194 */ 5195 update_rq_clock(rq); 5196 5197 for (;;) { 5198 /* 5199 * There's this thread running, bail when that's the only 5200 * remaining thread. 5201 */ 5202 if (rq->nr_running == 1) 5203 break; 5204 5205 /* 5206 * pick_next_task assumes pinned rq->lock. 5207 */ 5208 lockdep_pin_lock(&rq->lock); 5209 next = pick_next_task(rq, &fake_task); 5210 BUG_ON(!next); 5211 next->sched_class->put_prev_task(rq, next); 5212 5213 /* 5214 * Rules for changing task_struct::cpus_allowed are holding 5215 * both pi_lock and rq->lock, such that holding either 5216 * stabilizes the mask. 5217 * 5218 * Drop rq->lock is not quite as disastrous as it usually is 5219 * because !cpu_active at this point, which means load-balance 5220 * will not interfere. Also, stop-machine. 5221 */ 5222 lockdep_unpin_lock(&rq->lock); 5223 raw_spin_unlock(&rq->lock); 5224 raw_spin_lock(&next->pi_lock); 5225 raw_spin_lock(&rq->lock); 5226 5227 /* 5228 * Since we're inside stop-machine, _nothing_ should have 5229 * changed the task, WARN if weird stuff happened, because in 5230 * that case the above rq->lock drop is a fail too. 5231 */ 5232 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { 5233 raw_spin_unlock(&next->pi_lock); 5234 continue; 5235 } 5236 5237 /* Find suitable destination for @next, with force if needed. 
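 * (Roughly: select_fallback_rq() first tries the task's own cpus_allowed,
 * then the CPUs permitted by its cpuset, and as a last resort any possible
 * CPU, widening the task's affinity if it had to.)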
*/ 5238 dest_cpu = select_fallback_rq(dead_rq->cpu, next); 5239 5240 rq = __migrate_task(rq, next, dest_cpu); 5241 if (rq != dead_rq) { 5242 raw_spin_unlock(&rq->lock); 5243 rq = dead_rq; 5244 raw_spin_lock(&rq->lock); 5245 } 5246 raw_spin_unlock(&next->pi_lock); 5247 } 5248 5249 rq->stop = stop; 5250 } 5251 #endif /* CONFIG_HOTPLUG_CPU */ 5252 5253 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 5254 5255 static struct ctl_table sd_ctl_dir[] = { 5256 { 5257 .procname = "sched_domain", 5258 .mode = 0555, 5259 }, 5260 {} 5261 }; 5262 5263 static struct ctl_table sd_ctl_root[] = { 5264 { 5265 .procname = "kernel", 5266 .mode = 0555, 5267 .child = sd_ctl_dir, 5268 }, 5269 {} 5270 }; 5271 5272 static struct ctl_table *sd_alloc_ctl_entry(int n) 5273 { 5274 struct ctl_table *entry = 5275 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); 5276 5277 return entry; 5278 } 5279 5280 static void sd_free_ctl_entry(struct ctl_table **tablep) 5281 { 5282 struct ctl_table *entry; 5283 5284 /* 5285 * In the intermediate directories, both the child directory and 5286 * procname are dynamically allocated and could fail but the mode 5287 * will always be set. In the lowest directory the names are 5288 * static strings and all have proc handlers. 5289 */ 5290 for (entry = *tablep; entry->mode; entry++) { 5291 if (entry->child) 5292 sd_free_ctl_entry(&entry->child); 5293 if (entry->proc_handler == NULL) 5294 kfree(entry->procname); 5295 } 5296 5297 kfree(*tablep); 5298 *tablep = NULL; 5299 } 5300 5301 static int min_load_idx = 0; 5302 static int max_load_idx = CPU_LOAD_IDX_MAX-1; 5303 5304 static void 5305 set_table_entry(struct ctl_table *entry, 5306 const char *procname, void *data, int maxlen, 5307 umode_t mode, proc_handler *proc_handler, 5308 bool load_idx) 5309 { 5310 entry->procname = procname; 5311 entry->data = data; 5312 entry->maxlen = maxlen; 5313 entry->mode = mode; 5314 entry->proc_handler = proc_handler; 5315 5316 if (load_idx) { 5317 entry->extra1 = &min_load_idx; 5318 entry->extra2 = &max_load_idx; 5319 } 5320 } 5321 5322 static struct ctl_table * 5323 sd_alloc_ctl_domain_table(struct sched_domain *sd) 5324 { 5325 struct ctl_table *table = sd_alloc_ctl_entry(14); 5326 5327 if (table == NULL) 5328 return NULL; 5329 5330 set_table_entry(&table[0], "min_interval", &sd->min_interval, 5331 sizeof(long), 0644, proc_doulongvec_minmax, false); 5332 set_table_entry(&table[1], "max_interval", &sd->max_interval, 5333 sizeof(long), 0644, proc_doulongvec_minmax, false); 5334 set_table_entry(&table[2], "busy_idx", &sd->busy_idx, 5335 sizeof(int), 0644, proc_dointvec_minmax, true); 5336 set_table_entry(&table[3], "idle_idx", &sd->idle_idx, 5337 sizeof(int), 0644, proc_dointvec_minmax, true); 5338 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, 5339 sizeof(int), 0644, proc_dointvec_minmax, true); 5340 set_table_entry(&table[5], "wake_idx", &sd->wake_idx, 5341 sizeof(int), 0644, proc_dointvec_minmax, true); 5342 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, 5343 sizeof(int), 0644, proc_dointvec_minmax, true); 5344 set_table_entry(&table[7], "busy_factor", &sd->busy_factor, 5345 sizeof(int), 0644, proc_dointvec_minmax, false); 5346 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, 5347 sizeof(int), 0644, proc_dointvec_minmax, false); 5348 set_table_entry(&table[9], "cache_nice_tries", 5349 &sd->cache_nice_tries, 5350 sizeof(int), 0644, proc_dointvec_minmax, false); 5351 set_table_entry(&table[10], "flags", &sd->flags, 5352 sizeof(int), 0644, 
proc_dointvec_minmax, false); 5353 set_table_entry(&table[11], "max_newidle_lb_cost", 5354 &sd->max_newidle_lb_cost, 5355 sizeof(long), 0644, proc_doulongvec_minmax, false); 5356 set_table_entry(&table[12], "name", sd->name, 5357 CORENAME_MAX_SIZE, 0444, proc_dostring, false); 5358 /* &table[13] is terminator */ 5359 5360 return table; 5361 } 5362 5363 static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu) 5364 { 5365 struct ctl_table *entry, *table; 5366 struct sched_domain *sd; 5367 int domain_num = 0, i; 5368 char buf[32]; 5369 5370 for_each_domain(cpu, sd) 5371 domain_num++; 5372 entry = table = sd_alloc_ctl_entry(domain_num + 1); 5373 if (table == NULL) 5374 return NULL; 5375 5376 i = 0; 5377 for_each_domain(cpu, sd) { 5378 snprintf(buf, 32, "domain%d", i); 5379 entry->procname = kstrdup(buf, GFP_KERNEL); 5380 entry->mode = 0555; 5381 entry->child = sd_alloc_ctl_domain_table(sd); 5382 entry++; 5383 i++; 5384 } 5385 return table; 5386 } 5387 5388 static struct ctl_table_header *sd_sysctl_header; 5389 static void register_sched_domain_sysctl(void) 5390 { 5391 int i, cpu_num = num_possible_cpus(); 5392 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); 5393 char buf[32]; 5394 5395 WARN_ON(sd_ctl_dir[0].child); 5396 sd_ctl_dir[0].child = entry; 5397 5398 if (entry == NULL) 5399 return; 5400 5401 for_each_possible_cpu(i) { 5402 snprintf(buf, 32, "cpu%d", i); 5403 entry->procname = kstrdup(buf, GFP_KERNEL); 5404 entry->mode = 0555; 5405 entry->child = sd_alloc_ctl_cpu_table(i); 5406 entry++; 5407 } 5408 5409 WARN_ON(sd_sysctl_header); 5410 sd_sysctl_header = register_sysctl_table(sd_ctl_root); 5411 } 5412 5413 /* may be called multiple times per register */ 5414 static void unregister_sched_domain_sysctl(void) 5415 { 5416 unregister_sysctl_table(sd_sysctl_header); 5417 sd_sysctl_header = NULL; 5418 if (sd_ctl_dir[0].child) 5419 sd_free_ctl_entry(&sd_ctl_dir[0].child); 5420 } 5421 #else 5422 static void register_sched_domain_sysctl(void) 5423 { 5424 } 5425 static void unregister_sched_domain_sysctl(void) 5426 { 5427 } 5428 #endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */ 5429 5430 static void set_rq_online(struct rq *rq) 5431 { 5432 if (!rq->online) { 5433 const struct sched_class *class; 5434 5435 cpumask_set_cpu(rq->cpu, rq->rd->online); 5436 rq->online = 1; 5437 5438 for_each_class(class) { 5439 if (class->rq_online) 5440 class->rq_online(rq); 5441 } 5442 } 5443 } 5444 5445 static void set_rq_offline(struct rq *rq) 5446 { 5447 if (rq->online) { 5448 const struct sched_class *class; 5449 5450 for_each_class(class) { 5451 if (class->rq_offline) 5452 class->rq_offline(rq); 5453 } 5454 5455 cpumask_clear_cpu(rq->cpu, rq->rd->online); 5456 rq->online = 0; 5457 } 5458 } 5459 5460 /* 5461 * migration_call - callback that gets triggered when a CPU is added. 5462 * Here we can start up the necessary migration thread for the new CPU. 
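 *
 * (Summary of the cases handled below, not in the original: CPU_UP_PREPARE
 * resets the CPU's load-average update window, CPU_ONLINE attaches it to
 * its root-domain and marks the rq online, CPU_DYING takes the rq offline
 * and migrates every remaining task away, and CPU_DEAD folds the CPU's
 * load-average contribution into the global count.)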
5463 */ 5464 static int 5465 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) 5466 { 5467 int cpu = (long)hcpu; 5468 unsigned long flags; 5469 struct rq *rq = cpu_rq(cpu); 5470 5471 switch (action & ~CPU_TASKS_FROZEN) { 5472 5473 case CPU_UP_PREPARE: 5474 rq->calc_load_update = calc_load_update; 5475 break; 5476 5477 case CPU_ONLINE: 5478 /* Update our root-domain */ 5479 raw_spin_lock_irqsave(&rq->lock, flags); 5480 if (rq->rd) { 5481 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5482 5483 set_rq_online(rq); 5484 } 5485 raw_spin_unlock_irqrestore(&rq->lock, flags); 5486 break; 5487 5488 #ifdef CONFIG_HOTPLUG_CPU 5489 case CPU_DYING: 5490 sched_ttwu_pending(); 5491 /* Update our root-domain */ 5492 raw_spin_lock_irqsave(&rq->lock, flags); 5493 if (rq->rd) { 5494 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5495 set_rq_offline(rq); 5496 } 5497 migrate_tasks(rq); 5498 BUG_ON(rq->nr_running != 1); /* the migration thread */ 5499 raw_spin_unlock_irqrestore(&rq->lock, flags); 5500 break; 5501 5502 case CPU_DEAD: 5503 calc_load_migrate(rq); 5504 break; 5505 #endif 5506 } 5507 5508 update_max_interval(); 5509 5510 return NOTIFY_OK; 5511 } 5512 5513 /* 5514 * Register at high priority so that task migration (migrate_all_tasks) 5515 * happens before everything else. This has to be lower priority than 5516 * the notifier in the perf_event subsystem, though. 5517 */ 5518 static struct notifier_block migration_notifier = { 5519 .notifier_call = migration_call, 5520 .priority = CPU_PRI_MIGRATION, 5521 }; 5522 5523 static void set_cpu_rq_start_time(void) 5524 { 5525 int cpu = smp_processor_id(); 5526 struct rq *rq = cpu_rq(cpu); 5527 rq->age_stamp = sched_clock_cpu(cpu); 5528 } 5529 5530 static int sched_cpu_active(struct notifier_block *nfb, 5531 unsigned long action, void *hcpu) 5532 { 5533 switch (action & ~CPU_TASKS_FROZEN) { 5534 case CPU_STARTING: 5535 set_cpu_rq_start_time(); 5536 return NOTIFY_OK; 5537 case CPU_ONLINE: 5538 /* 5539 * At this point a starting CPU has marked itself as online via 5540 * set_cpu_online(). But it might not yet have marked itself 5541 * as active, which is essential from here on. 5542 * 5543 * Thus, fall-through and help the starting CPU along. 
5544 */ 5545 case CPU_DOWN_FAILED: 5546 set_cpu_active((long)hcpu, true); 5547 return NOTIFY_OK; 5548 default: 5549 return NOTIFY_DONE; 5550 } 5551 } 5552 5553 static int sched_cpu_inactive(struct notifier_block *nfb, 5554 unsigned long action, void *hcpu) 5555 { 5556 switch (action & ~CPU_TASKS_FROZEN) { 5557 case CPU_DOWN_PREPARE: 5558 set_cpu_active((long)hcpu, false); 5559 return NOTIFY_OK; 5560 default: 5561 return NOTIFY_DONE; 5562 } 5563 } 5564 5565 static int __init migration_init(void) 5566 { 5567 void *cpu = (void *)(long)smp_processor_id(); 5568 int err; 5569 5570 /* Initialize migration for the boot CPU */ 5571 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); 5572 BUG_ON(err == NOTIFY_BAD); 5573 migration_call(&migration_notifier, CPU_ONLINE, cpu); 5574 register_cpu_notifier(&migration_notifier); 5575 5576 /* Register cpu active notifiers */ 5577 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); 5578 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); 5579 5580 return 0; 5581 } 5582 early_initcall(migration_init); 5583 5584 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ 5585 5586 #ifdef CONFIG_SCHED_DEBUG 5587 5588 static __read_mostly int sched_debug_enabled; 5589 5590 static int __init sched_debug_setup(char *str) 5591 { 5592 sched_debug_enabled = 1; 5593 5594 return 0; 5595 } 5596 early_param("sched_debug", sched_debug_setup); 5597 5598 static inline bool sched_debug(void) 5599 { 5600 return sched_debug_enabled; 5601 } 5602 5603 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 5604 struct cpumask *groupmask) 5605 { 5606 struct sched_group *group = sd->groups; 5607 5608 cpumask_clear(groupmask); 5609 5610 printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 5611 5612 if (!(sd->flags & SD_LOAD_BALANCE)) { 5613 printk("does not load-balance\n"); 5614 if (sd->parent) 5615 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" 5616 " has parent"); 5617 return -1; 5618 } 5619 5620 printk(KERN_CONT "span %*pbl level %s\n", 5621 cpumask_pr_args(sched_domain_span(sd)), sd->name); 5622 5623 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 5624 printk(KERN_ERR "ERROR: domain->span does not contain " 5625 "CPU%d\n", cpu); 5626 } 5627 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { 5628 printk(KERN_ERR "ERROR: domain->groups does not contain" 5629 " CPU%d\n", cpu); 5630 } 5631 5632 printk(KERN_DEBUG "%*s groups:", level + 1, ""); 5633 do { 5634 if (!group) { 5635 printk("\n"); 5636 printk(KERN_ERR "ERROR: group is NULL\n"); 5637 break; 5638 } 5639 5640 if (!cpumask_weight(sched_group_cpus(group))) { 5641 printk(KERN_CONT "\n"); 5642 printk(KERN_ERR "ERROR: empty group\n"); 5643 break; 5644 } 5645 5646 if (!(sd->flags & SD_OVERLAP) && 5647 cpumask_intersects(groupmask, sched_group_cpus(group))) { 5648 printk(KERN_CONT "\n"); 5649 printk(KERN_ERR "ERROR: repeated CPUs\n"); 5650 break; 5651 } 5652 5653 cpumask_or(groupmask, groupmask, sched_group_cpus(group)); 5654 5655 printk(KERN_CONT " %*pbl", 5656 cpumask_pr_args(sched_group_cpus(group))); 5657 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { 5658 printk(KERN_CONT " (cpu_capacity = %d)", 5659 group->sgc->capacity); 5660 } 5661 5662 group = group->next; 5663 } while (group != sd->groups); 5664 printk(KERN_CONT "\n"); 5665 5666 if (!cpumask_equal(sched_domain_span(sd), groupmask)) 5667 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 5668 5669 if (sd->parent && 5670 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 5671 
printk(KERN_ERR "ERROR: parent span is not a superset " 5672 "of domain->span\n"); 5673 return 0; 5674 } 5675 5676 static void sched_domain_debug(struct sched_domain *sd, int cpu) 5677 { 5678 int level = 0; 5679 5680 if (!sched_debug_enabled) 5681 return; 5682 5683 if (!sd) { 5684 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 5685 return; 5686 } 5687 5688 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); 5689 5690 for (;;) { 5691 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) 5692 break; 5693 level++; 5694 sd = sd->parent; 5695 if (!sd) 5696 break; 5697 } 5698 } 5699 #else /* !CONFIG_SCHED_DEBUG */ 5700 # define sched_domain_debug(sd, cpu) do { } while (0) 5701 static inline bool sched_debug(void) 5702 { 5703 return false; 5704 } 5705 #endif /* CONFIG_SCHED_DEBUG */ 5706 5707 static int sd_degenerate(struct sched_domain *sd) 5708 { 5709 if (cpumask_weight(sched_domain_span(sd)) == 1) 5710 return 1; 5711 5712 /* Following flags need at least 2 groups */ 5713 if (sd->flags & (SD_LOAD_BALANCE | 5714 SD_BALANCE_NEWIDLE | 5715 SD_BALANCE_FORK | 5716 SD_BALANCE_EXEC | 5717 SD_SHARE_CPUCAPACITY | 5718 SD_SHARE_PKG_RESOURCES | 5719 SD_SHARE_POWERDOMAIN)) { 5720 if (sd->groups != sd->groups->next) 5721 return 0; 5722 } 5723 5724 /* Following flags don't use groups */ 5725 if (sd->flags & (SD_WAKE_AFFINE)) 5726 return 0; 5727 5728 return 1; 5729 } 5730 5731 static int 5732 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) 5733 { 5734 unsigned long cflags = sd->flags, pflags = parent->flags; 5735 5736 if (sd_degenerate(parent)) 5737 return 1; 5738 5739 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) 5740 return 0; 5741 5742 /* Flags needing groups don't count if only 1 group in parent */ 5743 if (parent->groups == parent->groups->next) { 5744 pflags &= ~(SD_LOAD_BALANCE | 5745 SD_BALANCE_NEWIDLE | 5746 SD_BALANCE_FORK | 5747 SD_BALANCE_EXEC | 5748 SD_SHARE_CPUCAPACITY | 5749 SD_SHARE_PKG_RESOURCES | 5750 SD_PREFER_SIBLING | 5751 SD_SHARE_POWERDOMAIN); 5752 if (nr_node_ids == 1) 5753 pflags &= ~SD_SERIALIZE; 5754 } 5755 if (~cflags & pflags) 5756 return 0; 5757 5758 return 1; 5759 } 5760 5761 static void free_rootdomain(struct rcu_head *rcu) 5762 { 5763 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); 5764 5765 cpupri_cleanup(&rd->cpupri); 5766 cpudl_cleanup(&rd->cpudl); 5767 free_cpumask_var(rd->dlo_mask); 5768 free_cpumask_var(rd->rto_mask); 5769 free_cpumask_var(rd->online); 5770 free_cpumask_var(rd->span); 5771 kfree(rd); 5772 } 5773 5774 static void rq_attach_root(struct rq *rq, struct root_domain *rd) 5775 { 5776 struct root_domain *old_rd = NULL; 5777 unsigned long flags; 5778 5779 raw_spin_lock_irqsave(&rq->lock, flags); 5780 5781 if (rq->rd) { 5782 old_rd = rq->rd; 5783 5784 if (cpumask_test_cpu(rq->cpu, old_rd->online)) 5785 set_rq_offline(rq); 5786 5787 cpumask_clear_cpu(rq->cpu, old_rd->span); 5788 5789 /* 5790 * If we dont want to free the old_rd yet then 5791 * set old_rd to NULL to skip the freeing later 5792 * in this function: 5793 */ 5794 if (!atomic_dec_and_test(&old_rd->refcount)) 5795 old_rd = NULL; 5796 } 5797 5798 atomic_inc(&rd->refcount); 5799 rq->rd = rd; 5800 5801 cpumask_set_cpu(rq->cpu, rd->span); 5802 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 5803 set_rq_online(rq); 5804 5805 raw_spin_unlock_irqrestore(&rq->lock, flags); 5806 5807 if (old_rd) 5808 call_rcu_sched(&old_rd->rcu, free_rootdomain); 5809 } 5810 5811 static int init_rootdomain(struct 
root_domain *rd) 5812 { 5813 memset(rd, 0, sizeof(*rd)); 5814 5815 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) 5816 goto out; 5817 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) 5818 goto free_span; 5819 if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 5820 goto free_online; 5821 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 5822 goto free_dlo_mask; 5823 5824 init_dl_bw(&rd->dl_bw); 5825 if (cpudl_init(&rd->cpudl) != 0) 5826 goto free_dlo_mask; 5827 5828 if (cpupri_init(&rd->cpupri) != 0) 5829 goto free_rto_mask; 5830 return 0; 5831 5832 free_rto_mask: 5833 free_cpumask_var(rd->rto_mask); 5834 free_dlo_mask: 5835 free_cpumask_var(rd->dlo_mask); 5836 free_online: 5837 free_cpumask_var(rd->online); 5838 free_span: 5839 free_cpumask_var(rd->span); 5840 out: 5841 return -ENOMEM; 5842 } 5843 5844 /* 5845 * By default the system creates a single root-domain with all cpus as 5846 * members (mimicking the global state we have today). 5847 */ 5848 struct root_domain def_root_domain; 5849 5850 static void init_defrootdomain(void) 5851 { 5852 init_rootdomain(&def_root_domain); 5853 5854 atomic_set(&def_root_domain.refcount, 1); 5855 } 5856 5857 static struct root_domain *alloc_rootdomain(void) 5858 { 5859 struct root_domain *rd; 5860 5861 rd = kmalloc(sizeof(*rd), GFP_KERNEL); 5862 if (!rd) 5863 return NULL; 5864 5865 if (init_rootdomain(rd) != 0) { 5866 kfree(rd); 5867 return NULL; 5868 } 5869 5870 return rd; 5871 } 5872 5873 static void free_sched_groups(struct sched_group *sg, int free_sgc) 5874 { 5875 struct sched_group *tmp, *first; 5876 5877 if (!sg) 5878 return; 5879 5880 first = sg; 5881 do { 5882 tmp = sg->next; 5883 5884 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) 5885 kfree(sg->sgc); 5886 5887 kfree(sg); 5888 sg = tmp; 5889 } while (sg != first); 5890 } 5891 5892 static void free_sched_domain(struct rcu_head *rcu) 5893 { 5894 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); 5895 5896 /* 5897 * If its an overlapping domain it has private groups, iterate and 5898 * nuke them all. 5899 */ 5900 if (sd->flags & SD_OVERLAP) { 5901 free_sched_groups(sd->groups, 1); 5902 } else if (atomic_dec_and_test(&sd->groups->ref)) { 5903 kfree(sd->groups->sgc); 5904 kfree(sd->groups); 5905 } 5906 kfree(sd); 5907 } 5908 5909 static void destroy_sched_domain(struct sched_domain *sd, int cpu) 5910 { 5911 call_rcu(&sd->rcu, free_sched_domain); 5912 } 5913 5914 static void destroy_sched_domains(struct sched_domain *sd, int cpu) 5915 { 5916 for (; sd; sd = sd->parent) 5917 destroy_sched_domain(sd, cpu); 5918 } 5919 5920 /* 5921 * Keep a special pointer to the highest sched_domain that has 5922 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this 5923 * allows us to avoid some pointer chasing select_idle_sibling(). 5924 * 5925 * Also keep a unique ID per domain (we use the first cpu number in 5926 * the cpumask of the domain), this allows us to quickly tell if 5927 * two cpus are in the same cache domain, see cpus_share_cache(). 
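 *
 * For reference (a sketch, not the verbatim implementation): the check in
 * cpus_share_cache() boils down to comparing the two per-cpu IDs set up
 * below, along the lines of
 *
 *	bool cpus_share_cache(int this_cpu, int that_cpu)
 *	{
 *		return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 *	}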
5928 */ 5929 DEFINE_PER_CPU(struct sched_domain *, sd_llc); 5930 DEFINE_PER_CPU(int, sd_llc_size); 5931 DEFINE_PER_CPU(int, sd_llc_id); 5932 DEFINE_PER_CPU(struct sched_domain *, sd_numa); 5933 DEFINE_PER_CPU(struct sched_domain *, sd_busy); 5934 DEFINE_PER_CPU(struct sched_domain *, sd_asym); 5935 5936 static void update_top_cache_domain(int cpu) 5937 { 5938 struct sched_domain *sd; 5939 struct sched_domain *busy_sd = NULL; 5940 int id = cpu; 5941 int size = 1; 5942 5943 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); 5944 if (sd) { 5945 id = cpumask_first(sched_domain_span(sd)); 5946 size = cpumask_weight(sched_domain_span(sd)); 5947 busy_sd = sd->parent; /* sd_busy */ 5948 } 5949 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd); 5950 5951 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 5952 per_cpu(sd_llc_size, cpu) = size; 5953 per_cpu(sd_llc_id, cpu) = id; 5954 5955 sd = lowest_flag_domain(cpu, SD_NUMA); 5956 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); 5957 5958 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); 5959 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); 5960 } 5961 5962 /* 5963 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must 5964 * hold the hotplug lock. 5965 */ 5966 static void 5967 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) 5968 { 5969 struct rq *rq = cpu_rq(cpu); 5970 struct sched_domain *tmp; 5971 5972 /* Remove the sched domains which do not contribute to scheduling. */ 5973 for (tmp = sd; tmp; ) { 5974 struct sched_domain *parent = tmp->parent; 5975 if (!parent) 5976 break; 5977 5978 if (sd_parent_degenerate(tmp, parent)) { 5979 tmp->parent = parent->parent; 5980 if (parent->parent) 5981 parent->parent->child = tmp; 5982 /* 5983 * Transfer SD_PREFER_SIBLING down in case of a 5984 * degenerate parent; the spans match for this 5985 * so the property transfers. 5986 */ 5987 if (parent->flags & SD_PREFER_SIBLING) 5988 tmp->flags |= SD_PREFER_SIBLING; 5989 destroy_sched_domain(parent, cpu); 5990 } else 5991 tmp = tmp->parent; 5992 } 5993 5994 if (sd && sd_degenerate(sd)) { 5995 tmp = sd; 5996 sd = sd->parent; 5997 destroy_sched_domain(tmp, cpu); 5998 if (sd) 5999 sd->child = NULL; 6000 } 6001 6002 sched_domain_debug(sd, cpu); 6003 6004 rq_attach_root(rq, rd); 6005 tmp = rq->sd; 6006 rcu_assign_pointer(rq->sd, sd); 6007 destroy_sched_domains(tmp, cpu); 6008 6009 update_top_cache_domain(cpu); 6010 } 6011 6012 /* Setup the mask of cpus configured for isolated domains */ 6013 static int __init isolated_cpu_setup(char *str) 6014 { 6015 alloc_bootmem_cpumask_var(&cpu_isolated_map); 6016 cpulist_parse(str, cpu_isolated_map); 6017 return 1; 6018 } 6019 6020 __setup("isolcpus=", isolated_cpu_setup); 6021 6022 struct s_data { 6023 struct sched_domain ** __percpu sd; 6024 struct root_domain *rd; 6025 }; 6026 6027 enum s_alloc { 6028 sa_rootdomain, 6029 sa_sd, 6030 sa_sd_storage, 6031 sa_none, 6032 }; 6033 6034 /* 6035 * Build an iteration mask that can exclude certain CPUs from the upwards 6036 * domain traversal. 6037 * 6038 * Asymmetric node setups can result in situations where the domain tree is of 6039 * unequal depth, make sure to skip domains that already cover the entire 6040 * range. 6041 * 6042 * In that case build_sched_domains() will have terminated the iteration early 6043 * and our sibling sd spans will be empty. Domains should always include the 6044 * cpu they're built on, so check that. 
6045 * 6046 */ 6047 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) 6048 { 6049 const struct cpumask *span = sched_domain_span(sd); 6050 struct sd_data *sdd = sd->private; 6051 struct sched_domain *sibling; 6052 int i; 6053 6054 for_each_cpu(i, span) { 6055 sibling = *per_cpu_ptr(sdd->sd, i); 6056 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 6057 continue; 6058 6059 cpumask_set_cpu(i, sched_group_mask(sg)); 6060 } 6061 } 6062 6063 /* 6064 * Return the canonical balance cpu for this group, this is the first cpu 6065 * of this group that's also in the iteration mask. 6066 */ 6067 int group_balance_cpu(struct sched_group *sg) 6068 { 6069 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); 6070 } 6071 6072 static int 6073 build_overlap_sched_groups(struct sched_domain *sd, int cpu) 6074 { 6075 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; 6076 const struct cpumask *span = sched_domain_span(sd); 6077 struct cpumask *covered = sched_domains_tmpmask; 6078 struct sd_data *sdd = sd->private; 6079 struct sched_domain *sibling; 6080 int i; 6081 6082 cpumask_clear(covered); 6083 6084 for_each_cpu(i, span) { 6085 struct cpumask *sg_span; 6086 6087 if (cpumask_test_cpu(i, covered)) 6088 continue; 6089 6090 sibling = *per_cpu_ptr(sdd->sd, i); 6091 6092 /* See the comment near build_group_mask(). */ 6093 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 6094 continue; 6095 6096 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6097 GFP_KERNEL, cpu_to_node(cpu)); 6098 6099 if (!sg) 6100 goto fail; 6101 6102 sg_span = sched_group_cpus(sg); 6103 if (sibling->child) 6104 cpumask_copy(sg_span, sched_domain_span(sibling->child)); 6105 else 6106 cpumask_set_cpu(i, sg_span); 6107 6108 cpumask_or(covered, covered, sg_span); 6109 6110 sg->sgc = *per_cpu_ptr(sdd->sgc, i); 6111 if (atomic_inc_return(&sg->sgc->ref) == 1) 6112 build_group_mask(sd, sg); 6113 6114 /* 6115 * Initialize sgc->capacity such that even if we mess up the 6116 * domains and no possible iteration will get us here, we won't 6117 * die on a /0 trap. 6118 */ 6119 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); 6120 6121 /* 6122 * Make sure the first group of this domain contains the 6123 * canonical balance cpu. Otherwise the sched_domain iteration 6124 * breaks. See update_sg_lb_stats(). 6125 */ 6126 if ((!groups && cpumask_test_cpu(cpu, sg_span)) || 6127 group_balance_cpu(sg) == cpu) 6128 groups = sg; 6129 6130 if (!first) 6131 first = sg; 6132 if (last) 6133 last->next = sg; 6134 last = sg; 6135 last->next = first; 6136 } 6137 sd->groups = groups; 6138 6139 return 0; 6140 6141 fail: 6142 free_sched_groups(first, 0); 6143 6144 return -ENOMEM; 6145 } 6146 6147 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) 6148 { 6149 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 6150 struct sched_domain *child = sd->child; 6151 6152 if (child) 6153 cpu = cpumask_first(sched_domain_span(child)); 6154 6155 if (sg) { 6156 *sg = *per_cpu_ptr(sdd->sg, cpu); 6157 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); 6158 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */ 6159 } 6160 6161 return cpu; 6162 } 6163 6164 /* 6165 * build_sched_groups will build a circular linked list of the groups 6166 * covered by the given span, and will set each group's ->cpumask correctly, 6167 * and ->cpu_capacity to 0. 
6168 * 6169 * Assumes the sched_domain tree is fully constructed 6170 */ 6171 static int 6172 build_sched_groups(struct sched_domain *sd, int cpu) 6173 { 6174 struct sched_group *first = NULL, *last = NULL; 6175 struct sd_data *sdd = sd->private; 6176 const struct cpumask *span = sched_domain_span(sd); 6177 struct cpumask *covered; 6178 int i; 6179 6180 get_group(cpu, sdd, &sd->groups); 6181 atomic_inc(&sd->groups->ref); 6182 6183 if (cpu != cpumask_first(span)) 6184 return 0; 6185 6186 lockdep_assert_held(&sched_domains_mutex); 6187 covered = sched_domains_tmpmask; 6188 6189 cpumask_clear(covered); 6190 6191 for_each_cpu(i, span) { 6192 struct sched_group *sg; 6193 int group, j; 6194 6195 if (cpumask_test_cpu(i, covered)) 6196 continue; 6197 6198 group = get_group(i, sdd, &sg); 6199 cpumask_setall(sched_group_mask(sg)); 6200 6201 for_each_cpu(j, span) { 6202 if (get_group(j, sdd, NULL) != group) 6203 continue; 6204 6205 cpumask_set_cpu(j, covered); 6206 cpumask_set_cpu(j, sched_group_cpus(sg)); 6207 } 6208 6209 if (!first) 6210 first = sg; 6211 if (last) 6212 last->next = sg; 6213 last = sg; 6214 } 6215 last->next = first; 6216 6217 return 0; 6218 } 6219 6220 /* 6221 * Initialize sched groups cpu_capacity. 6222 * 6223 * cpu_capacity indicates the capacity of sched group, which is used while 6224 * distributing the load between different sched groups in a sched domain. 6225 * Typically cpu_capacity for all the groups in a sched domain will be same 6226 * unless there are asymmetries in the topology. If there are asymmetries, 6227 * group having more cpu_capacity will pickup more load compared to the 6228 * group having less cpu_capacity. 6229 */ 6230 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) 6231 { 6232 struct sched_group *sg = sd->groups; 6233 6234 WARN_ON(!sg); 6235 6236 do { 6237 sg->group_weight = cpumask_weight(sched_group_cpus(sg)); 6238 sg = sg->next; 6239 } while (sg != sd->groups); 6240 6241 if (cpu != group_balance_cpu(sg)) 6242 return; 6243 6244 update_group_capacity(sd, cpu); 6245 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight); 6246 } 6247 6248 /* 6249 * Initializers for schedule domains 6250 * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 6251 */ 6252 6253 static int default_relax_domain_level = -1; 6254 int sched_domain_level_max; 6255 6256 static int __init setup_relax_domain_level(char *str) 6257 { 6258 if (kstrtoint(str, 0, &default_relax_domain_level)) 6259 pr_warn("Unable to set relax_domain_level\n"); 6260 6261 return 1; 6262 } 6263 __setup("relax_domain_level=", setup_relax_domain_level); 6264 6265 static void set_domain_attribute(struct sched_domain *sd, 6266 struct sched_domain_attr *attr) 6267 { 6268 int request; 6269 6270 if (!attr || attr->relax_domain_level < 0) { 6271 if (default_relax_domain_level < 0) 6272 return; 6273 else 6274 request = default_relax_domain_level; 6275 } else 6276 request = attr->relax_domain_level; 6277 if (request < sd->level) { 6278 /* turn off idle balance on this domain */ 6279 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 6280 } else { 6281 /* turn on idle balance on this domain */ 6282 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 6283 } 6284 } 6285 6286 static void __sdt_free(const struct cpumask *cpu_map); 6287 static int __sdt_alloc(const struct cpumask *cpu_map); 6288 6289 static void __free_domain_allocs(struct s_data *d, enum s_alloc what, 6290 const struct cpumask *cpu_map) 6291 { 6292 switch (what) { 6293 case sa_rootdomain: 6294 if 
(!atomic_read(&d->rd->refcount)) 6295 free_rootdomain(&d->rd->rcu); /* fall through */ 6296 case sa_sd: 6297 free_percpu(d->sd); /* fall through */ 6298 case sa_sd_storage: 6299 __sdt_free(cpu_map); /* fall through */ 6300 case sa_none: 6301 break; 6302 } 6303 } 6304 6305 static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, 6306 const struct cpumask *cpu_map) 6307 { 6308 memset(d, 0, sizeof(*d)); 6309 6310 if (__sdt_alloc(cpu_map)) 6311 return sa_sd_storage; 6312 d->sd = alloc_percpu(struct sched_domain *); 6313 if (!d->sd) 6314 return sa_sd_storage; 6315 d->rd = alloc_rootdomain(); 6316 if (!d->rd) 6317 return sa_sd; 6318 return sa_rootdomain; 6319 } 6320 6321 /* 6322 * NULL the sd_data elements we've used to build the sched_domain and 6323 * sched_group structure so that the subsequent __free_domain_allocs() 6324 * will not free the data we're using. 6325 */ 6326 static void claim_allocations(int cpu, struct sched_domain *sd) 6327 { 6328 struct sd_data *sdd = sd->private; 6329 6330 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 6331 *per_cpu_ptr(sdd->sd, cpu) = NULL; 6332 6333 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 6334 *per_cpu_ptr(sdd->sg, cpu) = NULL; 6335 6336 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 6337 *per_cpu_ptr(sdd->sgc, cpu) = NULL; 6338 } 6339 6340 #ifdef CONFIG_NUMA 6341 static int sched_domains_numa_levels; 6342 enum numa_topology_type sched_numa_topology_type; 6343 static int *sched_domains_numa_distance; 6344 int sched_max_numa_distance; 6345 static struct cpumask ***sched_domains_numa_masks; 6346 static int sched_domains_curr_level; 6347 #endif 6348 6349 /* 6350 * SD_flags allowed in topology descriptions. 6351 * 6352 * SD_SHARE_CPUCAPACITY - describes SMT topologies 6353 * SD_SHARE_PKG_RESOURCES - describes shared caches 6354 * SD_NUMA - describes NUMA topologies 6355 * SD_SHARE_POWERDOMAIN - describes shared power domain 6356 * 6357 * Odd one out: 6358 * SD_ASYM_PACKING - describes SMT quirks 6359 */ 6360 #define TOPOLOGY_SD_FLAGS \ 6361 (SD_SHARE_CPUCAPACITY | \ 6362 SD_SHARE_PKG_RESOURCES | \ 6363 SD_NUMA | \ 6364 SD_ASYM_PACKING | \ 6365 SD_SHARE_POWERDOMAIN) 6366 6367 static struct sched_domain * 6368 sd_init(struct sched_domain_topology_level *tl, int cpu) 6369 { 6370 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); 6371 int sd_weight, sd_flags = 0; 6372 6373 #ifdef CONFIG_NUMA 6374 /* 6375 * Ugly hack to pass state to sd_numa_mask()... 
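 * (sd_numa_mask() is defined further down; it cannot take the NUMA level
 *  as an argument because the tl->mask() callback only receives a cpu,
 *  so the level is smuggled through this per-build global instead.)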
6376 */ 6377 sched_domains_curr_level = tl->numa_level; 6378 #endif 6379 6380 sd_weight = cpumask_weight(tl->mask(cpu)); 6381 6382 if (tl->sd_flags) 6383 sd_flags = (*tl->sd_flags)(); 6384 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, 6385 "wrong sd_flags in topology description\n")) 6386 sd_flags &= ~TOPOLOGY_SD_FLAGS; 6387 6388 *sd = (struct sched_domain){ 6389 .min_interval = sd_weight, 6390 .max_interval = 2*sd_weight, 6391 .busy_factor = 32, 6392 .imbalance_pct = 125, 6393 6394 .cache_nice_tries = 0, 6395 .busy_idx = 0, 6396 .idle_idx = 0, 6397 .newidle_idx = 0, 6398 .wake_idx = 0, 6399 .forkexec_idx = 0, 6400 6401 .flags = 1*SD_LOAD_BALANCE 6402 | 1*SD_BALANCE_NEWIDLE 6403 | 1*SD_BALANCE_EXEC 6404 | 1*SD_BALANCE_FORK 6405 | 0*SD_BALANCE_WAKE 6406 | 1*SD_WAKE_AFFINE 6407 | 0*SD_SHARE_CPUCAPACITY 6408 | 0*SD_SHARE_PKG_RESOURCES 6409 | 0*SD_SERIALIZE 6410 | 0*SD_PREFER_SIBLING 6411 | 0*SD_NUMA 6412 | sd_flags 6413 , 6414 6415 .last_balance = jiffies, 6416 .balance_interval = sd_weight, 6417 .smt_gain = 0, 6418 .max_newidle_lb_cost = 0, 6419 .next_decay_max_lb_cost = jiffies, 6420 #ifdef CONFIG_SCHED_DEBUG 6421 .name = tl->name, 6422 #endif 6423 }; 6424 6425 /* 6426 * Convert topological properties into behaviour. 6427 */ 6428 6429 if (sd->flags & SD_SHARE_CPUCAPACITY) { 6430 sd->flags |= SD_PREFER_SIBLING; 6431 sd->imbalance_pct = 110; 6432 sd->smt_gain = 1178; /* ~15% */ 6433 6434 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { 6435 sd->imbalance_pct = 117; 6436 sd->cache_nice_tries = 1; 6437 sd->busy_idx = 2; 6438 6439 #ifdef CONFIG_NUMA 6440 } else if (sd->flags & SD_NUMA) { 6441 sd->cache_nice_tries = 2; 6442 sd->busy_idx = 3; 6443 sd->idle_idx = 2; 6444 6445 sd->flags |= SD_SERIALIZE; 6446 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { 6447 sd->flags &= ~(SD_BALANCE_EXEC | 6448 SD_BALANCE_FORK | 6449 SD_WAKE_AFFINE); 6450 } 6451 6452 #endif 6453 } else { 6454 sd->flags |= SD_PREFER_SIBLING; 6455 sd->cache_nice_tries = 1; 6456 sd->busy_idx = 2; 6457 sd->idle_idx = 1; 6458 } 6459 6460 sd->private = &tl->data; 6461 6462 return sd; 6463 } 6464 6465 /* 6466 * Topology list, bottom-up. 
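 *
 * For illustration only (hypothetical SMT-capable, multi-core machine),
 * the entries below expand bottom-up into:
 *
 *	SMT  - hardware threads of one core
 *	MC   - cores sharing a package/LLC
 *	DIE  - all cpus in the system
 *
 * sched_init_numa() may later append one NUMA level per distinct node
 * distance on top of DIE.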
6467 */ 6468 static struct sched_domain_topology_level default_topology[] = { 6469 #ifdef CONFIG_SCHED_SMT 6470 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 6471 #endif 6472 #ifdef CONFIG_SCHED_MC 6473 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 6474 #endif 6475 { cpu_cpu_mask, SD_INIT_NAME(DIE) }, 6476 { NULL, }, 6477 }; 6478 6479 struct sched_domain_topology_level *sched_domain_topology = default_topology; 6480 6481 #define for_each_sd_topology(tl) \ 6482 for (tl = sched_domain_topology; tl->mask; tl++) 6483 6484 void set_sched_topology(struct sched_domain_topology_level *tl) 6485 { 6486 sched_domain_topology = tl; 6487 } 6488 6489 #ifdef CONFIG_NUMA 6490 6491 static const struct cpumask *sd_numa_mask(int cpu) 6492 { 6493 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 6494 } 6495 6496 static void sched_numa_warn(const char *str) 6497 { 6498 static int done = false; 6499 int i,j; 6500 6501 if (done) 6502 return; 6503 6504 done = true; 6505 6506 printk(KERN_WARNING "ERROR: %s\n\n", str); 6507 6508 for (i = 0; i < nr_node_ids; i++) { 6509 printk(KERN_WARNING " "); 6510 for (j = 0; j < nr_node_ids; j++) 6511 printk(KERN_CONT "%02d ", node_distance(i,j)); 6512 printk(KERN_CONT "\n"); 6513 } 6514 printk(KERN_WARNING "\n"); 6515 } 6516 6517 bool find_numa_distance(int distance) 6518 { 6519 int i; 6520 6521 if (distance == node_distance(0, 0)) 6522 return true; 6523 6524 for (i = 0; i < sched_domains_numa_levels; i++) { 6525 if (sched_domains_numa_distance[i] == distance) 6526 return true; 6527 } 6528 6529 return false; 6530 } 6531 6532 /* 6533 * A system can have three types of NUMA topology: 6534 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system 6535 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes 6536 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane 6537 * 6538 * The difference between a glueless mesh topology and a backplane 6539 * topology lies in whether communication between not directly 6540 * connected nodes goes through intermediary nodes (where programs 6541 * could run), or through backplane controllers. This affects 6542 * placement of programs. 6543 * 6544 * The type of topology can be discerned with the following tests: 6545 * - If the maximum distance between any nodes is 1 hop, the system 6546 * is directly connected. 6547 * - If for two nodes A and B, located N > 1 hops away from each other, 6548 * there is an intermediary node C, which is < N hops away from both 6549 * nodes A and B, the system is a glueless mesh. 6550 */ 6551 static void init_numa_topology_type(void) 6552 { 6553 int a, b, c, n; 6554 6555 n = sched_max_numa_distance; 6556 6557 if (sched_domains_numa_levels <= 1) { 6558 sched_numa_topology_type = NUMA_DIRECT; 6559 return; 6560 } 6561 6562 for_each_online_node(a) { 6563 for_each_online_node(b) { 6564 /* Find two nodes furthest removed from each other. */ 6565 if (node_distance(a, b) < n) 6566 continue; 6567 6568 /* Is there an intermediary node between a and b? 
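 *			  (Illustration with made-up distances: if the maximum
 *			   distance n is 30 and some node c sits at distance 20
 *			   from both a and b, c is such an intermediary and the
 *			   topology is classified NUMA_GLUELESS_MESH; if no such
 *			   c exists we fall through to NUMA_BACKPLANE below.)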
*/ 6569 for_each_online_node(c) { 6570 if (node_distance(a, c) < n && 6571 node_distance(b, c) < n) { 6572 sched_numa_topology_type = 6573 NUMA_GLUELESS_MESH; 6574 return; 6575 } 6576 } 6577 6578 sched_numa_topology_type = NUMA_BACKPLANE; 6579 return; 6580 } 6581 } 6582 } 6583 6584 static void sched_init_numa(void) 6585 { 6586 int next_distance, curr_distance = node_distance(0, 0); 6587 struct sched_domain_topology_level *tl; 6588 int level = 0; 6589 int i, j, k; 6590 6591 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); 6592 if (!sched_domains_numa_distance) 6593 return; 6594 6595 /* 6596 * O(nr_nodes^2) deduplicating selection sort -- in order to find the 6597 * unique distances in the node_distance() table. 6598 * 6599 * Assumes node_distance(0,j) includes all distances in 6600 * node_distance(i,j) in order to avoid cubic time. 6601 */ 6602 next_distance = curr_distance; 6603 for (i = 0; i < nr_node_ids; i++) { 6604 for (j = 0; j < nr_node_ids; j++) { 6605 for (k = 0; k < nr_node_ids; k++) { 6606 int distance = node_distance(i, k); 6607 6608 if (distance > curr_distance && 6609 (distance < next_distance || 6610 next_distance == curr_distance)) 6611 next_distance = distance; 6612 6613 /* 6614 * While not a strong assumption it would be nice to know 6615 * about cases where if node A is connected to B, B is not 6616 * equally connected to A. 6617 */ 6618 if (sched_debug() && node_distance(k, i) != distance) 6619 sched_numa_warn("Node-distance not symmetric"); 6620 6621 if (sched_debug() && i && !find_numa_distance(distance)) 6622 sched_numa_warn("Node-0 not representative"); 6623 } 6624 if (next_distance != curr_distance) { 6625 sched_domains_numa_distance[level++] = next_distance; 6626 sched_domains_numa_levels = level; 6627 curr_distance = next_distance; 6628 } else break; 6629 } 6630 6631 /* 6632 * In case of sched_debug() we verify the above assumption. 6633 */ 6634 if (!sched_debug()) 6635 break; 6636 } 6637 6638 if (!level) 6639 return; 6640 6641 /* 6642 * 'level' contains the number of unique distances, excluding the 6643 * identity distance node_distance(i,i). 6644 * 6645 * The sched_domains_numa_distance[] array includes the actual distance 6646 * numbers. 6647 */ 6648 6649 /* 6650 * Here, we should temporarily reset sched_domains_numa_levels to 0. 6651 * If it fails to allocate memory for array sched_domains_numa_masks[][], 6652 * the array will contain less then 'level' members. This could be 6653 * dangerous when we use it to iterate array sched_domains_numa_masks[][] 6654 * in other functions. 6655 * 6656 * We reset it to 'level' at the end of this function. 6657 */ 6658 sched_domains_numa_levels = 0; 6659 6660 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); 6661 if (!sched_domains_numa_masks) 6662 return; 6663 6664 /* 6665 * Now for each level, construct a mask per node which contains all 6666 * cpus of nodes that are that many hops away from us. 
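 *
 * For illustration only (hypothetical 4-node box with distances 10, 20
 * and 30): the unique remote distances found above are 20 and 30, so
 * sched_domains_numa_masks[0][j] covers node j plus every node within
 * distance 20 of it, while sched_domains_numa_masks[1][j] covers all
 * nodes; sd_numa_mask() later hands these masks out per topology level.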
6667 */ 6668 for (i = 0; i < level; i++) { 6669 sched_domains_numa_masks[i] = 6670 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 6671 if (!sched_domains_numa_masks[i]) 6672 return; 6673 6674 for (j = 0; j < nr_node_ids; j++) { 6675 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 6676 if (!mask) 6677 return; 6678 6679 sched_domains_numa_masks[i][j] = mask; 6680 6681 for (k = 0; k < nr_node_ids; k++) { 6682 if (node_distance(j, k) > sched_domains_numa_distance[i]) 6683 continue; 6684 6685 cpumask_or(mask, mask, cpumask_of_node(k)); 6686 } 6687 } 6688 } 6689 6690 /* Compute default topology size */ 6691 for (i = 0; sched_domain_topology[i].mask; i++); 6692 6693 tl = kzalloc((i + level + 1) * 6694 sizeof(struct sched_domain_topology_level), GFP_KERNEL); 6695 if (!tl) 6696 return; 6697 6698 /* 6699 * Copy the default topology bits.. 6700 */ 6701 for (i = 0; sched_domain_topology[i].mask; i++) 6702 tl[i] = sched_domain_topology[i]; 6703 6704 /* 6705 * .. and append 'j' levels of NUMA goodness. 6706 */ 6707 for (j = 0; j < level; i++, j++) { 6708 tl[i] = (struct sched_domain_topology_level){ 6709 .mask = sd_numa_mask, 6710 .sd_flags = cpu_numa_flags, 6711 .flags = SDTL_OVERLAP, 6712 .numa_level = j, 6713 SD_INIT_NAME(NUMA) 6714 }; 6715 } 6716 6717 sched_domain_topology = tl; 6718 6719 sched_domains_numa_levels = level; 6720 sched_max_numa_distance = sched_domains_numa_distance[level - 1]; 6721 6722 init_numa_topology_type(); 6723 } 6724 6725 static void sched_domains_numa_masks_set(int cpu) 6726 { 6727 int i, j; 6728 int node = cpu_to_node(cpu); 6729 6730 for (i = 0; i < sched_domains_numa_levels; i++) { 6731 for (j = 0; j < nr_node_ids; j++) { 6732 if (node_distance(j, node) <= sched_domains_numa_distance[i]) 6733 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 6734 } 6735 } 6736 } 6737 6738 static void sched_domains_numa_masks_clear(int cpu) 6739 { 6740 int i, j; 6741 for (i = 0; i < sched_domains_numa_levels; i++) { 6742 for (j = 0; j < nr_node_ids; j++) 6743 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 6744 } 6745 } 6746 6747 /* 6748 * Update sched_domains_numa_masks[level][node] array when new cpus 6749 * are onlined. 
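 *
 * (E.g., when a cpu on node N comes online, it is set in masks[i][j] for
 *  every level i and every node j with node_distance(j, N) within
 *  sched_domains_numa_distance[i]; CPU_DEAD clears it again.)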
6750 */ 6751 static int sched_domains_numa_masks_update(struct notifier_block *nfb, 6752 unsigned long action, 6753 void *hcpu) 6754 { 6755 int cpu = (long)hcpu; 6756 6757 switch (action & ~CPU_TASKS_FROZEN) { 6758 case CPU_ONLINE: 6759 sched_domains_numa_masks_set(cpu); 6760 break; 6761 6762 case CPU_DEAD: 6763 sched_domains_numa_masks_clear(cpu); 6764 break; 6765 6766 default: 6767 return NOTIFY_DONE; 6768 } 6769 6770 return NOTIFY_OK; 6771 } 6772 #else 6773 static inline void sched_init_numa(void) 6774 { 6775 } 6776 6777 static int sched_domains_numa_masks_update(struct notifier_block *nfb, 6778 unsigned long action, 6779 void *hcpu) 6780 { 6781 return 0; 6782 } 6783 #endif /* CONFIG_NUMA */ 6784 6785 static int __sdt_alloc(const struct cpumask *cpu_map) 6786 { 6787 struct sched_domain_topology_level *tl; 6788 int j; 6789 6790 for_each_sd_topology(tl) { 6791 struct sd_data *sdd = &tl->data; 6792 6793 sdd->sd = alloc_percpu(struct sched_domain *); 6794 if (!sdd->sd) 6795 return -ENOMEM; 6796 6797 sdd->sg = alloc_percpu(struct sched_group *); 6798 if (!sdd->sg) 6799 return -ENOMEM; 6800 6801 sdd->sgc = alloc_percpu(struct sched_group_capacity *); 6802 if (!sdd->sgc) 6803 return -ENOMEM; 6804 6805 for_each_cpu(j, cpu_map) { 6806 struct sched_domain *sd; 6807 struct sched_group *sg; 6808 struct sched_group_capacity *sgc; 6809 6810 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 6811 GFP_KERNEL, cpu_to_node(j)); 6812 if (!sd) 6813 return -ENOMEM; 6814 6815 *per_cpu_ptr(sdd->sd, j) = sd; 6816 6817 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6818 GFP_KERNEL, cpu_to_node(j)); 6819 if (!sg) 6820 return -ENOMEM; 6821 6822 sg->next = sg; 6823 6824 *per_cpu_ptr(sdd->sg, j) = sg; 6825 6826 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 6827 GFP_KERNEL, cpu_to_node(j)); 6828 if (!sgc) 6829 return -ENOMEM; 6830 6831 *per_cpu_ptr(sdd->sgc, j) = sgc; 6832 } 6833 } 6834 6835 return 0; 6836 } 6837 6838 static void __sdt_free(const struct cpumask *cpu_map) 6839 { 6840 struct sched_domain_topology_level *tl; 6841 int j; 6842 6843 for_each_sd_topology(tl) { 6844 struct sd_data *sdd = &tl->data; 6845 6846 for_each_cpu(j, cpu_map) { 6847 struct sched_domain *sd; 6848 6849 if (sdd->sd) { 6850 sd = *per_cpu_ptr(sdd->sd, j); 6851 if (sd && (sd->flags & SD_OVERLAP)) 6852 free_sched_groups(sd->groups, 0); 6853 kfree(*per_cpu_ptr(sdd->sd, j)); 6854 } 6855 6856 if (sdd->sg) 6857 kfree(*per_cpu_ptr(sdd->sg, j)); 6858 if (sdd->sgc) 6859 kfree(*per_cpu_ptr(sdd->sgc, j)); 6860 } 6861 free_percpu(sdd->sd); 6862 sdd->sd = NULL; 6863 free_percpu(sdd->sg); 6864 sdd->sg = NULL; 6865 free_percpu(sdd->sgc); 6866 sdd->sgc = NULL; 6867 } 6868 } 6869 6870 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 6871 const struct cpumask *cpu_map, struct sched_domain_attr *attr, 6872 struct sched_domain *child, int cpu) 6873 { 6874 struct sched_domain *sd = sd_init(tl, cpu); 6875 if (!sd) 6876 return child; 6877 6878 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); 6879 if (child) { 6880 sd->level = child->level + 1; 6881 sched_domain_level_max = max(sched_domain_level_max, sd->level); 6882 child->parent = sd; 6883 sd->child = child; 6884 6885 if (!cpumask_subset(sched_domain_span(child), 6886 sched_domain_span(sd))) { 6887 pr_err("BUG: arch topology borken\n"); 6888 #ifdef CONFIG_SCHED_DEBUG 6889 pr_err(" the %s domain not a subset of the %s domain\n", 6890 child->name, sd->name); 6891 #endif 6892 /* Fixup, ensure @sd has at least @child 
cpus. */ 6893 cpumask_or(sched_domain_span(sd), 6894 sched_domain_span(sd), 6895 sched_domain_span(child)); 6896 } 6897 6898 } 6899 set_domain_attribute(sd, attr); 6900 6901 return sd; 6902 } 6903 6904 /* 6905 * Build sched domains for a given set of cpus and attach the sched domains 6906 * to the individual cpus 6907 */ 6908 static int build_sched_domains(const struct cpumask *cpu_map, 6909 struct sched_domain_attr *attr) 6910 { 6911 enum s_alloc alloc_state; 6912 struct sched_domain *sd; 6913 struct s_data d; 6914 int i, ret = -ENOMEM; 6915 6916 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 6917 if (alloc_state != sa_rootdomain) 6918 goto error; 6919 6920 /* Set up domains for cpus specified by the cpu_map. */ 6921 for_each_cpu(i, cpu_map) { 6922 struct sched_domain_topology_level *tl; 6923 6924 sd = NULL; 6925 for_each_sd_topology(tl) { 6926 sd = build_sched_domain(tl, cpu_map, attr, sd, i); 6927 if (tl == sched_domain_topology) 6928 *per_cpu_ptr(d.sd, i) = sd; 6929 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) 6930 sd->flags |= SD_OVERLAP; 6931 if (cpumask_equal(cpu_map, sched_domain_span(sd))) 6932 break; 6933 } 6934 } 6935 6936 /* Build the groups for the domains */ 6937 for_each_cpu(i, cpu_map) { 6938 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 6939 sd->span_weight = cpumask_weight(sched_domain_span(sd)); 6940 if (sd->flags & SD_OVERLAP) { 6941 if (build_overlap_sched_groups(sd, i)) 6942 goto error; 6943 } else { 6944 if (build_sched_groups(sd, i)) 6945 goto error; 6946 } 6947 } 6948 } 6949 6950 /* Calculate CPU capacity for physical packages and nodes */ 6951 for (i = nr_cpumask_bits-1; i >= 0; i--) { 6952 if (!cpumask_test_cpu(i, cpu_map)) 6953 continue; 6954 6955 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 6956 claim_allocations(i, sd); 6957 init_sched_groups_capacity(i, sd); 6958 } 6959 } 6960 6961 /* Attach the domains */ 6962 rcu_read_lock(); 6963 for_each_cpu(i, cpu_map) { 6964 sd = *per_cpu_ptr(d.sd, i); 6965 cpu_attach_domain(sd, d.rd, i); 6966 } 6967 rcu_read_unlock(); 6968 6969 ret = 0; 6970 error: 6971 __free_domain_allocs(&d, alloc_state, cpu_map); 6972 return ret; 6973 } 6974 6975 static cpumask_var_t *doms_cur; /* current sched domains */ 6976 static int ndoms_cur; /* number of sched domains in 'doms_cur' */ 6977 static struct sched_domain_attr *dattr_cur; 6978 /* attribues of custom domains in 'doms_cur' */ 6979 6980 /* 6981 * Special case: If a kmalloc of a doms_cur partition (array of 6982 * cpumask) fails, then fallback to a single sched domain, 6983 * as determined by the single cpumask fallback_doms. 6984 */ 6985 static cpumask_var_t fallback_doms; 6986 6987 /* 6988 * arch_update_cpu_topology lets virtualized architectures update the 6989 * cpu core maps. It is supposed to return 1 if the topology changed 6990 * or 0 if it stayed the same. 
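 *
 * A hypothetical override (illustration only, names made up) could be:
 *
 *	int arch_update_cpu_topology(void)
 *	{
 *		return my_arch_remapped_cores() ? 1 : 0;
 *	}
 *
 * where my_arch_remapped_cores() stands in for whatever state the
 * architecture keeps about cores remapped by the hypervisor.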
6991 */ 6992 int __weak arch_update_cpu_topology(void) 6993 { 6994 return 0; 6995 } 6996 6997 cpumask_var_t *alloc_sched_domains(unsigned int ndoms) 6998 { 6999 int i; 7000 cpumask_var_t *doms; 7001 7002 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); 7003 if (!doms) 7004 return NULL; 7005 for (i = 0; i < ndoms; i++) { 7006 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { 7007 free_sched_domains(doms, i); 7008 return NULL; 7009 } 7010 } 7011 return doms; 7012 } 7013 7014 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) 7015 { 7016 unsigned int i; 7017 for (i = 0; i < ndoms; i++) 7018 free_cpumask_var(doms[i]); 7019 kfree(doms); 7020 } 7021 7022 /* 7023 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 7024 * For now this just excludes isolated cpus, but could be used to 7025 * exclude other special cases in the future. 7026 */ 7027 static int init_sched_domains(const struct cpumask *cpu_map) 7028 { 7029 int err; 7030 7031 arch_update_cpu_topology(); 7032 ndoms_cur = 1; 7033 doms_cur = alloc_sched_domains(ndoms_cur); 7034 if (!doms_cur) 7035 doms_cur = &fallback_doms; 7036 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); 7037 err = build_sched_domains(doms_cur[0], NULL); 7038 register_sched_domain_sysctl(); 7039 7040 return err; 7041 } 7042 7043 /* 7044 * Detach sched domains from a group of cpus specified in cpu_map 7045 * These cpus will now be attached to the NULL domain 7046 */ 7047 static void detach_destroy_domains(const struct cpumask *cpu_map) 7048 { 7049 int i; 7050 7051 rcu_read_lock(); 7052 for_each_cpu(i, cpu_map) 7053 cpu_attach_domain(NULL, &def_root_domain, i); 7054 rcu_read_unlock(); 7055 } 7056 7057 /* handle null as "default" */ 7058 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, 7059 struct sched_domain_attr *new, int idx_new) 7060 { 7061 struct sched_domain_attr tmp; 7062 7063 /* fast path */ 7064 if (!new && !cur) 7065 return 1; 7066 7067 tmp = SD_ATTR_INIT; 7068 return !memcmp(cur ? (cur + idx_cur) : &tmp, 7069 new ? (new + idx_new) : &tmp, 7070 sizeof(struct sched_domain_attr)); 7071 } 7072 7073 /* 7074 * Partition sched domains as specified by the 'ndoms_new' 7075 * cpumasks in the array doms_new[] of cpumasks. This compares 7076 * doms_new[] to the current sched domain partitioning, doms_cur[]. 7077 * It destroys each deleted domain and builds each new domain. 7078 * 7079 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. 7080 * The masks don't intersect (don't overlap.) We should setup one 7081 * sched domain for each mask. CPUs not in any of the cpumasks will 7082 * not be load balanced. If the same cpumask appears both in the 7083 * current 'doms_cur' domains and in the new 'doms_new', we can leave 7084 * it as it is. 7085 * 7086 * The passed in 'doms_new' should be allocated using 7087 * alloc_sched_domains. This routine takes ownership of it and will 7088 * free_sched_domains it when done with it. If the caller failed the 7089 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, 7090 * and partition_sched_domains() will fallback to the single partition 7091 * 'fallback_doms', it also forces the domains to be rebuilt. 7092 * 7093 * If doms_new == NULL it will be replaced with cpu_online_mask. 7094 * ndoms_new == 0 is a special case for destroying existing domains, 7095 * and it will not create the default domain. 
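 *
 * A minimal, purely illustrative caller (the partition masks are made up)
 * might do:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], mask_of_partition_a);
 *		cpumask_copy(doms[1], mask_of_partition_b);
 *	}
 *	partition_sched_domains(doms ? 2 : 1, doms, NULL);
 *
 * On allocation failure this degrades to the single fallback partition
 * described above.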
7096 * 7097 * Call with hotplug lock held 7098 */ 7099 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 7100 struct sched_domain_attr *dattr_new) 7101 { 7102 int i, j, n; 7103 int new_topology; 7104 7105 mutex_lock(&sched_domains_mutex); 7106 7107 /* always unregister in case we don't destroy any domains */ 7108 unregister_sched_domain_sysctl(); 7109 7110 /* Let architecture update cpu core mappings. */ 7111 new_topology = arch_update_cpu_topology(); 7112 7113 n = doms_new ? ndoms_new : 0; 7114 7115 /* Destroy deleted domains */ 7116 for (i = 0; i < ndoms_cur; i++) { 7117 for (j = 0; j < n && !new_topology; j++) { 7118 if (cpumask_equal(doms_cur[i], doms_new[j]) 7119 && dattrs_equal(dattr_cur, i, dattr_new, j)) 7120 goto match1; 7121 } 7122 /* no match - a current sched domain not in new doms_new[] */ 7123 detach_destroy_domains(doms_cur[i]); 7124 match1: 7125 ; 7126 } 7127 7128 n = ndoms_cur; 7129 if (doms_new == NULL) { 7130 n = 0; 7131 doms_new = &fallback_doms; 7132 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); 7133 WARN_ON_ONCE(dattr_new); 7134 } 7135 7136 /* Build new domains */ 7137 for (i = 0; i < ndoms_new; i++) { 7138 for (j = 0; j < n && !new_topology; j++) { 7139 if (cpumask_equal(doms_new[i], doms_cur[j]) 7140 && dattrs_equal(dattr_new, i, dattr_cur, j)) 7141 goto match2; 7142 } 7143 /* no match - add a new doms_new */ 7144 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); 7145 match2: 7146 ; 7147 } 7148 7149 /* Remember the new sched domains */ 7150 if (doms_cur != &fallback_doms) 7151 free_sched_domains(doms_cur, ndoms_cur); 7152 kfree(dattr_cur); /* kfree(NULL) is safe */ 7153 doms_cur = doms_new; 7154 dattr_cur = dattr_new; 7155 ndoms_cur = ndoms_new; 7156 7157 register_sched_domain_sysctl(); 7158 7159 mutex_unlock(&sched_domains_mutex); 7160 } 7161 7162 static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ 7163 7164 /* 7165 * Update cpusets according to cpu_active mask. If cpusets are 7166 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 7167 * around partition_sched_domains(). 7168 * 7169 * If we come here as part of a suspend/resume, don't touch cpusets because we 7170 * want to restore it back to its original state upon resume anyway. 7171 */ 7172 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, 7173 void *hcpu) 7174 { 7175 switch (action) { 7176 case CPU_ONLINE_FROZEN: 7177 case CPU_DOWN_FAILED_FROZEN: 7178 7179 /* 7180 * num_cpus_frozen tracks how many CPUs are involved in suspend 7181 * resume sequence. As long as this is not the last online 7182 * operation in the resume sequence, just build a single sched 7183 * domain, ignoring cpusets. 7184 */ 7185 num_cpus_frozen--; 7186 if (likely(num_cpus_frozen)) { 7187 partition_sched_domains(1, NULL, NULL); 7188 break; 7189 } 7190 7191 /* 7192 * This is the last CPU online operation. So fall through and 7193 * restore the original sched domains by considering the 7194 * cpuset configurations. 
7195 */ 7196 7197 case CPU_ONLINE: 7198 cpuset_update_active_cpus(true); 7199 break; 7200 default: 7201 return NOTIFY_DONE; 7202 } 7203 return NOTIFY_OK; 7204 } 7205 7206 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, 7207 void *hcpu) 7208 { 7209 unsigned long flags; 7210 long cpu = (long)hcpu; 7211 struct dl_bw *dl_b; 7212 bool overflow; 7213 int cpus; 7214 7215 switch (action) { 7216 case CPU_DOWN_PREPARE: 7217 rcu_read_lock_sched(); 7218 dl_b = dl_bw_of(cpu); 7219 7220 raw_spin_lock_irqsave(&dl_b->lock, flags); 7221 cpus = dl_bw_cpus(cpu); 7222 overflow = __dl_overflow(dl_b, cpus, 0, 0); 7223 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7224 7225 rcu_read_unlock_sched(); 7226 7227 if (overflow) 7228 return notifier_from_errno(-EBUSY); 7229 cpuset_update_active_cpus(false); 7230 break; 7231 case CPU_DOWN_PREPARE_FROZEN: 7232 num_cpus_frozen++; 7233 partition_sched_domains(1, NULL, NULL); 7234 break; 7235 default: 7236 return NOTIFY_DONE; 7237 } 7238 return NOTIFY_OK; 7239 } 7240 7241 void __init sched_init_smp(void) 7242 { 7243 cpumask_var_t non_isolated_cpus; 7244 7245 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); 7246 alloc_cpumask_var(&fallback_doms, GFP_KERNEL); 7247 7248 sched_init_numa(); 7249 7250 /* 7251 * There's no userspace yet to cause hotplug operations; hence all the 7252 * cpu masks are stable and all blatant races in the below code cannot 7253 * happen. 7254 */ 7255 mutex_lock(&sched_domains_mutex); 7256 init_sched_domains(cpu_active_mask); 7257 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); 7258 if (cpumask_empty(non_isolated_cpus)) 7259 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); 7260 mutex_unlock(&sched_domains_mutex); 7261 7262 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); 7263 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); 7264 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); 7265 7266 init_hrtick(); 7267 7268 /* Move init over to a non-isolated CPU */ 7269 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) 7270 BUG(); 7271 sched_init_granularity(); 7272 free_cpumask_var(non_isolated_cpus); 7273 7274 init_sched_rt_class(); 7275 init_sched_dl_class(); 7276 } 7277 #else 7278 void __init sched_init_smp(void) 7279 { 7280 sched_init_granularity(); 7281 } 7282 #endif /* CONFIG_SMP */ 7283 7284 int in_sched_functions(unsigned long addr) 7285 { 7286 return in_lock_functions(addr) || 7287 (addr >= (unsigned long)__sched_text_start 7288 && addr < (unsigned long)__sched_text_end); 7289 } 7290 7291 #ifdef CONFIG_CGROUP_SCHED 7292 /* 7293 * Default task group. 7294 * Every task in system belongs to this group at bootup. 
7295 */ 7296 struct task_group root_task_group; 7297 LIST_HEAD(task_groups); 7298 #endif 7299 7300 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 7301 7302 void __init sched_init(void) 7303 { 7304 int i, j; 7305 unsigned long alloc_size = 0, ptr; 7306 7307 #ifdef CONFIG_FAIR_GROUP_SCHED 7308 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7309 #endif 7310 #ifdef CONFIG_RT_GROUP_SCHED 7311 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7312 #endif 7313 if (alloc_size) { 7314 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); 7315 7316 #ifdef CONFIG_FAIR_GROUP_SCHED 7317 root_task_group.se = (struct sched_entity **)ptr; 7318 ptr += nr_cpu_ids * sizeof(void **); 7319 7320 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 7321 ptr += nr_cpu_ids * sizeof(void **); 7322 7323 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7324 #ifdef CONFIG_RT_GROUP_SCHED 7325 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 7326 ptr += nr_cpu_ids * sizeof(void **); 7327 7328 root_task_group.rt_rq = (struct rt_rq **)ptr; 7329 ptr += nr_cpu_ids * sizeof(void **); 7330 7331 #endif /* CONFIG_RT_GROUP_SCHED */ 7332 } 7333 #ifdef CONFIG_CPUMASK_OFFSTACK 7334 for_each_possible_cpu(i) { 7335 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 7336 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 7337 } 7338 #endif /* CONFIG_CPUMASK_OFFSTACK */ 7339 7340 init_rt_bandwidth(&def_rt_bandwidth, 7341 global_rt_period(), global_rt_runtime()); 7342 init_dl_bandwidth(&def_dl_bandwidth, 7343 global_rt_period(), global_rt_runtime()); 7344 7345 #ifdef CONFIG_SMP 7346 init_defrootdomain(); 7347 #endif 7348 7349 #ifdef CONFIG_RT_GROUP_SCHED 7350 init_rt_bandwidth(&root_task_group.rt_bandwidth, 7351 global_rt_period(), global_rt_runtime()); 7352 #endif /* CONFIG_RT_GROUP_SCHED */ 7353 7354 #ifdef CONFIG_CGROUP_SCHED 7355 list_add(&root_task_group.list, &task_groups); 7356 INIT_LIST_HEAD(&root_task_group.children); 7357 INIT_LIST_HEAD(&root_task_group.siblings); 7358 autogroup_init(&init_task); 7359 7360 #endif /* CONFIG_CGROUP_SCHED */ 7361 7362 for_each_possible_cpu(i) { 7363 struct rq *rq; 7364 7365 rq = cpu_rq(i); 7366 raw_spin_lock_init(&rq->lock); 7367 rq->nr_running = 0; 7368 rq->calc_load_active = 0; 7369 rq->calc_load_update = jiffies + LOAD_FREQ; 7370 init_cfs_rq(&rq->cfs); 7371 init_rt_rq(&rq->rt); 7372 init_dl_rq(&rq->dl); 7373 #ifdef CONFIG_FAIR_GROUP_SCHED 7374 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 7375 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 7376 /* 7377 * How much cpu bandwidth does root_task_group get? 7378 * 7379 * In case of task-groups formed thr' the cgroup filesystem, it 7380 * gets 100% of the cpu resources in the system. This overall 7381 * system cpu resource is divided among the tasks of 7382 * root_task_group and its child task-groups in a fair manner, 7383 * based on each entity's (task or task-group's) weight 7384 * (se->load.weight). 7385 * 7386 * In other words, if root_task_group has 10 tasks of weight 7387 * 1024) and two child groups A0 and A1 (of weight 1024 each), 7388 * then A0's share of the cpu resource is: 7389 * 7390 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 7391 * 7392 * We achieve this by letting root_task_group's tasks sit 7393 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 
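 *
 * (Completing the arithmetic of the example above: the total weight is
 *  10*1024 + 1024 + 1024 = 12288, so A0, A1 and each individual root
 *  task all get 1024/12288, roughly 8.33%, and the ten root tasks
 *  together receive about 83.3% of the cpu.)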
7394 */ 7395 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 7396 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 7397 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7398 7399 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 7400 #ifdef CONFIG_RT_GROUP_SCHED 7401 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 7402 #endif 7403 7404 for (j = 0; j < CPU_LOAD_IDX_MAX; j++) 7405 rq->cpu_load[j] = 0; 7406 7407 rq->last_load_update_tick = jiffies; 7408 7409 #ifdef CONFIG_SMP 7410 rq->sd = NULL; 7411 rq->rd = NULL; 7412 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 7413 rq->balance_callback = NULL; 7414 rq->active_balance = 0; 7415 rq->next_balance = jiffies; 7416 rq->push_cpu = 0; 7417 rq->cpu = i; 7418 rq->online = 0; 7419 rq->idle_stamp = 0; 7420 rq->avg_idle = 2*sysctl_sched_migration_cost; 7421 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 7422 7423 INIT_LIST_HEAD(&rq->cfs_tasks); 7424 7425 rq_attach_root(rq, &def_root_domain); 7426 #ifdef CONFIG_NO_HZ_COMMON 7427 rq->nohz_flags = 0; 7428 #endif 7429 #ifdef CONFIG_NO_HZ_FULL 7430 rq->last_sched_tick = 0; 7431 #endif 7432 #endif 7433 init_rq_hrtick(rq); 7434 atomic_set(&rq->nr_iowait, 0); 7435 } 7436 7437 set_load_weight(&init_task); 7438 7439 #ifdef CONFIG_PREEMPT_NOTIFIERS 7440 INIT_HLIST_HEAD(&init_task.preempt_notifiers); 7441 #endif 7442 7443 /* 7444 * The boot idle thread does lazy MMU switching as well: 7445 */ 7446 atomic_inc(&init_mm.mm_count); 7447 enter_lazy_tlb(&init_mm, current); 7448 7449 /* 7450 * During early bootup we pretend to be a normal task: 7451 */ 7452 current->sched_class = &fair_sched_class; 7453 7454 /* 7455 * Make us the idle thread. Technically, schedule() should not be 7456 * called from this thread, however somewhere below it might be, 7457 * but because we are the idle thread, we just pick up running again 7458 * when this runqueue becomes "idle". 7459 */ 7460 init_idle(current, smp_processor_id()); 7461 7462 calc_load_update = jiffies + LOAD_FREQ; 7463 7464 #ifdef CONFIG_SMP 7465 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); 7466 /* May be allocated at isolcpus cmdline parse time */ 7467 if (cpu_isolated_map == NULL) 7468 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 7469 idle_thread_set_boot_cpu(); 7470 set_cpu_rq_start_time(); 7471 #endif 7472 init_sched_fair_class(); 7473 7474 scheduler_running = 1; 7475 } 7476 7477 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 7478 static inline int preempt_count_equals(int preempt_offset) 7479 { 7480 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); 7481 7482 return (nested == preempt_offset); 7483 } 7484 7485 void __might_sleep(const char *file, int line, int preempt_offset) 7486 { 7487 /* 7488 * Blocking primitives will set (and therefore destroy) current->state, 7489 * since we will exit with TASK_RUNNING make sure we enter with it, 7490 * otherwise we will destroy state. 7491 */ 7492 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 7493 "do not call blocking ops when !TASK_RUNNING; " 7494 "state=%lx set at [<%p>] %pS\n", 7495 current->state, 7496 (void *)current->task_state_change, 7497 (void *)current->task_state_change); 7498 7499 ___might_sleep(file, line, preempt_offset); 7500 } 7501 EXPORT_SYMBOL(__might_sleep); 7502 7503 void ___might_sleep(const char *file, int line, int preempt_offset) 7504 { 7505 static unsigned long prev_jiffy; /* ratelimiting */ 7506 7507 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. 
*/ 7508 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 7509 !is_idle_task(current)) || 7510 system_state != SYSTEM_RUNNING || oops_in_progress) 7511 return; 7512 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 7513 return; 7514 prev_jiffy = jiffies; 7515 7516 printk(KERN_ERR 7517 "BUG: sleeping function called from invalid context at %s:%d\n", 7518 file, line); 7519 printk(KERN_ERR 7520 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 7521 in_atomic(), irqs_disabled(), 7522 current->pid, current->comm); 7523 7524 if (task_stack_end_corrupted(current)) 7525 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 7526 7527 debug_show_held_locks(current); 7528 if (irqs_disabled()) 7529 print_irqtrace_events(current); 7530 #ifdef CONFIG_DEBUG_PREEMPT 7531 if (!preempt_count_equals(preempt_offset)) { 7532 pr_err("Preemption disabled at:"); 7533 print_ip_sym(current->preempt_disable_ip); 7534 pr_cont("\n"); 7535 } 7536 #endif 7537 dump_stack(); 7538 } 7539 EXPORT_SYMBOL(___might_sleep); 7540 #endif 7541 7542 #ifdef CONFIG_MAGIC_SYSRQ 7543 void normalize_rt_tasks(void) 7544 { 7545 struct task_struct *g, *p; 7546 struct sched_attr attr = { 7547 .sched_policy = SCHED_NORMAL, 7548 }; 7549 7550 read_lock(&tasklist_lock); 7551 for_each_process_thread(g, p) { 7552 /* 7553 * Only normalize user tasks: 7554 */ 7555 if (p->flags & PF_KTHREAD) 7556 continue; 7557 7558 p->se.exec_start = 0; 7559 #ifdef CONFIG_SCHEDSTATS 7560 p->se.statistics.wait_start = 0; 7561 p->se.statistics.sleep_start = 0; 7562 p->se.statistics.block_start = 0; 7563 #endif 7564 7565 if (!dl_task(p) && !rt_task(p)) { 7566 /* 7567 * Renice negative nice level userspace 7568 * tasks back to 0: 7569 */ 7570 if (task_nice(p) < 0) 7571 set_user_nice(p, 0); 7572 continue; 7573 } 7574 7575 __sched_setscheduler(p, &attr, false, false); 7576 } 7577 read_unlock(&tasklist_lock); 7578 } 7579 7580 #endif /* CONFIG_MAGIC_SYSRQ */ 7581 7582 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 7583 /* 7584 * These functions are only useful for the IA64 MCA handling, or kdb. 7585 * 7586 * They can only be called when the whole system has been 7587 * stopped - every CPU needs to be quiescent, and no scheduling 7588 * activity can take place. Using them for anything else would 7589 * be a serious bug, and as a result, they aren't even visible 7590 * under any other configuration. 7591 */ 7592 7593 /** 7594 * curr_task - return the current task for a given cpu. 7595 * @cpu: the processor in question. 7596 * 7597 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 7598 * 7599 * Return: The current task for @cpu. 7600 */ 7601 struct task_struct *curr_task(int cpu) 7602 { 7603 return cpu_curr(cpu); 7604 } 7605 7606 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 7607 7608 #ifdef CONFIG_IA64 7609 /** 7610 * set_curr_task - set the current task for a given cpu. 7611 * @cpu: the processor in question. 7612 * @p: the task pointer to set. 7613 * 7614 * Description: This function must only be used when non-maskable interrupts 7615 * are serviced on a separate stack. It allows the architecture to switch the 7616 * notion of the current task on a cpu in a non-blocking manner. This function 7617 * must be called with all CPU's synchronized, and interrupts disabled, the 7618 * and caller must save the original value of the current task (see 7619 * curr_task() above) and restore that value before reenabling interrupts and 7620 * re-starting the system. 7621 * 7622 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 
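 *
 * Illustrative (hypothetical) usage honouring the contract above:
 *
 *	struct task_struct *orig = curr_task(cpu);
 *
 *	set_curr_task(cpu, special_task);
 *	... run the MCA or kdb handler ...
 *	set_curr_task(cpu, orig);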
7623 */ 7624 void set_curr_task(int cpu, struct task_struct *p) 7625 { 7626 cpu_curr(cpu) = p; 7627 } 7628 7629 #endif 7630 7631 #ifdef CONFIG_CGROUP_SCHED 7632 /* task_group_lock serializes the addition/removal of task groups */ 7633 static DEFINE_SPINLOCK(task_group_lock); 7634 7635 static void free_sched_group(struct task_group *tg) 7636 { 7637 free_fair_sched_group(tg); 7638 free_rt_sched_group(tg); 7639 autogroup_free(tg); 7640 kfree(tg); 7641 } 7642 7643 /* allocate runqueue etc for a new task group */ 7644 struct task_group *sched_create_group(struct task_group *parent) 7645 { 7646 struct task_group *tg; 7647 7648 tg = kzalloc(sizeof(*tg), GFP_KERNEL); 7649 if (!tg) 7650 return ERR_PTR(-ENOMEM); 7651 7652 if (!alloc_fair_sched_group(tg, parent)) 7653 goto err; 7654 7655 if (!alloc_rt_sched_group(tg, parent)) 7656 goto err; 7657 7658 return tg; 7659 7660 err: 7661 free_sched_group(tg); 7662 return ERR_PTR(-ENOMEM); 7663 } 7664 7665 void sched_online_group(struct task_group *tg, struct task_group *parent) 7666 { 7667 unsigned long flags; 7668 7669 spin_lock_irqsave(&task_group_lock, flags); 7670 list_add_rcu(&tg->list, &task_groups); 7671 7672 WARN_ON(!parent); /* root should already exist */ 7673 7674 tg->parent = parent; 7675 INIT_LIST_HEAD(&tg->children); 7676 list_add_rcu(&tg->siblings, &parent->children); 7677 spin_unlock_irqrestore(&task_group_lock, flags); 7678 } 7679 7680 /* rcu callback to free various structures associated with a task group */ 7681 static void free_sched_group_rcu(struct rcu_head *rhp) 7682 { 7683 /* now it should be safe to free those cfs_rqs */ 7684 free_sched_group(container_of(rhp, struct task_group, rcu)); 7685 } 7686 7687 /* Destroy runqueue etc associated with a task group */ 7688 void sched_destroy_group(struct task_group *tg) 7689 { 7690 /* wait for possible concurrent references to cfs_rqs complete */ 7691 call_rcu(&tg->rcu, free_sched_group_rcu); 7692 } 7693 7694 void sched_offline_group(struct task_group *tg) 7695 { 7696 unsigned long flags; 7697 int i; 7698 7699 /* end participation in shares distribution */ 7700 for_each_possible_cpu(i) 7701 unregister_fair_sched_group(tg, i); 7702 7703 spin_lock_irqsave(&task_group_lock, flags); 7704 list_del_rcu(&tg->list); 7705 list_del_rcu(&tg->siblings); 7706 spin_unlock_irqrestore(&task_group_lock, flags); 7707 } 7708 7709 /* change task's runqueue when it moves between groups. 7710 * The caller of this function should have put the task in its new group 7711 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to 7712 * reflect its new group. 7713 */ 7714 void sched_move_task(struct task_struct *tsk) 7715 { 7716 struct task_group *tg; 7717 int queued, running; 7718 unsigned long flags; 7719 struct rq *rq; 7720 7721 rq = task_rq_lock(tsk, &flags); 7722 7723 running = task_current(rq, tsk); 7724 queued = task_on_rq_queued(tsk); 7725 7726 if (queued) 7727 dequeue_task(rq, tsk, 0); 7728 if (unlikely(running)) 7729 put_prev_task(rq, tsk); 7730 7731 /* 7732 * All callers are synchronized by task_rq_lock(); we do not use RCU 7733 * which is pointless here. Thus, we pass "true" to task_css_check() 7734 * to prevent lockdep warnings. 
7735 */ 7736 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 7737 struct task_group, css); 7738 tg = autogroup_task_group(tsk, tg); 7739 tsk->sched_task_group = tg; 7740 7741 #ifdef CONFIG_FAIR_GROUP_SCHED 7742 if (tsk->sched_class->task_move_group) 7743 tsk->sched_class->task_move_group(tsk, queued); 7744 else 7745 #endif 7746 set_task_rq(tsk, task_cpu(tsk)); 7747 7748 if (unlikely(running)) 7749 tsk->sched_class->set_curr_task(rq); 7750 if (queued) 7751 enqueue_task(rq, tsk, 0); 7752 7753 task_rq_unlock(rq, tsk, &flags); 7754 } 7755 #endif /* CONFIG_CGROUP_SCHED */ 7756 7757 #ifdef CONFIG_RT_GROUP_SCHED 7758 /* 7759 * Ensure that the real time constraints are schedulable. 7760 */ 7761 static DEFINE_MUTEX(rt_constraints_mutex); 7762 7763 /* Must be called with tasklist_lock held */ 7764 static inline int tg_has_rt_tasks(struct task_group *tg) 7765 { 7766 struct task_struct *g, *p; 7767 7768 /* 7769 * Autogroups do not have RT tasks; see autogroup_create(). 7770 */ 7771 if (task_group_is_autogroup(tg)) 7772 return 0; 7773 7774 for_each_process_thread(g, p) { 7775 if (rt_task(p) && task_group(p) == tg) 7776 return 1; 7777 } 7778 7779 return 0; 7780 } 7781 7782 struct rt_schedulable_data { 7783 struct task_group *tg; 7784 u64 rt_period; 7785 u64 rt_runtime; 7786 }; 7787 7788 static int tg_rt_schedulable(struct task_group *tg, void *data) 7789 { 7790 struct rt_schedulable_data *d = data; 7791 struct task_group *child; 7792 unsigned long total, sum = 0; 7793 u64 period, runtime; 7794 7795 period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7796 runtime = tg->rt_bandwidth.rt_runtime; 7797 7798 if (tg == d->tg) { 7799 period = d->rt_period; 7800 runtime = d->rt_runtime; 7801 } 7802 7803 /* 7804 * Cannot have more runtime than the period. 7805 */ 7806 if (runtime > period && runtime != RUNTIME_INF) 7807 return -EINVAL; 7808 7809 /* 7810 * Ensure we don't starve existing RT tasks. 7811 */ 7812 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) 7813 return -EBUSY; 7814 7815 total = to_ratio(period, runtime); 7816 7817 /* 7818 * Nobody can have more than the global setting allows. 7819 */ 7820 if (total > to_ratio(global_rt_period(), global_rt_runtime())) 7821 return -EINVAL; 7822 7823 /* 7824 * The sum of our children's runtime should not exceed our own. 7825 */ 7826 list_for_each_entry_rcu(child, &tg->children, siblings) { 7827 period = ktime_to_ns(child->rt_bandwidth.rt_period); 7828 runtime = child->rt_bandwidth.rt_runtime; 7829 7830 if (child == d->tg) { 7831 period = d->rt_period; 7832 runtime = d->rt_runtime; 7833 } 7834 7835 sum += to_ratio(period, runtime); 7836 } 7837 7838 if (sum > total) 7839 return -EINVAL; 7840 7841 return 0; 7842 } 7843 7844 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 7845 { 7846 int ret; 7847 7848 struct rt_schedulable_data data = { 7849 .tg = tg, 7850 .rt_period = period, 7851 .rt_runtime = runtime, 7852 }; 7853 7854 rcu_read_lock(); 7855 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); 7856 rcu_read_unlock(); 7857 7858 return ret; 7859 } 7860 7861 static int tg_set_rt_bandwidth(struct task_group *tg, 7862 u64 rt_period, u64 rt_runtime) 7863 { 7864 int i, err = 0; 7865 7866 /* 7867 * Disallowing the root group RT runtime is BAD, it would disallow the 7868 * kernel creating (and or operating) RT threads. 7869 */ 7870 if (tg == &root_task_group && rt_runtime == 0) 7871 return -EINVAL; 7872 7873 /* No period doesn't make any sense. 
*/ 7874 if (rt_period == 0) 7875 return -EINVAL; 7876 7877 mutex_lock(&rt_constraints_mutex); 7878 read_lock(&tasklist_lock); 7879 err = __rt_schedulable(tg, rt_period, rt_runtime); 7880 if (err) 7881 goto unlock; 7882 7883 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 7884 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 7885 tg->rt_bandwidth.rt_runtime = rt_runtime; 7886 7887 for_each_possible_cpu(i) { 7888 struct rt_rq *rt_rq = tg->rt_rq[i]; 7889 7890 raw_spin_lock(&rt_rq->rt_runtime_lock); 7891 rt_rq->rt_runtime = rt_runtime; 7892 raw_spin_unlock(&rt_rq->rt_runtime_lock); 7893 } 7894 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 7895 unlock: 7896 read_unlock(&tasklist_lock); 7897 mutex_unlock(&rt_constraints_mutex); 7898 7899 return err; 7900 } 7901 7902 static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) 7903 { 7904 u64 rt_runtime, rt_period; 7905 7906 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7907 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; 7908 if (rt_runtime_us < 0) 7909 rt_runtime = RUNTIME_INF; 7910 7911 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7912 } 7913 7914 static long sched_group_rt_runtime(struct task_group *tg) 7915 { 7916 u64 rt_runtime_us; 7917 7918 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) 7919 return -1; 7920 7921 rt_runtime_us = tg->rt_bandwidth.rt_runtime; 7922 do_div(rt_runtime_us, NSEC_PER_USEC); 7923 return rt_runtime_us; 7924 } 7925 7926 static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) 7927 { 7928 u64 rt_runtime, rt_period; 7929 7930 rt_period = rt_period_us * NSEC_PER_USEC; 7931 rt_runtime = tg->rt_bandwidth.rt_runtime; 7932 7933 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7934 } 7935 7936 static long sched_group_rt_period(struct task_group *tg) 7937 { 7938 u64 rt_period_us; 7939 7940 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); 7941 do_div(rt_period_us, NSEC_PER_USEC); 7942 return rt_period_us; 7943 } 7944 #endif /* CONFIG_RT_GROUP_SCHED */ 7945 7946 #ifdef CONFIG_RT_GROUP_SCHED 7947 static int sched_rt_global_constraints(void) 7948 { 7949 int ret = 0; 7950 7951 mutex_lock(&rt_constraints_mutex); 7952 read_lock(&tasklist_lock); 7953 ret = __rt_schedulable(NULL, 0, 0); 7954 read_unlock(&tasklist_lock); 7955 mutex_unlock(&rt_constraints_mutex); 7956 7957 return ret; 7958 } 7959 7960 static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) 7961 { 7962 /* Don't accept realtime tasks when there is no way for them to run */ 7963 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) 7964 return 0; 7965 7966 return 1; 7967 } 7968 7969 #else /* !CONFIG_RT_GROUP_SCHED */ 7970 static int sched_rt_global_constraints(void) 7971 { 7972 unsigned long flags; 7973 int i, ret = 0; 7974 7975 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 7976 for_each_possible_cpu(i) { 7977 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 7978 7979 raw_spin_lock(&rt_rq->rt_runtime_lock); 7980 rt_rq->rt_runtime = global_rt_runtime(); 7981 raw_spin_unlock(&rt_rq->rt_runtime_lock); 7982 } 7983 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 7984 7985 return ret; 7986 } 7987 #endif /* CONFIG_RT_GROUP_SCHED */ 7988 7989 static int sched_dl_global_validate(void) 7990 { 7991 u64 runtime = global_rt_runtime(); 7992 u64 period = global_rt_period(); 7993 u64 new_bw = to_ratio(period, runtime); 7994 struct dl_bw *dl_b; 7995 int cpu, ret = 0; 7996 unsigned long flags; 7997 7998 /* 7999 * Here we want to check the bandwidth 
not being set to some 8000 * value smaller than the currently allocated bandwidth in 8001 * any of the root_domains. 8002 * 8003 * FIXME: Cycling on all the CPUs is overdoing, but simpler than 8004 * cycling on root_domains... Discussion on different/better 8005 * solutions is welcome! 8006 */ 8007 for_each_possible_cpu(cpu) { 8008 rcu_read_lock_sched(); 8009 dl_b = dl_bw_of(cpu); 8010 8011 raw_spin_lock_irqsave(&dl_b->lock, flags); 8012 if (new_bw < dl_b->total_bw) 8013 ret = -EBUSY; 8014 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 8015 8016 rcu_read_unlock_sched(); 8017 8018 if (ret) 8019 break; 8020 } 8021 8022 return ret; 8023 } 8024 8025 static void sched_dl_do_global(void) 8026 { 8027 u64 new_bw = -1; 8028 struct dl_bw *dl_b; 8029 int cpu; 8030 unsigned long flags; 8031 8032 def_dl_bandwidth.dl_period = global_rt_period(); 8033 def_dl_bandwidth.dl_runtime = global_rt_runtime(); 8034 8035 if (global_rt_runtime() != RUNTIME_INF) 8036 new_bw = to_ratio(global_rt_period(), global_rt_runtime()); 8037 8038 /* 8039 * FIXME: As above... 8040 */ 8041 for_each_possible_cpu(cpu) { 8042 rcu_read_lock_sched(); 8043 dl_b = dl_bw_of(cpu); 8044 8045 raw_spin_lock_irqsave(&dl_b->lock, flags); 8046 dl_b->bw = new_bw; 8047 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 8048 8049 rcu_read_unlock_sched(); 8050 } 8051 } 8052 8053 static int sched_rt_global_validate(void) 8054 { 8055 if (sysctl_sched_rt_period <= 0) 8056 return -EINVAL; 8057 8058 if ((sysctl_sched_rt_runtime != RUNTIME_INF) && 8059 (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) 8060 return -EINVAL; 8061 8062 return 0; 8063 } 8064 8065 static void sched_rt_do_global(void) 8066 { 8067 def_rt_bandwidth.rt_runtime = global_rt_runtime(); 8068 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); 8069 } 8070 8071 int sched_rt_handler(struct ctl_table *table, int write, 8072 void __user *buffer, size_t *lenp, 8073 loff_t *ppos) 8074 { 8075 int old_period, old_runtime; 8076 static DEFINE_MUTEX(mutex); 8077 int ret; 8078 8079 mutex_lock(&mutex); 8080 old_period = sysctl_sched_rt_period; 8081 old_runtime = sysctl_sched_rt_runtime; 8082 8083 ret = proc_dointvec(table, write, buffer, lenp, ppos); 8084 8085 if (!ret && write) { 8086 ret = sched_rt_global_validate(); 8087 if (ret) 8088 goto undo; 8089 8090 ret = sched_dl_global_validate(); 8091 if (ret) 8092 goto undo; 8093 8094 ret = sched_rt_global_constraints(); 8095 if (ret) 8096 goto undo; 8097 8098 sched_rt_do_global(); 8099 sched_dl_do_global(); 8100 } 8101 if (0) { 8102 undo: 8103 sysctl_sched_rt_period = old_period; 8104 sysctl_sched_rt_runtime = old_runtime; 8105 } 8106 mutex_unlock(&mutex); 8107 8108 return ret; 8109 } 8110 8111 int sched_rr_handler(struct ctl_table *table, int write, 8112 void __user *buffer, size_t *lenp, 8113 loff_t *ppos) 8114 { 8115 int ret; 8116 static DEFINE_MUTEX(mutex); 8117 8118 mutex_lock(&mutex); 8119 ret = proc_dointvec(table, write, buffer, lenp, ppos); 8120 /* make sure that internally we keep jiffies */ 8121 /* also, writing zero resets timeslice to default */ 8122 if (!ret && write) { 8123 sched_rr_timeslice = sched_rr_timeslice <= 0 ? 8124 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice); 8125 } 8126 mutex_unlock(&mutex); 8127 return ret; 8128 } 8129 8130 #ifdef CONFIG_CGROUP_SCHED 8131 8132 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 8133 { 8134 return css ? 
container_of(css, struct task_group, css) : NULL; 8135 } 8136 8137 static struct cgroup_subsys_state * 8138 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 8139 { 8140 struct task_group *parent = css_tg(parent_css); 8141 struct task_group *tg; 8142 8143 if (!parent) { 8144 /* This is early initialization for the top cgroup */ 8145 return &root_task_group.css; 8146 } 8147 8148 tg = sched_create_group(parent); 8149 if (IS_ERR(tg)) 8150 return ERR_PTR(-ENOMEM); 8151 8152 return &tg->css; 8153 } 8154 8155 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 8156 { 8157 struct task_group *tg = css_tg(css); 8158 struct task_group *parent = css_tg(css->parent); 8159 8160 if (parent) 8161 sched_online_group(tg, parent); 8162 return 0; 8163 } 8164 8165 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 8166 { 8167 struct task_group *tg = css_tg(css); 8168 8169 sched_destroy_group(tg); 8170 } 8171 8172 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css) 8173 { 8174 struct task_group *tg = css_tg(css); 8175 8176 sched_offline_group(tg); 8177 } 8178 8179 static void cpu_cgroup_fork(struct task_struct *task, void *private) 8180 { 8181 sched_move_task(task); 8182 } 8183 8184 static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, 8185 struct cgroup_taskset *tset) 8186 { 8187 struct task_struct *task; 8188 8189 cgroup_taskset_for_each(task, tset) { 8190 #ifdef CONFIG_RT_GROUP_SCHED 8191 if (!sched_rt_can_attach(css_tg(css), task)) 8192 return -EINVAL; 8193 #else 8194 /* We don't support RT-tasks being in separate groups */ 8195 if (task->sched_class != &fair_sched_class) 8196 return -EINVAL; 8197 #endif 8198 } 8199 return 0; 8200 } 8201 8202 static void cpu_cgroup_attach(struct cgroup_subsys_state *css, 8203 struct cgroup_taskset *tset) 8204 { 8205 struct task_struct *task; 8206 8207 cgroup_taskset_for_each(task, tset) 8208 sched_move_task(task); 8209 } 8210 8211 static void cpu_cgroup_exit(struct cgroup_subsys_state *css, 8212 struct cgroup_subsys_state *old_css, 8213 struct task_struct *task) 8214 { 8215 /* 8216 * cgroup_exit() is called in the copy_process() failure path. 8217 * Ignore this case since the task hasn't ran yet, this avoids 8218 * trying to poke a half freed task state from generic code. 8219 */ 8220 if (!(task->flags & PF_EXITING)) 8221 return; 8222 8223 sched_move_task(task); 8224 } 8225 8226 #ifdef CONFIG_FAIR_GROUP_SCHED 8227 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 8228 struct cftype *cftype, u64 shareval) 8229 { 8230 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 8231 } 8232 8233 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 8234 struct cftype *cft) 8235 { 8236 struct task_group *tg = css_tg(css); 8237 8238 return (u64) scale_load_down(tg->shares); 8239 } 8240 8241 #ifdef CONFIG_CFS_BANDWIDTH 8242 static DEFINE_MUTEX(cfs_constraints_mutex); 8243 8244 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 8245 const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 8246 8247 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 8248 8249 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 8250 { 8251 int i, ret = 0, runtime_enabled, runtime_was_enabled; 8252 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8253 8254 if (tg == &root_task_group) 8255 return -EINVAL; 8256 8257 /* 8258 * Ensure we have at some amount of bandwidth every period. 
This is 8259 * to prevent reaching a state of large arrears when throttled via 8260 * entity_tick() resulting in prolonged exit starvation. 8261 */ 8262 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 8263 return -EINVAL; 8264 8265 /* 8266 * Likewise, bound things on the otherside by preventing insane quota 8267 * periods. This also allows us to normalize in computing quota 8268 * feasibility. 8269 */ 8270 if (period > max_cfs_quota_period) 8271 return -EINVAL; 8272 8273 /* 8274 * Prevent race between setting of cfs_rq->runtime_enabled and 8275 * unthrottle_offline_cfs_rqs(). 8276 */ 8277 get_online_cpus(); 8278 mutex_lock(&cfs_constraints_mutex); 8279 ret = __cfs_schedulable(tg, period, quota); 8280 if (ret) 8281 goto out_unlock; 8282 8283 runtime_enabled = quota != RUNTIME_INF; 8284 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 8285 /* 8286 * If we need to toggle cfs_bandwidth_used, off->on must occur 8287 * before making related changes, and on->off must occur afterwards 8288 */ 8289 if (runtime_enabled && !runtime_was_enabled) 8290 cfs_bandwidth_usage_inc(); 8291 raw_spin_lock_irq(&cfs_b->lock); 8292 cfs_b->period = ns_to_ktime(period); 8293 cfs_b->quota = quota; 8294 8295 __refill_cfs_bandwidth_runtime(cfs_b); 8296 /* restart the period timer (if active) to handle new period expiry */ 8297 if (runtime_enabled) 8298 start_cfs_bandwidth(cfs_b); 8299 raw_spin_unlock_irq(&cfs_b->lock); 8300 8301 for_each_online_cpu(i) { 8302 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 8303 struct rq *rq = cfs_rq->rq; 8304 8305 raw_spin_lock_irq(&rq->lock); 8306 cfs_rq->runtime_enabled = runtime_enabled; 8307 cfs_rq->runtime_remaining = 0; 8308 8309 if (cfs_rq->throttled) 8310 unthrottle_cfs_rq(cfs_rq); 8311 raw_spin_unlock_irq(&rq->lock); 8312 } 8313 if (runtime_was_enabled && !runtime_enabled) 8314 cfs_bandwidth_usage_dec(); 8315 out_unlock: 8316 mutex_unlock(&cfs_constraints_mutex); 8317 put_online_cpus(); 8318 8319 return ret; 8320 } 8321 8322 int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 8323 { 8324 u64 quota, period; 8325 8326 period = ktime_to_ns(tg->cfs_bandwidth.period); 8327 if (cfs_quota_us < 0) 8328 quota = RUNTIME_INF; 8329 else 8330 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 8331 8332 return tg_set_cfs_bandwidth(tg, period, quota); 8333 } 8334 8335 long tg_get_cfs_quota(struct task_group *tg) 8336 { 8337 u64 quota_us; 8338 8339 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 8340 return -1; 8341 8342 quota_us = tg->cfs_bandwidth.quota; 8343 do_div(quota_us, NSEC_PER_USEC); 8344 8345 return quota_us; 8346 } 8347 8348 int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 8349 { 8350 u64 quota, period; 8351 8352 period = (u64)cfs_period_us * NSEC_PER_USEC; 8353 quota = tg->cfs_bandwidth.quota; 8354 8355 return tg_set_cfs_bandwidth(tg, period, quota); 8356 } 8357 8358 long tg_get_cfs_period(struct task_group *tg) 8359 { 8360 u64 cfs_period_us; 8361 8362 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 8363 do_div(cfs_period_us, NSEC_PER_USEC); 8364 8365 return cfs_period_us; 8366 } 8367 8368 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 8369 struct cftype *cft) 8370 { 8371 return tg_get_cfs_quota(css_tg(css)); 8372 } 8373 8374 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 8375 struct cftype *cftype, s64 cfs_quota_us) 8376 { 8377 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 8378 } 8379 8380 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 8381 struct cftype *cft) 8382 { 8383 return 

struct cfs_schedulable_data {
	struct task_group *tg;
	u64 period, quota;
};

/*
 * normalize group quota/period to be quota/max_period
 * note: units are usecs
 */
static u64 normalize_cfs_quota(struct task_group *tg,
			       struct cfs_schedulable_data *d)
{
	u64 quota, period;

	if (tg == d->tg) {
		period = d->period;
		quota = d->quota;
	} else {
		period = tg_get_cfs_period(tg);
		quota = tg_get_cfs_quota(tg);
	}

	/* note: these should typically be equivalent */
	if (quota == RUNTIME_INF || quota == -1)
		return RUNTIME_INF;

	return to_ratio(period, quota);
}

static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
	struct cfs_schedulable_data *d = data;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	s64 quota = 0, parent_quota = -1;

	if (!tg->parent) {
		quota = RUNTIME_INF;
	} else {
		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

		quota = normalize_cfs_quota(tg, d);
		parent_quota = parent_b->hierarchical_quota;

		/*
		 * ensure max(child_quota) <= parent_quota, inherit when no
		 * limit is set
		 */
		if (quota == RUNTIME_INF)
			quota = parent_quota;
		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
			return -EINVAL;
	}
	cfs_b->hierarchical_quota = quota;

	return 0;
}

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
	int ret;
	struct cfs_schedulable_data data = {
		.tg = tg,
		.period = period,
		.quota = quota,
	};

	if (quota != RUNTIME_INF) {
		do_div(data.period, NSEC_PER_USEC);
		do_div(data.quota, NSEC_PER_USEC);
	}

	rcu_read_lock();
	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}

static int cpu_stats_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);

	return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
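
/*
 * Worked example for the feasibility walk above (illustrative numbers,
 * not from the original source): a parent group with period 100000us
 * and quota 50000us normalizes via to_ratio() to half a CPU; a child
 * requesting period 100000us and quota 80000us normalizes to 80% and is
 * rejected with -EINVAL by tg_cfs_schedulable_down() because it exceeds
 * the parent's hierarchical_quota, while a child left at RUNTIME_INF
 * simply inherits the parent's value.
 *
 * cpu_stats_show() above emits three lines into cpu.stat, e.g. (made-up
 * values):
 *
 *   nr_periods 345
 *   nr_throttled 12
 *   throttled_time 84000000
 *
 * where throttled_time accumulates scheduler-clock nanoseconds spent
 * throttled.
 */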
.name = "shares", 8514 .read_u64 = cpu_shares_read_u64, 8515 .write_u64 = cpu_shares_write_u64, 8516 }, 8517 #endif 8518 #ifdef CONFIG_CFS_BANDWIDTH 8519 { 8520 .name = "cfs_quota_us", 8521 .read_s64 = cpu_cfs_quota_read_s64, 8522 .write_s64 = cpu_cfs_quota_write_s64, 8523 }, 8524 { 8525 .name = "cfs_period_us", 8526 .read_u64 = cpu_cfs_period_read_u64, 8527 .write_u64 = cpu_cfs_period_write_u64, 8528 }, 8529 { 8530 .name = "stat", 8531 .seq_show = cpu_stats_show, 8532 }, 8533 #endif 8534 #ifdef CONFIG_RT_GROUP_SCHED 8535 { 8536 .name = "rt_runtime_us", 8537 .read_s64 = cpu_rt_runtime_read, 8538 .write_s64 = cpu_rt_runtime_write, 8539 }, 8540 { 8541 .name = "rt_period_us", 8542 .read_u64 = cpu_rt_period_read_uint, 8543 .write_u64 = cpu_rt_period_write_uint, 8544 }, 8545 #endif 8546 { } /* terminate */ 8547 }; 8548 8549 struct cgroup_subsys cpu_cgrp_subsys = { 8550 .css_alloc = cpu_cgroup_css_alloc, 8551 .css_free = cpu_cgroup_css_free, 8552 .css_online = cpu_cgroup_css_online, 8553 .css_offline = cpu_cgroup_css_offline, 8554 .fork = cpu_cgroup_fork, 8555 .can_attach = cpu_cgroup_can_attach, 8556 .attach = cpu_cgroup_attach, 8557 .exit = cpu_cgroup_exit, 8558 .legacy_cftypes = cpu_files, 8559 .early_init = 1, 8560 }; 8561 8562 #endif /* CONFIG_CGROUP_SCHED */ 8563 8564 void dump_cpu_task(int cpu) 8565 { 8566 pr_info("Task dump for CPU %d:\n", cpu); 8567 sched_show_task(cpu_curr(cpu)); 8568 } 8569