/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#include <linux/sched.h>

#include <linux/sched/autogroup.h>
#include <linux/sched/clock.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/idle.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/sched/user.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/xacct.h>

#include <uapi/linux/sched/types.h>

#include <linux/binfmts.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/cpuset.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/kprobes.h>
#include <linux/kthread.h>
#include <linux/membarrier.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/nmi.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/profile.h>
#include <linux/rcupdate_wait.h>
#include <linux/security.h>
#include <linux/stackprotector.h>
#include <linux/stop_machine.h>
#include <linux/suspend.h>
#include <linux/swait.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/tsacct_kern.h>

#include <asm/tlb.h>

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
#endif
#include "cpupri.h"
#include "cpudeadline.h"

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
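
/*
 * For illustration (assuming HZ == 1000): NS_TO_JIFFIES(2000000) ==
 * 2000000 / (1000000000 / 1000) == 2 jiffies. Note that the integer
 * division truncates, so anything below one jiffy's worth of
 * nanoseconds converts to 0.
 */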

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
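
/*
 * Worked example (SCHED_FIXEDPOINT_SHIFT is 10): a nice-0 task has a
 * user-visible weight of 1024. On 64-bit, scale_load(1024) ==
 * 1024 << 10 == 1048576 == NICE_0_LOAD (1L << 20), and
 * scale_load_down() reverses the shift. On 32-bit both are no-ops and
 * NICE_0_LOAD is simply 1024.
 */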

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE		10

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF		((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching is
 * available on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV	0x10000000

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}
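
/*
 * Example of the above: with a->deadline at 100us and b->deadline at
 * 200us (absolute), @a preempts @b. Equal deadlines do not preempt,
 * since dl_time_before() is a strict "earlier than", while a special
 * (sugov) entity preempts unconditionally.
 */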

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It can in turn be changed by writing to its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t		dl_runtime_lock;
	u64			dl_runtime;
	u64			dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

struct dl_bw {
	raw_spinlock_t		lock;
	u64			bw;
	u64			total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
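
/*
 * Worked example for __dl_overflow(): on a 4-CPU root domain with the
 * default 95% cap (sched_rt_runtime_us / sched_rt_period_us ==
 * 950000 / 1000000), a new task is rejected as soon as
 * total_bw - old_bw + new_bw would exceed 4 * 95% == 3.8 CPUs worth
 * of bandwidth. dl_b->bw == -1 (i.e. RUNTIME_INF) disables the check.
 */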

extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
extern void init_dl_bw(struct dl_bw *dl_b);
extern int  sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern bool dl_cpu_busy(unsigned int cpu);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;
	u64			quota;
	u64			runtime;
	s64			hierarchical_quota;
	u64			runtime_expires;

	int			idle;
	int			period_active;
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	u64			throttled_time;
#endif
};

/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;

#ifdef	CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of a single entity should not be too
 * large, and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES		(1UL << 1)
#define MAX_SHARES		(1UL << 18)
#endif
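
/*
 * For scale: MIN_SHARES is 2 and MAX_SHARES is 262144 (1 << 18);
 * sched_group_set_shares() clamps the scale_load()ed value of a
 * cpu.shares write into [scale_load(MIN_SHARES), scale_load(MAX_SHARES)],
 * and the default of 1024 sits comfortably inside that range.
 */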

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);
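
/*
 * Typical usage sketch (tg_print() is hypothetical; tg_nop() is the
 * real no-op visitor for when only one direction matters):
 *
 *	static int tg_print(struct task_group *tg, void *data)
 *	{
 *		pr_info("tg %p shares %lu\n", tg, tg->shares);
 *		return 0;
 *	}
 *
 *	rcu_read_lock();
 *	walk_tg_tree(tg_print, tg_nop, NULL);
 *	rcu_read_unlock();
 *
 * A non-zero return from a visitor aborts the walk.
 */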

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight	load;
	unsigned long		runnable_weight;
	unsigned int		nr_running;
	unsigned int		h_nr_running;

	u64			exec_clock;
	u64			min_vruntime;
#ifndef CONFIG_64BIT
	u64			min_vruntime_copy;
#endif

	struct rb_root_cached	tasks_timeline;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity	*curr;
	struct sched_entity	*next;
	struct sched_entity	*last;
	struct sched_entity	*skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int		nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg	avg;
#ifndef CONFIG_64BIT
	u64			load_last_update_time_copy;
#endif
	struct {
		raw_spinlock_t	lock ____cacheline_aligned;
		int		nr;
		unsigned long	load_avg;
		unsigned long	util_avg;
		unsigned long	runnable_sum;
	} removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long		tg_load_avg_contrib;
	long			propagate;
	long			prop_runnable_sum;

	/*
	 * h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long		h_load;
	u64			last_h_load_update;
	struct sched_entity	*h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
	 * This list is used during load balance.
	 */
	int			on_list;
	struct list_head	leaf_cfs_rq_list;
	struct task_group	*tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int			runtime_enabled;
	u64			runtime_expires;
	s64			runtime_remaining;

	u64			throttled_clock;
	u64			throttled_clock_task;
	u64			throttled_clock_task_time;
	int			throttled;
	int			throttle_count;
	struct list_head	throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array	active;
	unsigned int		rt_nr_running;
	unsigned int		rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int		curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int		next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long		rt_nr_migratory;
	unsigned long		rt_nr_total;
	int			overloaded;
	struct plist_head	pushable_tasks;
#endif /* CONFIG_SMP */
	int			rt_queued;

	int			rt_throttled;
	u64			rt_time;
	u64			rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long		rt_nr_boosted;

	struct rq		*rq;
	struct task_group	*tg;
#endif
};
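
/*
 * Throttling example, roughly: with the default /proc/sys/kernel
 * values (sched_rt_runtime_us == 950000, sched_rt_period_us ==
 * 1000000), rt_time accrues while RT tasks run and, once it exceeds
 * rt_runtime within a period, rt_throttled is set and the rt_rq is
 * dequeued so that fair tasks can use the remaining 5% of the period.
 */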

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached	root;

	unsigned long		dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64		curr;
		u64		next;
	} earliest_dl;

	unsigned long		dl_nr_migratory;
	int			overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached	pushable_dl_tasks_root;
#else
	struct dl_bw		dl_bw;
#endif
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks
	 */
	u64			running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in runqueue and the tasks that executed on this
	 * CPU and blocked). Increased when a task moves to this runqueue, and
	 * decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64			this_bw;
	u64			extra_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64			bw_ratio;
};
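
/*
 * Illustration: a single SCHED_DEADLINE task with runtime 10ms every
 * period 100ms adds a utilization of 0.1 to this_bw for as long as it
 * is "assigned" here, and to running_bw only while it is runnable;
 * after it blocks, this_bw - running_bw == 0.1 is exactly the
 * "inactive utilization" mentioned above.
 */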

#ifdef CONFIG_SMP

static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t		refcount;
	atomic_t		rto_count;
	struct rcu_head		rcu;
	cpumask_var_t		span;
	cpumask_var_t		online;

	/* Indicate more than one runnable task for any CPU */
	bool			overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t		dlo_mask;
	atomic_t		dlo_count;
	struct dl_bw		dl_bw;
	struct cpudl		cpudl;

#ifdef HAVE_RT_PUSH_IPI
	/*
	 * For IPI pull requests, loop across the rto_mask.
	 */
	struct irq_work		rto_push_work;
	raw_spinlock_t		rto_lock;
	/* These are only updated and read within rto_lock */
	int			rto_loop;
	int			rto_cpu;
	/* These atomics are updated outside of a lock */
	atomic_t		rto_loop_next;
	atomic_t		rto_loop_start;
#endif
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t		rto_mask;
	struct cpupri		cpupri;

	unsigned long		max_cpu_capacity;
};

extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t		lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int		nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int		nr_numa_running;
	unsigned int		nr_preferred_running;
#endif
#define CPU_LOAD_IDX_MAX 5
	unsigned long		cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long		last_load_update_tick;
	unsigned long		last_blocked_load_update_tick;
	unsigned int		has_blocked_load;
#endif /* CONFIG_SMP */
	unsigned int		nohz_tick_stopped;
	atomic_t		nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

	/* capture load from *all* tasks on this CPU: */
	struct load_weight	load;
	unsigned long		nr_load_updates;
	u64			nr_switches;

	struct cfs_rq		cfs;
	struct rt_rq		rt;
	struct dl_rq		dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this CPU: */
	struct list_head	leaf_cfs_rq_list;
	struct list_head	*tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long		nr_uninterruptible;

	struct task_struct	*curr;
	struct task_struct	*idle;
	struct task_struct	*stop;
	unsigned long		next_balance;
	struct mm_struct	*prev_mm;

	unsigned int		clock_update_flags;
	u64			clock;
	u64			clock_task;

	atomic_t		nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain	*rd;
	struct sched_domain	*sd;

	unsigned long		cpu_capacity;
	unsigned long		cpu_capacity_orig;

	struct callback_head	*balance_callback;

	unsigned char		idle_balance;

	/* For active balancing */
	int			active_balance;
	int			push_cpu;
	struct cpu_stop_work	active_balance_work;

	/* CPU of this runqueue: */
	int			cpu;
	int			online;

	struct list_head	cfs_tasks;

	u64			rt_avg;
	u64			age_stamp;
	u64			idle_stamp;
	u64			avg_idle;

	/* This is used to determine avg_idle's max value */
	u64			max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64			prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64			prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64			prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long		calc_load_update;
	long			calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int			hrtick_csd_pending;
	call_single_data_t	hrtick_csd;
#endif
	struct hrtimer		hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info	rq_sched_info;
	unsigned long long	rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int		yld_count;

	/* schedule() stats */
	unsigned int		sched_count;
	unsigned int		sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int		ttwu_count;
	unsigned int		ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head	wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within an RCU lock section */
	struct cpuidle_state	*idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)
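
/*
 * Usage sketch for the accessors above (this_rq() assumes a
 * preemption-safe context):
 *
 *	struct rq *rq  = cpu_rq(cpu);       (runqueue of @cpu)
 *	struct rq *trq = task_rq(p);        (runqueue @p was last queued on)
 *	struct task_struct *tsk = cpu_curr(cpu);
 *
 * None of these take rq->lock; readers that need a stable view must
 * take it themselves (or use task_rq_lock() and friends).
 */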

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *   call to __schedule(). This is an optimisation to avoid
 *   neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *   in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *   made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_task;
}

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_update_flags |= RQCF_REQ_SKIP;
	else
		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
#endif
};

static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(&rq->lock);

#ifdef CONFIG_SCHED_DEBUG
	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
#endif

	lockdep_unpin_lock(&rq->lock, rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(&rq->lock, rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
#endif
}
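
/*
 * Sketch of the intended pairing (the rq_lock()/rq_unlock() wrappers
 * that combine locking with the pin/unpin calls appear further down
 * in this file):
 *
 *	struct rq_flags rf;
 *
 *	rq_lock(rq, &rf);          (raw_spin_lock + rq_pin_lock)
 *	update_rq_clock(rq);
 *	...
 *	rq_unlock(rq, &rf);        (rq_unpin_lock + raw_spin_unlock)
 *
 * The pin cookie catches mismatched lock/unlock pairs, and the stashed
 * clock_update_flags let a later rq_repin_lock() remember whether the
 * clock was already updated in this critical section.
 */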

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA
extern void sched_init_numa(void);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
#else
static inline void sched_init_numa(void) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
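
/*
 * Usage sketch: a scheduling class queues work to run once rq->lock is
 * about to be released, e.g. the RT class queues its push work roughly
 * like so (per-CPU head as in kernel/sched/rt.c):
 *
 *	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu),
 *			       push_rt_tasks);
 *
 * The head->next check above makes double-queueing a no-op.
 */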
1111518cd623SPeter Zijlstra */ 1112518cd623SPeter Zijlstra static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 1113518cd623SPeter Zijlstra { 1114518cd623SPeter Zijlstra struct sched_domain *sd, *hsd = NULL; 1115518cd623SPeter Zijlstra 1116518cd623SPeter Zijlstra for_each_domain(cpu, sd) { 1117518cd623SPeter Zijlstra if (!(sd->flags & flag)) 1118518cd623SPeter Zijlstra break; 1119518cd623SPeter Zijlstra hsd = sd; 1120518cd623SPeter Zijlstra } 1121518cd623SPeter Zijlstra 1122518cd623SPeter Zijlstra return hsd; 1123518cd623SPeter Zijlstra } 1124518cd623SPeter Zijlstra 1125fb13c7eeSMel Gorman static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) 1126fb13c7eeSMel Gorman { 1127fb13c7eeSMel Gorman struct sched_domain *sd; 1128fb13c7eeSMel Gorman 1129fb13c7eeSMel Gorman for_each_domain(cpu, sd) { 1130fb13c7eeSMel Gorman if (sd->flags & flag) 1131fb13c7eeSMel Gorman break; 1132fb13c7eeSMel Gorman } 1133fb13c7eeSMel Gorman 1134fb13c7eeSMel Gorman return sd; 1135fb13c7eeSMel Gorman } 1136fb13c7eeSMel Gorman 1137518cd623SPeter Zijlstra DECLARE_PER_CPU(struct sched_domain *, sd_llc); 11387d9ffa89SPeter Zijlstra DECLARE_PER_CPU(int, sd_llc_size); 1139518cd623SPeter Zijlstra DECLARE_PER_CPU(int, sd_llc_id); 11400e369d75SPeter Zijlstra DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); 1141fb13c7eeSMel Gorman DECLARE_PER_CPU(struct sched_domain *, sd_numa); 114237dc6b50SPreeti U Murthy DECLARE_PER_CPU(struct sched_domain *, sd_asym); 1143518cd623SPeter Zijlstra 114463b2ca30SNicolas Pitre struct sched_group_capacity { 11455e6521eaSLi Zefan atomic_t ref; 11465e6521eaSLi Zefan /* 1147172895e6SYuyang Du * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity 114863b2ca30SNicolas Pitre * for a single CPU. 11495e6521eaSLi Zefan */ 1150bf475ce0SMorten Rasmussen unsigned long capacity; 1151bf475ce0SMorten Rasmussen unsigned long min_capacity; /* Min per-CPU capacity in group */ 11525e6521eaSLi Zefan unsigned long next_update; 115363b2ca30SNicolas Pitre int imbalance; /* XXX unrelated to capacity but shared group state */ 11545e6521eaSLi Zefan 1155005f874dSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 1156005f874dSPeter Zijlstra int id; 1157005f874dSPeter Zijlstra #endif 1158005f874dSPeter Zijlstra 115997fb7a0aSIngo Molnar unsigned long cpumask[0]; /* Balance mask */ 11605e6521eaSLi Zefan }; 11615e6521eaSLi Zefan 11625e6521eaSLi Zefan struct sched_group { 11635e6521eaSLi Zefan struct sched_group *next; /* Must be a circular list */ 11645e6521eaSLi Zefan atomic_t ref; 11655e6521eaSLi Zefan 11665e6521eaSLi Zefan unsigned int group_weight; 116763b2ca30SNicolas Pitre struct sched_group_capacity *sgc; 116897fb7a0aSIngo Molnar int asym_prefer_cpu; /* CPU of highest priority in group */ 11695e6521eaSLi Zefan 11705e6521eaSLi Zefan /* 11715e6521eaSLi Zefan * The CPUs this group covers. 11725e6521eaSLi Zefan * 11735e6521eaSLi Zefan * NOTE: this field is variable length. 
(Allocated dynamically 11745e6521eaSLi Zefan * by attaching extra space to the end of the structure, 11755e6521eaSLi Zefan * depending on how many CPUs the kernel has booted up with) 11765e6521eaSLi Zefan */ 11775e6521eaSLi Zefan unsigned long cpumask[0]; 11785e6521eaSLi Zefan }; 11795e6521eaSLi Zefan 1180ae4df9d6SPeter Zijlstra static inline struct cpumask *sched_group_span(struct sched_group *sg) 11815e6521eaSLi Zefan { 11825e6521eaSLi Zefan return to_cpumask(sg->cpumask); 11835e6521eaSLi Zefan } 11845e6521eaSLi Zefan 11855e6521eaSLi Zefan /* 1186e5c14b1fSPeter Zijlstra * See build_balance_mask(). 11875e6521eaSLi Zefan */ 1188e5c14b1fSPeter Zijlstra static inline struct cpumask *group_balance_mask(struct sched_group *sg) 11895e6521eaSLi Zefan { 119063b2ca30SNicolas Pitre return to_cpumask(sg->sgc->cpumask); 11915e6521eaSLi Zefan } 11925e6521eaSLi Zefan 11935e6521eaSLi Zefan /** 119497fb7a0aSIngo Molnar * group_first_cpu - Returns the first CPU in the cpumask of a sched_group. 119597fb7a0aSIngo Molnar * @group: The group whose first CPU is to be returned. 11965e6521eaSLi Zefan */ 11975e6521eaSLi Zefan static inline unsigned int group_first_cpu(struct sched_group *group) 11985e6521eaSLi Zefan { 1199ae4df9d6SPeter Zijlstra return cpumask_first(sched_group_span(group)); 12005e6521eaSLi Zefan } 12015e6521eaSLi Zefan 1202c1174876SPeter Zijlstra extern int group_balance_cpu(struct sched_group *sg); 1203c1174876SPeter Zijlstra 12043866e845SSteven Rostedt (Red Hat) #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 12053866e845SSteven Rostedt (Red Hat) void register_sched_domain_sysctl(void); 1206bbdacdfeSPeter Zijlstra void dirty_sched_domain_sysctl(int cpu); 12073866e845SSteven Rostedt (Red Hat) void unregister_sched_domain_sysctl(void); 12083866e845SSteven Rostedt (Red Hat) #else 12093866e845SSteven Rostedt (Red Hat) static inline void register_sched_domain_sysctl(void) 12103866e845SSteven Rostedt (Red Hat) { 12113866e845SSteven Rostedt (Red Hat) } 1212bbdacdfeSPeter Zijlstra static inline void dirty_sched_domain_sysctl(int cpu) 1213bbdacdfeSPeter Zijlstra { 1214bbdacdfeSPeter Zijlstra } 12153866e845SSteven Rostedt (Red Hat) static inline void unregister_sched_domain_sysctl(void) 12163866e845SSteven Rostedt (Red Hat) { 12173866e845SSteven Rostedt (Red Hat) } 12183866e845SSteven Rostedt (Red Hat) #endif 12193866e845SSteven Rostedt (Red Hat) 1220e3baac47SPeter Zijlstra #else 1221e3baac47SPeter Zijlstra 1222e3baac47SPeter Zijlstra static inline void sched_ttwu_pending(void) { } 1223e3baac47SPeter Zijlstra 1224518cd623SPeter Zijlstra #endif /* CONFIG_SMP */ 1225391e43daSPeter Zijlstra 1226391e43daSPeter Zijlstra #include "stats.h" 12271051408fSIngo Molnar #include "autogroup.h" 1228391e43daSPeter Zijlstra 1229391e43daSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED 1230391e43daSPeter Zijlstra 1231391e43daSPeter Zijlstra /* 1232391e43daSPeter Zijlstra * Return the group to which this task belongs. 1233391e43daSPeter Zijlstra * 12348af01f56STejun Heo * We cannot use task_css() and friends because the cgroup subsystem 12358af01f56STejun Heo * changes that value before the cgroup_subsys::attach() method is called, 12368af01f56STejun Heo * therefore we cannot pin it and might observe the wrong value. 12378323f26cSPeter Zijlstra * 12388323f26cSPeter Zijlstra * The same is true for autogroup's p->signal->autogroup->tg; the autogroup 12398323f26cSPeter Zijlstra * core changes this before calling sched_move_task().
12408323f26cSPeter Zijlstra * 12418323f26cSPeter Zijlstra * Instead we use a 'copy' which is updated from sched_move_task() while 12428323f26cSPeter Zijlstra * holding both task_struct::pi_lock and rq::lock. 1243391e43daSPeter Zijlstra */ 1244391e43daSPeter Zijlstra static inline struct task_group *task_group(struct task_struct *p) 1245391e43daSPeter Zijlstra { 12468323f26cSPeter Zijlstra return p->sched_task_group; 1247391e43daSPeter Zijlstra } 1248391e43daSPeter Zijlstra 1249391e43daSPeter Zijlstra /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 1250391e43daSPeter Zijlstra static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 1251391e43daSPeter Zijlstra { 1252391e43daSPeter Zijlstra #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 1253391e43daSPeter Zijlstra struct task_group *tg = task_group(p); 1254391e43daSPeter Zijlstra #endif 1255391e43daSPeter Zijlstra 1256391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED 1257ad936d86SByungchul Park set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 1258391e43daSPeter Zijlstra p->se.cfs_rq = tg->cfs_rq[cpu]; 1259391e43daSPeter Zijlstra p->se.parent = tg->se[cpu]; 1260391e43daSPeter Zijlstra #endif 1261391e43daSPeter Zijlstra 1262391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 1263391e43daSPeter Zijlstra p->rt.rt_rq = tg->rt_rq[cpu]; 1264391e43daSPeter Zijlstra p->rt.parent = tg->rt_se[cpu]; 1265391e43daSPeter Zijlstra #endif 1266391e43daSPeter Zijlstra } 1267391e43daSPeter Zijlstra 1268391e43daSPeter Zijlstra #else /* CONFIG_CGROUP_SCHED */ 1269391e43daSPeter Zijlstra 1270391e43daSPeter Zijlstra static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 1271391e43daSPeter Zijlstra static inline struct task_group *task_group(struct task_struct *p) 1272391e43daSPeter Zijlstra { 1273391e43daSPeter Zijlstra return NULL; 1274391e43daSPeter Zijlstra } 1275391e43daSPeter Zijlstra 1276391e43daSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */ 1277391e43daSPeter Zijlstra 1278391e43daSPeter Zijlstra static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 1279391e43daSPeter Zijlstra { 1280391e43daSPeter Zijlstra set_task_rq(p, cpu); 1281391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1282391e43daSPeter Zijlstra /* 1283391e43daSPeter Zijlstra * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 1284391e43daSPeter Zijlstra * successfully executed on another CPU. We must ensure that updates of 1285391e43daSPeter Zijlstra * per-task data have been completed by this moment.
1286391e43daSPeter Zijlstra */ 1287391e43daSPeter Zijlstra smp_wmb(); 1288c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK 1289c65eacbeSAndy Lutomirski p->cpu = cpu; 1290c65eacbeSAndy Lutomirski #else 1291391e43daSPeter Zijlstra task_thread_info(p)->cpu = cpu; 1292c65eacbeSAndy Lutomirski #endif 1293ac66f547SPeter Zijlstra p->wake_cpu = cpu; 1294391e43daSPeter Zijlstra #endif 1295391e43daSPeter Zijlstra } 1296391e43daSPeter Zijlstra 1297391e43daSPeter Zijlstra /* 1298391e43daSPeter Zijlstra * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 1299391e43daSPeter Zijlstra */ 1300391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 1301c5905afbSIngo Molnar # include <linux/static_key.h> 1302391e43daSPeter Zijlstra # define const_debug __read_mostly 1303391e43daSPeter Zijlstra #else 1304391e43daSPeter Zijlstra # define const_debug const 1305391e43daSPeter Zijlstra #endif 1306391e43daSPeter Zijlstra 1307391e43daSPeter Zijlstra #define SCHED_FEAT(name, enabled) \ 1308391e43daSPeter Zijlstra __SCHED_FEAT_##name , 1309391e43daSPeter Zijlstra 1310391e43daSPeter Zijlstra enum { 1311391e43daSPeter Zijlstra #include "features.h" 1312f8b6d1ccSPeter Zijlstra __SCHED_FEAT_NR, 1313391e43daSPeter Zijlstra }; 1314391e43daSPeter Zijlstra 1315391e43daSPeter Zijlstra #undef SCHED_FEAT 1316391e43daSPeter Zijlstra 1317f8b6d1ccSPeter Zijlstra #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) 1318765cc3a4SPatrick Bellasi 1319765cc3a4SPatrick Bellasi /* 1320765cc3a4SPatrick Bellasi * To support run-time toggling of sched features, all the translation units 1321765cc3a4SPatrick Bellasi * (except core.c) reference the sysctl_sched_features defined in core.c. 1322765cc3a4SPatrick Bellasi */ 1323765cc3a4SPatrick Bellasi extern const_debug unsigned int sysctl_sched_features; 1324765cc3a4SPatrick Bellasi 1325f8b6d1ccSPeter Zijlstra #define SCHED_FEAT(name, enabled) \ 1326c5905afbSIngo Molnar static __always_inline bool static_branch_##name(struct static_key *key) \ 1327f8b6d1ccSPeter Zijlstra { \ 13286e76ea8aSJason Baron return static_key_##enabled(key); \ 1329f8b6d1ccSPeter Zijlstra } 1330f8b6d1ccSPeter Zijlstra 1331f8b6d1ccSPeter Zijlstra #include "features.h" 1332f8b6d1ccSPeter Zijlstra #undef SCHED_FEAT 1333f8b6d1ccSPeter Zijlstra 1334c5905afbSIngo Molnar extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 1335f8b6d1ccSPeter Zijlstra #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 1336765cc3a4SPatrick Bellasi 1337f8b6d1ccSPeter Zijlstra #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */ 1338765cc3a4SPatrick Bellasi 1339765cc3a4SPatrick Bellasi /* 1340765cc3a4SPatrick Bellasi * Each translation unit has its own copy of sysctl_sched_features to allow 1341765cc3a4SPatrick Bellasi * constant propagation at compile time and compiler optimization based on 1342765cc3a4SPatrick Bellasi * the feature defaults.
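 *
 * For example (an illustrative expansion, assuming features.h contains the
 * line "SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)"): the initializer below
 * gains the term
 *
 *	(1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |
 *
 * and sched_feat(GENTLE_FAIR_SLEEPERS) then reduces to a compile-time
 * constant mask test that the compiler can optimize out entirely.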
1343765cc3a4SPatrick Bellasi */ 1344765cc3a4SPatrick Bellasi #define SCHED_FEAT(name, enabled) \ 1345765cc3a4SPatrick Bellasi (1UL << __SCHED_FEAT_##name) * enabled | 1346765cc3a4SPatrick Bellasi static const_debug __maybe_unused unsigned int sysctl_sched_features = 1347765cc3a4SPatrick Bellasi #include "features.h" 1348765cc3a4SPatrick Bellasi 0; 1349765cc3a4SPatrick Bellasi #undef SCHED_FEAT 1350765cc3a4SPatrick Bellasi 1351391e43daSPeter Zijlstra #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1352765cc3a4SPatrick Bellasi 1353f8b6d1ccSPeter Zijlstra #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */ 1354391e43daSPeter Zijlstra 13552a595721SSrikar Dronamraju extern struct static_key_false sched_numa_balancing; 1356cb251765SMel Gorman extern struct static_key_false sched_schedstats; 1357cbee9f88SPeter Zijlstra 1358391e43daSPeter Zijlstra static inline u64 global_rt_period(void) 1359391e43daSPeter Zijlstra { 1360391e43daSPeter Zijlstra return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 1361391e43daSPeter Zijlstra } 1362391e43daSPeter Zijlstra 1363391e43daSPeter Zijlstra static inline u64 global_rt_runtime(void) 1364391e43daSPeter Zijlstra { 1365391e43daSPeter Zijlstra if (sysctl_sched_rt_runtime < 0) 1366391e43daSPeter Zijlstra return RUNTIME_INF; 1367391e43daSPeter Zijlstra 1368391e43daSPeter Zijlstra return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 1369391e43daSPeter Zijlstra } 1370391e43daSPeter Zijlstra 1371391e43daSPeter Zijlstra static inline int task_current(struct rq *rq, struct task_struct *p) 1372391e43daSPeter Zijlstra { 1373391e43daSPeter Zijlstra return rq->curr == p; 1374391e43daSPeter Zijlstra } 1375391e43daSPeter Zijlstra 1376391e43daSPeter Zijlstra static inline int task_running(struct rq *rq, struct task_struct *p) 1377391e43daSPeter Zijlstra { 1378391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1379391e43daSPeter Zijlstra return p->on_cpu; 1380391e43daSPeter Zijlstra #else 1381391e43daSPeter Zijlstra return task_current(rq, p); 1382391e43daSPeter Zijlstra #endif 1383391e43daSPeter Zijlstra } 1384391e43daSPeter Zijlstra 1385da0c1e65SKirill Tkhai static inline int task_on_rq_queued(struct task_struct *p) 1386da0c1e65SKirill Tkhai { 1387da0c1e65SKirill Tkhai return p->on_rq == TASK_ON_RQ_QUEUED; 1388da0c1e65SKirill Tkhai } 1389391e43daSPeter Zijlstra 1390cca26e80SKirill Tkhai static inline int task_on_rq_migrating(struct task_struct *p) 1391cca26e80SKirill Tkhai { 1392cca26e80SKirill Tkhai return p->on_rq == TASK_ON_RQ_MIGRATING; 1393cca26e80SKirill Tkhai } 1394cca26e80SKirill Tkhai 1395b13095f0SLi Zefan /* 1396b13095f0SLi Zefan * wake flags 1397b13095f0SLi Zefan */ 139897fb7a0aSIngo Molnar #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ 139997fb7a0aSIngo Molnar #define WF_FORK 0x02 /* Child wakeup after fork */ 140097fb7a0aSIngo Molnar #define WF_MIGRATED 0x4 /* Internal use, task got migrated */ 1401b13095f0SLi Zefan 1402391e43daSPeter Zijlstra /* 1403391e43daSPeter Zijlstra * To aid in avoiding the subversion of "niceness" due to uneven distribution 1404391e43daSPeter Zijlstra * of tasks with abnormal "nice" values across CPUs the contribution that 1405391e43daSPeter Zijlstra * each task makes to its run queue's load is weighted according to its 1406391e43daSPeter Zijlstra * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 1407391e43daSPeter Zijlstra * scaled version of the new time slice allocation that they receive on time 1408391e43daSPeter Zijlstra * slice expiry etc. 
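 *
 * Concretely (values taken from sched_prio_to_weight[], shown here for
 * illustration): nice 0 maps to a weight of 1024, and each nice step scales
 * the weight by roughly 1.25x, so a nice -5 task carries about three times
 * the weight of a nice 0 task.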
1409391e43daSPeter Zijlstra */ 1410391e43daSPeter Zijlstra 1411391e43daSPeter Zijlstra #define WEIGHT_IDLEPRIO 3 1412391e43daSPeter Zijlstra #define WMULT_IDLEPRIO 1431655765 1413391e43daSPeter Zijlstra 1414ed82b8a1SAndi Kleen extern const int sched_prio_to_weight[40]; 1415ed82b8a1SAndi Kleen extern const u32 sched_prio_to_wmult[40]; 1416391e43daSPeter Zijlstra 1417ff77e468SPeter Zijlstra /* 1418ff77e468SPeter Zijlstra * {de,en}queue flags: 1419ff77e468SPeter Zijlstra * 1420ff77e468SPeter Zijlstra * DEQUEUE_SLEEP - task is no longer runnable 1421ff77e468SPeter Zijlstra * ENQUEUE_WAKEUP - task just became runnable 1422ff77e468SPeter Zijlstra * 1423ff77e468SPeter Zijlstra * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 1424ff77e468SPeter Zijlstra * are in a known state which allows modification. Such pairs 1425ff77e468SPeter Zijlstra * should preserve as much state as possible. 1426ff77e468SPeter Zijlstra * 1427ff77e468SPeter Zijlstra * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 1428ff77e468SPeter Zijlstra * in the runqueue. 1429ff77e468SPeter Zijlstra * 1430ff77e468SPeter Zijlstra * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 1431ff77e468SPeter Zijlstra * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 143259efa0baSPeter Zijlstra * ENQUEUE_MIGRATED - the task was migrated during wakeup 1433ff77e468SPeter Zijlstra * 1434ff77e468SPeter Zijlstra */ 1435ff77e468SPeter Zijlstra 1436ff77e468SPeter Zijlstra #define DEQUEUE_SLEEP 0x01 143797fb7a0aSIngo Molnar #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 143897fb7a0aSIngo Molnar #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 143997fb7a0aSIngo Molnar #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 1440ff77e468SPeter Zijlstra 14411de64443SPeter Zijlstra #define ENQUEUE_WAKEUP 0x01 1442ff77e468SPeter Zijlstra #define ENQUEUE_RESTORE 0x02 1443ff77e468SPeter Zijlstra #define ENQUEUE_MOVE 0x04 14440a67d1eeSPeter Zijlstra #define ENQUEUE_NOCLOCK 0x08 1445ff77e468SPeter Zijlstra 14460a67d1eeSPeter Zijlstra #define ENQUEUE_HEAD 0x10 14470a67d1eeSPeter Zijlstra #define ENQUEUE_REPLENISH 0x20 1448c82ba9faSLi Zefan #ifdef CONFIG_SMP 14490a67d1eeSPeter Zijlstra #define ENQUEUE_MIGRATED 0x40 1450c82ba9faSLi Zefan #else 145159efa0baSPeter Zijlstra #define ENQUEUE_MIGRATED 0x00 1452c82ba9faSLi Zefan #endif 1453c82ba9faSLi Zefan 145437e117c0SPeter Zijlstra #define RETRY_TASK ((void *)-1UL) 145537e117c0SPeter Zijlstra 1456c82ba9faSLi Zefan struct sched_class { 1457c82ba9faSLi Zefan const struct sched_class *next; 1458c82ba9faSLi Zefan 1459c82ba9faSLi Zefan void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 1460c82ba9faSLi Zefan void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 1461c82ba9faSLi Zefan void (*yield_task) (struct rq *rq); 1462c82ba9faSLi Zefan bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt); 1463c82ba9faSLi Zefan 1464c82ba9faSLi Zefan void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); 1465c82ba9faSLi Zefan 1466606dba2eSPeter Zijlstra /* 1467606dba2eSPeter Zijlstra * Whichever pick_next_task() implementation returns the next task is 1468606dba2eSPeter Zijlstra * responsible for calling put_prev_task() on the @prev task, or for 1469606dba2eSPeter Zijlstra * doing something equivalent.
147037e117c0SPeter Zijlstra * 147137e117c0SPeter Zijlstra * May return RETRY_TASK when it finds a higher prio class has runnable 147237e117c0SPeter Zijlstra * tasks. 1473606dba2eSPeter Zijlstra */ 1474606dba2eSPeter Zijlstra struct task_struct * (*pick_next_task)(struct rq *rq, 1475e7904a28SPeter Zijlstra struct task_struct *prev, 1476d8ac8971SMatt Fleming struct rq_flags *rf); 1477c82ba9faSLi Zefan void (*put_prev_task)(struct rq *rq, struct task_struct *p); 1478c82ba9faSLi Zefan 1479c82ba9faSLi Zefan #ifdef CONFIG_SMP 1480ac66f547SPeter Zijlstra int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); 14815a4fd036Sxiaofeng.yan void (*migrate_task_rq)(struct task_struct *p); 1482c82ba9faSLi Zefan 1483c82ba9faSLi Zefan void (*task_woken)(struct rq *this_rq, struct task_struct *task); 1484c82ba9faSLi Zefan 1485c82ba9faSLi Zefan void (*set_cpus_allowed)(struct task_struct *p, 1486c82ba9faSLi Zefan const struct cpumask *newmask); 1487c82ba9faSLi Zefan 1488c82ba9faSLi Zefan void (*rq_online)(struct rq *rq); 1489c82ba9faSLi Zefan void (*rq_offline)(struct rq *rq); 1490c82ba9faSLi Zefan #endif 1491c82ba9faSLi Zefan 1492c82ba9faSLi Zefan void (*set_curr_task)(struct rq *rq); 1493c82ba9faSLi Zefan void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 1494c82ba9faSLi Zefan void (*task_fork)(struct task_struct *p); 1495e6c390f2SDario Faggioli void (*task_dead)(struct task_struct *p); 1496c82ba9faSLi Zefan 149767dfa1b7SKirill Tkhai /* 149867dfa1b7SKirill Tkhai * The switched_from() call is allowed to drop rq->lock, therefore we 149967dfa1b7SKirill Tkhai * cannot assume the switched_from/switched_to pair is serialized by 150067dfa1b7SKirill Tkhai * rq->lock. They are however serialized by p->pi_lock. 150167dfa1b7SKirill Tkhai */ 1502c82ba9faSLi Zefan void (*switched_from)(struct rq *this_rq, struct task_struct *task); 1503c82ba9faSLi Zefan void (*switched_to) (struct rq *this_rq, struct task_struct *task); 1504c82ba9faSLi Zefan void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 1505c82ba9faSLi Zefan int oldprio); 1506c82ba9faSLi Zefan 1507c82ba9faSLi Zefan unsigned int (*get_rr_interval)(struct rq *rq, 1508c82ba9faSLi Zefan struct task_struct *task); 1509c82ba9faSLi Zefan 15106e998916SStanislaw Gruszka void (*update_curr)(struct rq *rq); 15116e998916SStanislaw Gruszka 1512ea86cb4bSVincent Guittot #define TASK_SET_GROUP 0 1513ea86cb4bSVincent Guittot #define TASK_MOVE_GROUP 1 1514ea86cb4bSVincent Guittot 1515c82ba9faSLi Zefan #ifdef CONFIG_FAIR_GROUP_SCHED 1516ea86cb4bSVincent Guittot void (*task_change_group)(struct task_struct *p, int type); 1517c82ba9faSLi Zefan #endif 1518c82ba9faSLi Zefan }; 1519391e43daSPeter Zijlstra 15203f1d2a31SPeter Zijlstra static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 15213f1d2a31SPeter Zijlstra { 15223f1d2a31SPeter Zijlstra prev->sched_class->put_prev_task(rq, prev); 15233f1d2a31SPeter Zijlstra } 15243f1d2a31SPeter Zijlstra 1525b2bf6c31SPeter Zijlstra static inline void set_curr_task(struct rq *rq, struct task_struct *curr) 1526b2bf6c31SPeter Zijlstra { 1527b2bf6c31SPeter Zijlstra curr->sched_class->set_curr_task(rq); 1528b2bf6c31SPeter Zijlstra } 1529b2bf6c31SPeter Zijlstra 1530f5832c19SNicolas Pitre #ifdef CONFIG_SMP 1531391e43daSPeter Zijlstra #define sched_class_highest (&stop_sched_class) 1532f5832c19SNicolas Pitre #else 1533f5832c19SNicolas Pitre #define sched_class_highest (&dl_sched_class) 1534f5832c19SNicolas Pitre #endif 1535391e43daSPeter Zijlstra #define for_each_class(class) 
\ 1536391e43daSPeter Zijlstra for (class = sched_class_highest; class; class = class->next) 1537391e43daSPeter Zijlstra 1538391e43daSPeter Zijlstra extern const struct sched_class stop_sched_class; 1539aab03e05SDario Faggioli extern const struct sched_class dl_sched_class; 1540391e43daSPeter Zijlstra extern const struct sched_class rt_sched_class; 1541391e43daSPeter Zijlstra extern const struct sched_class fair_sched_class; 1542391e43daSPeter Zijlstra extern const struct sched_class idle_sched_class; 1543391e43daSPeter Zijlstra 1544391e43daSPeter Zijlstra 1545391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1546391e43daSPeter Zijlstra 154763b2ca30SNicolas Pitre extern void update_group_capacity(struct sched_domain *sd, int cpu); 1548b719203bSLi Zefan 15497caff66fSDaniel Lezcano extern void trigger_load_balance(struct rq *rq); 1550391e43daSPeter Zijlstra 1551c5b28038SPeter Zijlstra extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); 1552c5b28038SPeter Zijlstra 1553391e43daSPeter Zijlstra #endif 1554391e43daSPeter Zijlstra 1555442bf3aaSDaniel Lezcano #ifdef CONFIG_CPU_IDLE 1556442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq, 1557442bf3aaSDaniel Lezcano struct cpuidle_state *idle_state) 1558442bf3aaSDaniel Lezcano { 1559442bf3aaSDaniel Lezcano rq->idle_state = idle_state; 1560442bf3aaSDaniel Lezcano } 1561442bf3aaSDaniel Lezcano 1562442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1563442bf3aaSDaniel Lezcano { 15649148a3a1SPeter Zijlstra SCHED_WARN_ON(!rcu_read_lock_held()); 156597fb7a0aSIngo Molnar 1566442bf3aaSDaniel Lezcano return rq->idle_state; 1567442bf3aaSDaniel Lezcano } 1568442bf3aaSDaniel Lezcano #else 1569442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq, 1570442bf3aaSDaniel Lezcano struct cpuidle_state *idle_state) 1571442bf3aaSDaniel Lezcano { 1572442bf3aaSDaniel Lezcano } 1573442bf3aaSDaniel Lezcano 1574442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1575442bf3aaSDaniel Lezcano { 1576442bf3aaSDaniel Lezcano return NULL; 1577442bf3aaSDaniel Lezcano } 1578442bf3aaSDaniel Lezcano #endif 1579442bf3aaSDaniel Lezcano 15808663effbSSteven Rostedt (VMware) extern void schedule_idle(void); 15818663effbSSteven Rostedt (VMware) 1582391e43daSPeter Zijlstra extern void sysrq_sched_debug_show(void); 1583391e43daSPeter Zijlstra extern void sched_init_granularity(void); 1584391e43daSPeter Zijlstra extern void update_max_interval(void); 15851baca4ceSJuri Lelli 15861baca4ceSJuri Lelli extern void init_sched_dl_class(void); 1587391e43daSPeter Zijlstra extern void init_sched_rt_class(void); 1588391e43daSPeter Zijlstra extern void init_sched_fair_class(void); 1589391e43daSPeter Zijlstra 15909059393eSVincent Guittot extern void reweight_task(struct task_struct *p, int prio); 15919059393eSVincent Guittot 15928875125eSKirill Tkhai extern void resched_curr(struct rq *rq); 1593391e43daSPeter Zijlstra extern void resched_cpu(int cpu); 1594391e43daSPeter Zijlstra 1595391e43daSPeter Zijlstra extern struct rt_bandwidth def_rt_bandwidth; 1596391e43daSPeter Zijlstra extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); 1597391e43daSPeter Zijlstra 1598332ac17eSDario Faggioli extern struct dl_bandwidth def_dl_bandwidth; 1599332ac17eSDario Faggioli extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); 1600aab03e05SDario Faggioli extern void init_dl_task_timer(struct sched_dl_entity *dl_se); 
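/*
 * Illustrative sketch (not the actual core.c code) of how the sched_class
 * list is walked with the for_each_class() iterator defined above; the
 * RETRY_TASK handling is simplified:
 *
 *	const struct sched_class *class;
 *	struct task_struct *p;
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq, prev, rf);
 *		if (p == RETRY_TASK)
 *			goto restart;	// a higher-prio class has runnable tasks
 *		if (p)
 *			return p;
 *	}
 */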
1601209a0cbdSLuca Abeni extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); 16024da3abceSLuca Abeni extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); 1603aab03e05SDario Faggioli 1604c52f14d3SLuca Abeni #define BW_SHIFT 20 1605c52f14d3SLuca Abeni #define BW_UNIT (1 << BW_SHIFT) 16064da3abceSLuca Abeni #define RATIO_SHIFT 8 1607332ac17eSDario Faggioli unsigned long to_ratio(u64 period, u64 runtime); 1608332ac17eSDario Faggioli 1609540247fbSYuyang Du extern void init_entity_runnable_average(struct sched_entity *se); 16102b8c41daSYuyang Du extern void post_init_entity_util_avg(struct sched_entity *se); 1611a75cdaa9SAlex Shi 161276d92ac3SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL 161276d92ac3SFrederic Weisbecker extern bool sched_can_stop_tick(struct rq *rq); 1614d84b3131SFrederic Weisbecker extern int __init sched_tick_offload_init(void); 161576d92ac3SFrederic Weisbecker 161676d92ac3SFrederic Weisbecker /* 161776d92ac3SFrederic Weisbecker * The tick may be needed by tasks in the runqueue, depending on their policy 161876d92ac3SFrederic Weisbecker * and requirements. If the tick is needed, send the target CPU an IPI to kick 161976d92ac3SFrederic Weisbecker * it out of nohz mode if necessary. 162076d92ac3SFrederic Weisbecker */ 162176d92ac3SFrederic Weisbecker static inline void sched_update_tick_dependency(struct rq *rq) 162276d92ac3SFrederic Weisbecker { 162376d92ac3SFrederic Weisbecker int cpu; 162476d92ac3SFrederic Weisbecker 162576d92ac3SFrederic Weisbecker if (!tick_nohz_full_enabled()) 162676d92ac3SFrederic Weisbecker return; 162776d92ac3SFrederic Weisbecker 162876d92ac3SFrederic Weisbecker cpu = cpu_of(rq); 162976d92ac3SFrederic Weisbecker 163076d92ac3SFrederic Weisbecker if (!tick_nohz_full_cpu(cpu)) 163176d92ac3SFrederic Weisbecker return; 163276d92ac3SFrederic Weisbecker 163376d92ac3SFrederic Weisbecker if (sched_can_stop_tick(rq)) 163476d92ac3SFrederic Weisbecker tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 163576d92ac3SFrederic Weisbecker else 163676d92ac3SFrederic Weisbecker tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 163776d92ac3SFrederic Weisbecker } 163876d92ac3SFrederic Weisbecker #else 1639d84b3131SFrederic Weisbecker static inline int sched_tick_offload_init(void) { return 0; } 164076d92ac3SFrederic Weisbecker static inline void sched_update_tick_dependency(struct rq *rq) { } 164176d92ac3SFrederic Weisbecker #endif 164276d92ac3SFrederic Weisbecker 164372465447SKirill Tkhai static inline void add_nr_running(struct rq *rq, unsigned count) 1644391e43daSPeter Zijlstra { 164572465447SKirill Tkhai unsigned prev_nr = rq->nr_running; 164672465447SKirill Tkhai 164772465447SKirill Tkhai rq->nr_running = prev_nr + count; 16489f3660c2SFrederic Weisbecker 164972465447SKirill Tkhai if (prev_nr < 2 && rq->nr_running >= 2) { 16504486edd1STim Chen #ifdef CONFIG_SMP 16514486edd1STim Chen if (!rq->rd->overload) 16524486edd1STim Chen rq->rd->overload = true; 16534486edd1STim Chen #endif 165476d92ac3SFrederic Weisbecker } 16554486edd1STim Chen 165676d92ac3SFrederic Weisbecker sched_update_tick_dependency(rq); 16574486edd1STim Chen } 1658391e43daSPeter Zijlstra 165972465447SKirill Tkhai static inline void sub_nr_running(struct rq *rq, unsigned count) 1660391e43daSPeter Zijlstra { 166172465447SKirill Tkhai rq->nr_running -= count; 166276d92ac3SFrederic Weisbecker /* Check if we still need preemption */ 166376d92ac3SFrederic Weisbecker sched_update_tick_dependency(rq); 1664391e43daSPeter Zijlstra } 1665391e43daSPeter Zijlstra 1666391e43daSPeter Zijlstra extern void 
update_rq_clock(struct rq *rq); 1667391e43daSPeter Zijlstra 1668391e43daSPeter Zijlstra extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 1669391e43daSPeter Zijlstra extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 1670391e43daSPeter Zijlstra 1671391e43daSPeter Zijlstra extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); 1672391e43daSPeter Zijlstra 1673391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_time_avg; 1674391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_nr_migrate; 1675391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_migration_cost; 1676391e43daSPeter Zijlstra 1677391e43daSPeter Zijlstra static inline u64 sched_avg_period(void) 1678391e43daSPeter Zijlstra { 1679391e43daSPeter Zijlstra return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2; 1680391e43daSPeter Zijlstra } 1681391e43daSPeter Zijlstra 1682391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_HRTICK 1683391e43daSPeter Zijlstra 1684391e43daSPeter Zijlstra /* 1685391e43daSPeter Zijlstra * Use hrtick when: 1686391e43daSPeter Zijlstra * - enabled by features 1687391e43daSPeter Zijlstra * - hrtimer is actually high res 1688391e43daSPeter Zijlstra */ 1689391e43daSPeter Zijlstra static inline int hrtick_enabled(struct rq *rq) 1690391e43daSPeter Zijlstra { 1691391e43daSPeter Zijlstra if (!sched_feat(HRTICK)) 1692391e43daSPeter Zijlstra return 0; 1693391e43daSPeter Zijlstra if (!cpu_active(cpu_of(rq))) 1694391e43daSPeter Zijlstra return 0; 1695391e43daSPeter Zijlstra return hrtimer_is_hres_active(&rq->hrtick_timer); 1696391e43daSPeter Zijlstra } 1697391e43daSPeter Zijlstra 1698391e43daSPeter Zijlstra void hrtick_start(struct rq *rq, u64 delay); 1699391e43daSPeter Zijlstra 1700b39e66eaSMike Galbraith #else 1701b39e66eaSMike Galbraith 1702b39e66eaSMike Galbraith static inline int hrtick_enabled(struct rq *rq) 1703b39e66eaSMike Galbraith { 1704b39e66eaSMike Galbraith return 0; 1705b39e66eaSMike Galbraith } 1706b39e66eaSMike Galbraith 1707391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_HRTICK */ 1708391e43daSPeter Zijlstra 1709dfbca41fSPeter Zijlstra #ifndef arch_scale_freq_capacity 1710dfbca41fSPeter Zijlstra static __always_inline 17117673c8a4SJuri Lelli unsigned long arch_scale_freq_capacity(int cpu) 1712dfbca41fSPeter Zijlstra { 1713dfbca41fSPeter Zijlstra return SCHED_CAPACITY_SCALE; 1714dfbca41fSPeter Zijlstra } 1715dfbca41fSPeter Zijlstra #endif 1716b5b4860dSVincent Guittot 17177e1a9208SJuri Lelli #ifdef CONFIG_SMP 17187e1a9208SJuri Lelli extern void sched_avg_update(struct rq *rq); 17197e1a9208SJuri Lelli 17208cd5601cSMorten Rasmussen #ifndef arch_scale_cpu_capacity 17218cd5601cSMorten Rasmussen static __always_inline 17228cd5601cSMorten Rasmussen unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) 17238cd5601cSMorten Rasmussen { 1724e3279a2eSDietmar Eggemann if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1)) 17258cd5601cSMorten Rasmussen return sd->smt_gain / sd->span_weight; 17268cd5601cSMorten Rasmussen 17278cd5601cSMorten Rasmussen return SCHED_CAPACITY_SCALE; 17288cd5601cSMorten Rasmussen } 17298cd5601cSMorten Rasmussen #endif 17308cd5601cSMorten Rasmussen 1731391e43daSPeter Zijlstra static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) 1732391e43daSPeter Zijlstra { 17337673c8a4SJuri Lelli rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq)); 1734391e43daSPeter Zijlstra sched_avg_update(rq); 1735391e43daSPeter Zijlstra } 
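/*
 * Worked example for arch_scale_cpu_capacity() above (illustrative; the
 * default smt_gain set up by the topology code has historically been 1178):
 * on a 2-way SMT core, SD_SHARE_CPUCAPACITY is set and span_weight == 2, so
 * each hardware thread reports 1178 / 2 = 589, a bit more than half of
 * SCHED_CAPACITY_SCALE (1024).
 */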
1736391e43daSPeter Zijlstra #else 17377e1a9208SJuri Lelli #ifndef arch_scale_cpu_capacity 17387e1a9208SJuri Lelli static __always_inline 17397e1a9208SJuri Lelli unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu) 17407e1a9208SJuri Lelli { 17417e1a9208SJuri Lelli return SCHED_CAPACITY_SCALE; 17427e1a9208SJuri Lelli } 17437e1a9208SJuri Lelli #endif 1744391e43daSPeter Zijlstra static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } 1745391e43daSPeter Zijlstra static inline void sched_avg_update(struct rq *rq) { } 1746391e43daSPeter Zijlstra #endif 1747391e43daSPeter Zijlstra 1748eb580751SPeter Zijlstra struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 17493e71a462SPeter Zijlstra __acquires(rq->lock); 17508a8c69c3SPeter Zijlstra 1751eb580751SPeter Zijlstra struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 17523960c8c0SPeter Zijlstra __acquires(p->pi_lock) 17533e71a462SPeter Zijlstra __acquires(rq->lock); 17543960c8c0SPeter Zijlstra 1755eb580751SPeter Zijlstra static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) 17563960c8c0SPeter Zijlstra __releases(rq->lock) 17573960c8c0SPeter Zijlstra { 1758d8ac8971SMatt Fleming rq_unpin_lock(rq, rf); 17593960c8c0SPeter Zijlstra raw_spin_unlock(&rq->lock); 17603960c8c0SPeter Zijlstra } 17613960c8c0SPeter Zijlstra 17623960c8c0SPeter Zijlstra static inline void 1763eb580751SPeter Zijlstra task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 17643960c8c0SPeter Zijlstra __releases(rq->lock) 17653960c8c0SPeter Zijlstra __releases(p->pi_lock) 17663960c8c0SPeter Zijlstra { 1767d8ac8971SMatt Fleming rq_unpin_lock(rq, rf); 17683960c8c0SPeter Zijlstra raw_spin_unlock(&rq->lock); 1769eb580751SPeter Zijlstra raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 17703960c8c0SPeter Zijlstra } 17713960c8c0SPeter Zijlstra 17728a8c69c3SPeter Zijlstra static inline void 17738a8c69c3SPeter Zijlstra rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) 17748a8c69c3SPeter Zijlstra __acquires(rq->lock) 17758a8c69c3SPeter Zijlstra { 17768a8c69c3SPeter Zijlstra raw_spin_lock_irqsave(&rq->lock, rf->flags); 17778a8c69c3SPeter Zijlstra rq_pin_lock(rq, rf); 17788a8c69c3SPeter Zijlstra } 17798a8c69c3SPeter Zijlstra 17808a8c69c3SPeter Zijlstra static inline void 17818a8c69c3SPeter Zijlstra rq_lock_irq(struct rq *rq, struct rq_flags *rf) 17828a8c69c3SPeter Zijlstra __acquires(rq->lock) 17838a8c69c3SPeter Zijlstra { 17848a8c69c3SPeter Zijlstra raw_spin_lock_irq(&rq->lock); 17858a8c69c3SPeter Zijlstra rq_pin_lock(rq, rf); 17868a8c69c3SPeter Zijlstra } 17878a8c69c3SPeter Zijlstra 17888a8c69c3SPeter Zijlstra static inline void 17898a8c69c3SPeter Zijlstra rq_lock(struct rq *rq, struct rq_flags *rf) 17908a8c69c3SPeter Zijlstra __acquires(rq->lock) 17918a8c69c3SPeter Zijlstra { 17928a8c69c3SPeter Zijlstra raw_spin_lock(&rq->lock); 17938a8c69c3SPeter Zijlstra rq_pin_lock(rq, rf); 17948a8c69c3SPeter Zijlstra } 17958a8c69c3SPeter Zijlstra 17968a8c69c3SPeter Zijlstra static inline void 17978a8c69c3SPeter Zijlstra rq_relock(struct rq *rq, struct rq_flags *rf) 17988a8c69c3SPeter Zijlstra __acquires(rq->lock) 17998a8c69c3SPeter Zijlstra { 18008a8c69c3SPeter Zijlstra raw_spin_lock(&rq->lock); 18018a8c69c3SPeter Zijlstra rq_repin_lock(rq, rf); 18028a8c69c3SPeter Zijlstra } 18038a8c69c3SPeter Zijlstra 18048a8c69c3SPeter Zijlstra static inline void 18058a8c69c3SPeter Zijlstra rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) 18068a8c69c3SPeter Zijlstra __releases(rq->lock) 
18078a8c69c3SPeter Zijlstra { 18088a8c69c3SPeter Zijlstra rq_unpin_lock(rq, rf); 18098a8c69c3SPeter Zijlstra raw_spin_unlock_irqrestore(&rq->lock, rf->flags); 18108a8c69c3SPeter Zijlstra } 18118a8c69c3SPeter Zijlstra 18128a8c69c3SPeter Zijlstra static inline void 18138a8c69c3SPeter Zijlstra rq_unlock_irq(struct rq *rq, struct rq_flags *rf) 18148a8c69c3SPeter Zijlstra __releases(rq->lock) 18158a8c69c3SPeter Zijlstra { 18168a8c69c3SPeter Zijlstra rq_unpin_lock(rq, rf); 18178a8c69c3SPeter Zijlstra raw_spin_unlock_irq(&rq->lock); 18188a8c69c3SPeter Zijlstra } 18198a8c69c3SPeter Zijlstra 18208a8c69c3SPeter Zijlstra static inline void 18218a8c69c3SPeter Zijlstra rq_unlock(struct rq *rq, struct rq_flags *rf) 18228a8c69c3SPeter Zijlstra __releases(rq->lock) 18238a8c69c3SPeter Zijlstra { 18248a8c69c3SPeter Zijlstra rq_unpin_lock(rq, rf); 18258a8c69c3SPeter Zijlstra raw_spin_unlock(&rq->lock); 18268a8c69c3SPeter Zijlstra } 18278a8c69c3SPeter Zijlstra 1828391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1829391e43daSPeter Zijlstra #ifdef CONFIG_PREEMPT 1830391e43daSPeter Zijlstra 1831391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); 1832391e43daSPeter Zijlstra 1833391e43daSPeter Zijlstra /* 1834391e43daSPeter Zijlstra * fair double_lock_balance: Safely acquires both rq->locks in a fair 1835391e43daSPeter Zijlstra * way at the expense of forcing extra atomic operations in all 1836391e43daSPeter Zijlstra * invocations. This assures that the double_lock is acquired using the 1837391e43daSPeter Zijlstra * same underlying policy as the spinlock_t on this architecture, which 1838391e43daSPeter Zijlstra * reduces latency compared to the unfair variant below. However, it 1839391e43daSPeter Zijlstra * also adds more overhead and therefore may reduce throughput. 1840391e43daSPeter Zijlstra */ 1841391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 1842391e43daSPeter Zijlstra __releases(this_rq->lock) 1843391e43daSPeter Zijlstra __acquires(busiest->lock) 1844391e43daSPeter Zijlstra __acquires(this_rq->lock) 1845391e43daSPeter Zijlstra { 1846391e43daSPeter Zijlstra raw_spin_unlock(&this_rq->lock); 1847391e43daSPeter Zijlstra double_rq_lock(this_rq, busiest); 1848391e43daSPeter Zijlstra 1849391e43daSPeter Zijlstra return 1; 1850391e43daSPeter Zijlstra } 1851391e43daSPeter Zijlstra 1852391e43daSPeter Zijlstra #else 1853391e43daSPeter Zijlstra /* 1854391e43daSPeter Zijlstra * Unfair double_lock_balance: Optimizes throughput at the expense of 1855391e43daSPeter Zijlstra * latency by eliminating extra atomic operations when the locks are 185697fb7a0aSIngo Molnar * already in proper order on entry. This favors lower CPU-ids and will 185797fb7a0aSIngo Molnar * grant the double lock to lower CPUs over higher ids under contention, 1858391e43daSPeter Zijlstra * regardless of entry order into the function. 
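 *
 * For example: with rq A at a lower address than rq B, a CPU that holds
 * B's lock and wants A's must drop B, take A, then retake B nested, while
 * a CPU that holds A's lock simply blocks on B nested; under contention
 * both locks are therefore always acquired in ascending address order.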
1859391e43daSPeter Zijlstra */ 1860391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 1861391e43daSPeter Zijlstra __releases(this_rq->lock) 1862391e43daSPeter Zijlstra __acquires(busiest->lock) 1863391e43daSPeter Zijlstra __acquires(this_rq->lock) 1864391e43daSPeter Zijlstra { 1865391e43daSPeter Zijlstra int ret = 0; 1866391e43daSPeter Zijlstra 1867391e43daSPeter Zijlstra if (unlikely(!raw_spin_trylock(&busiest->lock))) { 1868391e43daSPeter Zijlstra if (busiest < this_rq) { 1869391e43daSPeter Zijlstra raw_spin_unlock(&this_rq->lock); 1870391e43daSPeter Zijlstra raw_spin_lock(&busiest->lock); 1871391e43daSPeter Zijlstra raw_spin_lock_nested(&this_rq->lock, 1872391e43daSPeter Zijlstra SINGLE_DEPTH_NESTING); 1873391e43daSPeter Zijlstra ret = 1; 1874391e43daSPeter Zijlstra } else 1875391e43daSPeter Zijlstra raw_spin_lock_nested(&busiest->lock, 1876391e43daSPeter Zijlstra SINGLE_DEPTH_NESTING); 1877391e43daSPeter Zijlstra } 1878391e43daSPeter Zijlstra return ret; 1879391e43daSPeter Zijlstra } 1880391e43daSPeter Zijlstra 1881391e43daSPeter Zijlstra #endif /* CONFIG_PREEMPT */ 1882391e43daSPeter Zijlstra 1883391e43daSPeter Zijlstra /* 1884391e43daSPeter Zijlstra * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 1885391e43daSPeter Zijlstra */ 1886391e43daSPeter Zijlstra static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 1887391e43daSPeter Zijlstra { 1888391e43daSPeter Zijlstra if (unlikely(!irqs_disabled())) { 188997fb7a0aSIngo Molnar /* printk() doesn't work well under rq->lock */ 1890391e43daSPeter Zijlstra raw_spin_unlock(&this_rq->lock); 1891391e43daSPeter Zijlstra BUG_ON(1); 1892391e43daSPeter Zijlstra } 1893391e43daSPeter Zijlstra 1894391e43daSPeter Zijlstra return _double_lock_balance(this_rq, busiest); 1895391e43daSPeter Zijlstra } 1896391e43daSPeter Zijlstra 1897391e43daSPeter Zijlstra static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1898391e43daSPeter Zijlstra __releases(busiest->lock) 1899391e43daSPeter Zijlstra { 1900391e43daSPeter Zijlstra raw_spin_unlock(&busiest->lock); 1901391e43daSPeter Zijlstra lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 1902391e43daSPeter Zijlstra } 1903391e43daSPeter Zijlstra 190474602315SPeter Zijlstra static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 190574602315SPeter Zijlstra { 190674602315SPeter Zijlstra if (l1 > l2) 190774602315SPeter Zijlstra swap(l1, l2); 190874602315SPeter Zijlstra 190974602315SPeter Zijlstra spin_lock(l1); 191074602315SPeter Zijlstra spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 191174602315SPeter Zijlstra } 191274602315SPeter Zijlstra 191360e69eedSMike Galbraith static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 191460e69eedSMike Galbraith { 191560e69eedSMike Galbraith if (l1 > l2) 191660e69eedSMike Galbraith swap(l1, l2); 191760e69eedSMike Galbraith 191860e69eedSMike Galbraith spin_lock_irq(l1); 191960e69eedSMike Galbraith spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 192060e69eedSMike Galbraith } 192160e69eedSMike Galbraith 192274602315SPeter Zijlstra static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 192374602315SPeter Zijlstra { 192474602315SPeter Zijlstra if (l1 > l2) 192574602315SPeter Zijlstra swap(l1, l2); 192674602315SPeter Zijlstra 192774602315SPeter Zijlstra raw_spin_lock(l1); 192874602315SPeter Zijlstra raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 192974602315SPeter Zijlstra } 193074602315SPeter Zijlstra 1931391e43daSPeter 
Zijlstra /* 1932391e43daSPeter Zijlstra * double_rq_lock - safely lock two runqueues 1933391e43daSPeter Zijlstra * 1934391e43daSPeter Zijlstra * Note this does not disable interrupts like task_rq_lock, 1935391e43daSPeter Zijlstra * you need to do so manually before calling. 1936391e43daSPeter Zijlstra */ 1937391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 1938391e43daSPeter Zijlstra __acquires(rq1->lock) 1939391e43daSPeter Zijlstra __acquires(rq2->lock) 1940391e43daSPeter Zijlstra { 1941391e43daSPeter Zijlstra BUG_ON(!irqs_disabled()); 1942391e43daSPeter Zijlstra if (rq1 == rq2) { 1943391e43daSPeter Zijlstra raw_spin_lock(&rq1->lock); 1944391e43daSPeter Zijlstra __acquire(rq2->lock); /* Fake it out ;) */ 1945391e43daSPeter Zijlstra } else { 1946391e43daSPeter Zijlstra if (rq1 < rq2) { 1947391e43daSPeter Zijlstra raw_spin_lock(&rq1->lock); 1948391e43daSPeter Zijlstra raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 1949391e43daSPeter Zijlstra } else { 1950391e43daSPeter Zijlstra raw_spin_lock(&rq2->lock); 1951391e43daSPeter Zijlstra raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 1952391e43daSPeter Zijlstra } 1953391e43daSPeter Zijlstra } 1954391e43daSPeter Zijlstra } 1955391e43daSPeter Zijlstra 1956391e43daSPeter Zijlstra /* 1957391e43daSPeter Zijlstra * double_rq_unlock - safely unlock two runqueues 1958391e43daSPeter Zijlstra * 1959391e43daSPeter Zijlstra * Note this does not restore interrupts like task_rq_unlock, 1960391e43daSPeter Zijlstra * you need to do so manually after calling. 1961391e43daSPeter Zijlstra */ 1962391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 1963391e43daSPeter Zijlstra __releases(rq1->lock) 1964391e43daSPeter Zijlstra __releases(rq2->lock) 1965391e43daSPeter Zijlstra { 1966391e43daSPeter Zijlstra raw_spin_unlock(&rq1->lock); 1967391e43daSPeter Zijlstra if (rq1 != rq2) 1968391e43daSPeter Zijlstra raw_spin_unlock(&rq2->lock); 1969391e43daSPeter Zijlstra else 1970391e43daSPeter Zijlstra __release(rq2->lock); 1971391e43daSPeter Zijlstra } 1972391e43daSPeter Zijlstra 1973f2cb1360SIngo Molnar extern void set_rq_online (struct rq *rq); 1974f2cb1360SIngo Molnar extern void set_rq_offline(struct rq *rq); 1975f2cb1360SIngo Molnar extern bool sched_smp_initialized; 1976f2cb1360SIngo Molnar 1977391e43daSPeter Zijlstra #else /* CONFIG_SMP */ 1978391e43daSPeter Zijlstra 1979391e43daSPeter Zijlstra /* 1980391e43daSPeter Zijlstra * double_rq_lock - safely lock two runqueues 1981391e43daSPeter Zijlstra * 1982391e43daSPeter Zijlstra * Note this does not disable interrupts like task_rq_lock, 1983391e43daSPeter Zijlstra * you need to do so manually before calling. 
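 *
 * Typical call pattern (illustrative):
 *
 *	local_irq_disable();
 *	double_rq_lock(rq1, rq2);
 *	...
 *	double_rq_unlock(rq1, rq2);
 *	local_irq_enable();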
1984391e43daSPeter Zijlstra */ 1985391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 1986391e43daSPeter Zijlstra __acquires(rq1->lock) 1987391e43daSPeter Zijlstra __acquires(rq2->lock) 1988391e43daSPeter Zijlstra { 1989391e43daSPeter Zijlstra BUG_ON(!irqs_disabled()); 1990391e43daSPeter Zijlstra BUG_ON(rq1 != rq2); 1991391e43daSPeter Zijlstra raw_spin_lock(&rq1->lock); 1992391e43daSPeter Zijlstra __acquire(rq2->lock); /* Fake it out ;) */ 1993391e43daSPeter Zijlstra } 1994391e43daSPeter Zijlstra 1995391e43daSPeter Zijlstra /* 1996391e43daSPeter Zijlstra * double_rq_unlock - safely unlock two runqueues 1997391e43daSPeter Zijlstra * 1998391e43daSPeter Zijlstra * Note this does not restore interrupts like task_rq_unlock, 1999391e43daSPeter Zijlstra * you need to do so manually after calling. 2000391e43daSPeter Zijlstra */ 2001391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2002391e43daSPeter Zijlstra __releases(rq1->lock) 2003391e43daSPeter Zijlstra __releases(rq2->lock) 2004391e43daSPeter Zijlstra { 2005391e43daSPeter Zijlstra BUG_ON(rq1 != rq2); 2006391e43daSPeter Zijlstra raw_spin_unlock(&rq1->lock); 2007391e43daSPeter Zijlstra __release(rq2->lock); 2008391e43daSPeter Zijlstra } 2009391e43daSPeter Zijlstra 2010391e43daSPeter Zijlstra #endif 2011391e43daSPeter Zijlstra 2012391e43daSPeter Zijlstra extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2013391e43daSPeter Zijlstra extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 20146b55c965SSrikar Dronamraju 20156b55c965SSrikar Dronamraju #ifdef CONFIG_SCHED_DEBUG 20169469eb01SPeter Zijlstra extern bool sched_debug_enabled; 20179469eb01SPeter Zijlstra 2018391e43daSPeter Zijlstra extern void print_cfs_stats(struct seq_file *m, int cpu); 2019391e43daSPeter Zijlstra extern void print_rt_stats(struct seq_file *m, int cpu); 2020acb32132SWanpeng Li extern void print_dl_stats(struct seq_file *m, int cpu); 20216b55c965SSrikar Dronamraju extern void 20226b55c965SSrikar Dronamraju print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2023397f2378SSrikar Dronamraju #ifdef CONFIG_NUMA_BALANCING 2024397f2378SSrikar Dronamraju extern void 2025397f2378SSrikar Dronamraju show_numa_stats(struct task_struct *p, struct seq_file *m); 2026397f2378SSrikar Dronamraju extern void 2027397f2378SSrikar Dronamraju print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 2028397f2378SSrikar Dronamraju unsigned long tpf, unsigned long gsf, unsigned long gpf); 2029397f2378SSrikar Dronamraju #endif /* CONFIG_NUMA_BALANCING */ 2030397f2378SSrikar Dronamraju #endif /* CONFIG_SCHED_DEBUG */ 2031391e43daSPeter Zijlstra 2032391e43daSPeter Zijlstra extern void init_cfs_rq(struct cfs_rq *cfs_rq); 203307c54f7aSAbel Vesa extern void init_rt_rq(struct rt_rq *rt_rq); 203407c54f7aSAbel Vesa extern void init_dl_rq(struct dl_rq *dl_rq); 2035391e43daSPeter Zijlstra 20361ee14e6cSBen Segall extern void cfs_bandwidth_usage_inc(void); 20371ee14e6cSBen Segall extern void cfs_bandwidth_usage_dec(void); 20381c792db7SSuresh Siddha 20393451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON 204000357f5eSPeter Zijlstra #define NOHZ_BALANCE_KICK_BIT 0 204100357f5eSPeter Zijlstra #define NOHZ_STATS_KICK_BIT 1 2042a22e47a4SPeter Zijlstra 2043a22e47a4SPeter Zijlstra #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 2044b7031a02SPeter Zijlstra #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 2045b7031a02SPeter Zijlstra 2046b7031a02SPeter Zijlstra #define 
NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK) 20471c792db7SSuresh Siddha 20481c792db7SSuresh Siddha #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 204920a5c8ccSThomas Gleixner 205000357f5eSPeter Zijlstra extern void nohz_balance_exit_idle(struct rq *rq); 205120a5c8ccSThomas Gleixner #else 205200357f5eSPeter Zijlstra static inline void nohz_balance_exit_idle(struct rq *rq) { } 20531c792db7SSuresh Siddha #endif 205473fbec60SFrederic Weisbecker 2055daec5798SLuca Abeni 2056daec5798SLuca Abeni #ifdef CONFIG_SMP 2057daec5798SLuca Abeni static inline 2058daec5798SLuca Abeni void __dl_update(struct dl_bw *dl_b, s64 bw) 2059daec5798SLuca Abeni { 2060daec5798SLuca Abeni struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); 2061daec5798SLuca Abeni int i; 2062daec5798SLuca Abeni 2063daec5798SLuca Abeni RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2064daec5798SLuca Abeni "sched RCU must be held"); 2065daec5798SLuca Abeni for_each_cpu_and(i, rd->span, cpu_active_mask) { 2066daec5798SLuca Abeni struct rq *rq = cpu_rq(i); 2067daec5798SLuca Abeni 2068daec5798SLuca Abeni rq->dl.extra_bw += bw; 2069daec5798SLuca Abeni } 2070daec5798SLuca Abeni } 2071daec5798SLuca Abeni #else 2072daec5798SLuca Abeni static inline 2073daec5798SLuca Abeni void __dl_update(struct dl_bw *dl_b, s64 bw) 2074daec5798SLuca Abeni { 2075daec5798SLuca Abeni struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); 2076daec5798SLuca Abeni 2077daec5798SLuca Abeni dl->extra_bw += bw; 2078daec5798SLuca Abeni } 2079daec5798SLuca Abeni #endif 2080daec5798SLuca Abeni 2081daec5798SLuca Abeni 208273fbec60SFrederic Weisbecker #ifdef CONFIG_IRQ_TIME_ACCOUNTING 208319d23dbfSFrederic Weisbecker struct irqtime { 208425e2d8c1SFrederic Weisbecker u64 total; 2085a499a5a1SFrederic Weisbecker u64 tick_delta; 208619d23dbfSFrederic Weisbecker u64 irq_start_time; 208719d23dbfSFrederic Weisbecker struct u64_stats_sync sync; 208819d23dbfSFrederic Weisbecker }; 208973fbec60SFrederic Weisbecker 209019d23dbfSFrederic Weisbecker DECLARE_PER_CPU(struct irqtime, cpu_irqtime); 209173fbec60SFrederic Weisbecker 209225e2d8c1SFrederic Weisbecker /* 209325e2d8c1SFrederic Weisbecker * Returns the irqtime minus the softirq time computed by ksoftirqd. 209425e2d8c1SFrederic Weisbecker * Otherwise ksoftirqd's own runtime would be subtracted from its 209525e2d8c1SFrederic Weisbecker * sum_exec_runtime, which would then never move forward. 209625e2d8c1SFrederic Weisbecker */ 209773fbec60SFrederic Weisbecker static inline u64 irq_time_read(int cpu) 209873fbec60SFrederic Weisbecker { 209919d23dbfSFrederic Weisbecker struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 210019d23dbfSFrederic Weisbecker unsigned int seq; 210119d23dbfSFrederic Weisbecker u64 total; 210273fbec60SFrederic Weisbecker 210373fbec60SFrederic Weisbecker do { 210419d23dbfSFrederic Weisbecker seq = __u64_stats_fetch_begin(&irqtime->sync); 210525e2d8c1SFrederic Weisbecker total = irqtime->total; 210619d23dbfSFrederic Weisbecker } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 210773fbec60SFrederic Weisbecker 210819d23dbfSFrederic Weisbecker return total; 210973fbec60SFrederic Weisbecker } 211073fbec60SFrederic Weisbecker #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2111adaf9fcdSRafael J. Wysocki 2112adaf9fcdSRafael J. Wysocki #ifdef CONFIG_CPU_FREQ 2113adaf9fcdSRafael J. Wysocki DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); 2114adaf9fcdSRafael J. Wysocki 2115adaf9fcdSRafael J. Wysocki /** 2116adaf9fcdSRafael J. 
Wysocki * cpufreq_update_util - Take a note about CPU utilization changes. 211712bde33dSRafael J. Wysocki * @rq: Runqueue to carry out the update for. 211858919e83SRafael J. Wysocki * @flags: Update reason flags. 2119adaf9fcdSRafael J. Wysocki * 212058919e83SRafael J. Wysocki * This function is called by the scheduler on the CPU whose utilization is 212158919e83SRafael J. Wysocki * being updated. 2122adaf9fcdSRafael J. Wysocki * 2123adaf9fcdSRafael J. Wysocki * It can only be called from RCU-sched read-side critical sections. 2124adaf9fcdSRafael J. Wysocki * 2125adaf9fcdSRafael J. Wysocki * The way cpufreq is currently arranged requires it to evaluate the CPU 2126adaf9fcdSRafael J. Wysocki * performance state (frequency/voltage) on a regular basis to prevent it from 2127adaf9fcdSRafael J. Wysocki * being stuck in a completely inadequate performance level for too long. 2128e0367b12SJuri Lelli * That is not guaranteed to happen if the updates are only triggered from CFS 2129e0367b12SJuri Lelli * and DL, though, because they may not be coming in if only RT tasks are 2130e0367b12SJuri Lelli * active all the time (or there are RT tasks only). 2131adaf9fcdSRafael J. Wysocki * 2132e0367b12SJuri Lelli * As a workaround for that issue, this function is called periodically by the 2133e0367b12SJuri Lelli * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 2134adaf9fcdSRafael J. Wysocki * but that really is a band-aid. Going forward it should be replaced with 2135e0367b12SJuri Lelli * solutions targeted more specifically at RT tasks. 2136adaf9fcdSRafael J. Wysocki */ 213712bde33dSRafael J. Wysocki static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 2138adaf9fcdSRafael J. Wysocki { 213958919e83SRafael J. Wysocki struct update_util_data *data; 214058919e83SRafael J. Wysocki 2141674e7541SViresh Kumar data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, 2142674e7541SViresh Kumar cpu_of(rq))); 214358919e83SRafael J. Wysocki if (data) 214412bde33dSRafael J. Wysocki data->func(data, rq_clock(rq), flags); 214512bde33dSRafael J. Wysocki } 2146adaf9fcdSRafael J. Wysocki #else 214712bde33dSRafael J. Wysocki static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} 2148adaf9fcdSRafael J. Wysocki #endif /* CONFIG_CPU_FREQ */ 2149be53f58fSLinus Torvalds 21509bdcb44eSRafael J. Wysocki #ifdef arch_scale_freq_capacity 21519bdcb44eSRafael J. Wysocki # ifndef arch_scale_freq_invariant 215297fb7a0aSIngo Molnar # define arch_scale_freq_invariant() true 21539bdcb44eSRafael J. Wysocki # endif 215497fb7a0aSIngo Molnar #else 215597fb7a0aSIngo Molnar # define arch_scale_freq_invariant() false 21569bdcb44eSRafael J. Wysocki #endif 2157d4edd662SJuri Lelli 2158794a56ebSJuri Lelli #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 2159d4edd662SJuri Lelli static inline unsigned long cpu_util_dl(struct rq *rq) 2160d4edd662SJuri Lelli { 2161d4edd662SJuri Lelli return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 2162d4edd662SJuri Lelli } 2163d4edd662SJuri Lelli 2164d4edd662SJuri Lelli static inline unsigned long cpu_util_cfs(struct rq *rq) 2165d4edd662SJuri Lelli { 2166d4edd662SJuri Lelli return rq->cfs.avg.util_avg; 2167d4edd662SJuri Lelli } 2168794a56ebSJuri Lelli #endif
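/*
 * Worked example for cpu_util_dl() above (illustrative numbers): a DL task
 * with a runtime of 10ms per 100ms period contributes a running_bw of
 * (10 << BW_SHIFT) / 100 = 104857, so cpu_util_dl() returns
 * (104857 * 1024) >> 20 = 102, i.e. roughly 10% of SCHED_CAPACITY_SCALE.
 */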