/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#include <linux/sched.h>

#include <linux/sched/autogroup.h>
#include <linux/sched/clock.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/idle.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/sched/user.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/xacct.h>

#include <uapi/linux/sched/types.h>

#include <linux/binfmts.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/cpuset.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delayacct.h>
#include <linux/energy_model.h>
#include <linux/init_task.h>
#include <linux/kprobes.h>
#include <linux/kthread.h>
#include <linux/membarrier.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/nmi.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcupdate_wait.h>
#include <linux/security.h>
#include <linux/stop_machine.h>
#include <linux/suspend.h>
#include <linux/swait.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/tsacct_kern.h>

#include <asm/tlb.h>
#include <asm-generic/vmlinux.lds.h>

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"

#include <trace/events/sched.h>

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
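/*
 * Worked example (illustrative): with CONFIG_HZ=250 one jiffy is
 * NSEC_PER_SEC / HZ = 4,000,000 ns, so NS_TO_JIFFIES(10000000) evaluates
 * to 10000000 / 4000000 == 2 jiffies (the integer division truncates).
 */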
/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w) \
({ \
	unsigned long __w = (w); \
	if (__w) \
		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
	__w; \
})
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
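/*
 * Worked example of the calibration above, assuming the usual
 * SCHED_FIXEDPOINT_SHIFT of 10: on 64-bit, NICE_0_LOAD_SHIFT is 20 and
 * NICE_0_LOAD is 1 << 20 == 1048576. The nice-0 weight visible to users is
 * 1024, and scale_load(1024) == 1024 << 10 == 1048576 == NICE_0_LOAD, while
 * scale_load_down(NICE_0_LOAD) == max(2, 1048576 >> 10) == 1024. On 32-bit
 * both conversions are the identity and NICE_0_LOAD is simply 1024.
 */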
/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE		10

/*
 * Single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF		((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_idle_policy(struct task_struct *p)
{
	return idle_policy(p->policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

static inline void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff / 8;
}
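/*
 * update_avg() is a simple exponentially weighted moving average with a 1/8
 * weight for the new sample. For example, with *avg == 1000 and
 * sample == 1800 the difference is 800, so *avg becomes 1000 + 800/8 == 1100.
 */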
/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching will be
 * available on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV	0x10000000

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls will
 * be read. It in turn can be changed by writing on its own
 * control.
 */
struct dl_bandwidth {
	raw_spinlock_t		dl_runtime_lock;
	u64			dl_runtime;
	u64			dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

struct dl_bw {
	raw_spinlock_t		lock;
	u64			bw;
	u64			total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
				 u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}
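/*
 * Illustrative admission-control example for the helpers above (percentages
 * only, ignoring the fixed-point encoding of the bandwidth values): if
 * dl_b->bw corresponds to the default 95% limit and dl_b->total_bw already
 * accounts for 60%, admitting a new task worth 30% (old_bw == 0, new_bw ==
 * 30%) does not overflow on a full-capacity CPU (cap == 1024, since
 * 95% >= 60% + 30%), but does overflow on a cap == 512 CPU, where
 * cap_scale() halves the allowed bandwidth to 47.5%. A successfully
 * admitted bandwidth is accounted with __dl_add() and released again with
 * __dl_sub().
 */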
/*
 * Verify the fitness of task @p to run on @cpu taking into account the
 * CPU original capacity and the runtime/deadline ratio of the task.
 *
 * The function will return true if the CPU original capacity of the
 * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the
 * task and false otherwise.
 */
static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned long cap = arch_scale_cpu_capacity(cpu);

	return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;
}
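/*
 * Illustrative example: the check above is equivalent to
 * cap / SCHED_CAPACITY_SCALE >= dl_runtime / dl_deadline. A task with
 * dl_runtime = 2.5ms and dl_deadline = 10ms needs at least a quarter of a
 * full-capacity CPU, i.e. cap >= 256: it fits on a CPU with capacity 512
 * (10ms * 512 >> 10 == 5ms >= 2.5ms) but not on one with capacity 128
 * (10ms * 128 >> 10 == 1.25ms < 2.5ms).
 */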
extern void init_dl_bw(struct dl_bw *dl_b);
extern int  sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern bool dl_cpu_busy(unsigned int cpu);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>
#include <linux/psi.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;
	u64			quota;
	u64			runtime;
	s64			hierarchical_quota;

	u8			idle;
	u8			period_active;
	u8			slack_started;
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	u64			throttled_time;
#endif
};

/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* The two decimal precision [%] value requested from user-space */
	unsigned int		uclamp_pct[UCLAMP_CNT];
	/* Clamp values requested for a task group */
	struct uclamp_se	uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a task group */
	struct uclamp_se	uclamp[UCLAMP_CNT];
#endif

};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES		(1UL << 1)
#define MAX_SHARES		(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);
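/*
 * Illustrative (hypothetical) use of the iterator above: counting the task
 * groups in the hierarchy with a local visitor, using tg_nop() as the no-op
 * "up" callback. A non-zero return from a visitor stops the walk early.
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int groups = 0;
 *
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &groups);
 *	rcu_read_unlock();
 */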
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight	load;
	unsigned int		nr_running;
	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
	unsigned int		idle_h_nr_running; /* SCHED_IDLE */

	u64			exec_clock;
	u64			min_vruntime;
#ifndef CONFIG_64BIT
	u64			min_vruntime_copy;
#endif

	struct rb_root_cached	tasks_timeline;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e when none are currently running).
	 */
	struct sched_entity	*curr;
	struct sched_entity	*next;
	struct sched_entity	*last;
	struct sched_entity	*skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int		nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg	avg;
#ifndef CONFIG_64BIT
	u64			load_last_update_time_copy;
#endif
	struct {
		raw_spinlock_t	lock ____cacheline_aligned;
		int		nr;
		unsigned long	load_avg;
		unsigned long	util_avg;
		unsigned long	runnable_avg;
	} removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long		tg_load_avg_contrib;
	long			propagate;
	long			prop_runnable_sum;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long		h_load;
	u64			last_h_load_update;
	struct sched_entity	*h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */
	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
	 * This list is used during load balance.
	 */
	int			on_list;
	struct list_head	leaf_cfs_rq_list;
	struct task_group	*tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int			runtime_enabled;
	s64			runtime_remaining;

	u64			throttled_clock;
	u64			throttled_clock_task;
	u64			throttled_clock_task_time;
	int			throttled;
	int			throttle_count;
	struct list_head	throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array	active;
	unsigned int		rt_nr_running;
	unsigned int		rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int		curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int		next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long		rt_nr_migratory;
	unsigned long		rt_nr_total;
	int			overloaded;
	struct plist_head	pushable_tasks;

#endif /* CONFIG_SMP */
	int			rt_queued;

	int			rt_throttled;
	u64			rt_time;
	u64			rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long		rt_nr_boosted;

	struct rq		*rq;
	struct task_group	*tg;
#endif
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
{
	return rt_rq->rt_queued && rt_rq->rt_nr_running;
}

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached	root;

	unsigned long		dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64		curr;
		u64		next;
	} earliest_dl;

	unsigned long		dl_nr_migratory;
	int			overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached	pushable_dl_tasks_root;
#else
	struct dl_bw		dl_bw;
#endif
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks
	 */
	u64			running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in runqueue and the tasks that executed on this
	 * CPU and blocked). Increased when a task moves to this runqueue, and
	 * decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64			this_bw;
	u64			extra_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64			bw_ratio;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline void se_update_runnable(struct sched_entity *se)
{
	if (!entity_is_task(se))
		se->runnable_weight = se->my_q->h_nr_running;
}

static inline long se_runnable(struct sched_entity *se)
{
	if (entity_is_task(se))
		return !!se->on_rq;
	else
		return se->runnable_weight;
}

#else
#define entity_is_task(se)	1

static inline void se_update_runnable(struct sched_entity *se) {}

static inline long se_runnable(struct sched_entity *se)
{
	return !!se->on_rq;
}
#endif

#ifdef CONFIG_SMP
/*
 * XXX we want to get rid of these helpers and use the full load resolution.
 */
static inline long se_weight(struct sched_entity *se)
{
	return scale_load_down(se->load.weight);
}


static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

struct perf_domain {
	struct em_perf_domain *em_pd;
	struct perf_domain *next;
	struct rcu_head rcu;
};

/* Scheduling group status flags */
#define SG_OVERLOAD		0x1 /* More than one runnable task on a CPU. */
#define SG_OVERUTILIZED		0x2 /* One or more CPUs are over-utilized. */

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t		refcount;
	atomic_t		rto_count;
	struct rcu_head		rcu;
	cpumask_var_t		span;
	cpumask_var_t		online;

	/*
	 * Indicate pullable load on at least one CPU, e.g:
	 * - More than one runnable task
	 * - Running task is misfit
	 */
	int			overload;

	/* Indicate one or more cpus over-utilized (tipping point) */
	int			overutilized;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t		dlo_mask;
	atomic_t		dlo_count;
	struct dl_bw		dl_bw;
	struct cpudl		cpudl;

#ifdef HAVE_RT_PUSH_IPI
	/*
	 * For IPI pull requests, loop across the rto_mask.
	 */
	struct irq_work		rto_push_work;
	raw_spinlock_t		rto_lock;
	/* These are only updated and read within rto_lock */
	int			rto_loop;
	int			rto_cpu;
	/* These atomics are updated outside of a lock */
	atomic_t		rto_loop_next;
	atomic_t		rto_loop_start;
#endif
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t		rto_mask;
	struct cpupri		cpupri;

	unsigned long		max_cpu_capacity;

	/*
	 * NULL-terminated list of performance domains intersecting with the
	 * CPUs of the rd. Protected by RCU.
	 */
	struct perf_domain __rcu *pd;
};

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_UCLAMP_TASK
/*
 * struct uclamp_bucket - Utilization clamp bucket
 * @value: utilization clamp value for tasks on this clamp bucket
 * @tasks: number of RUNNABLE tasks on this clamp bucket
 *
 * Keep track of how many tasks are RUNNABLE for a given utilization
 * clamp value.
 */
struct uclamp_bucket {
	unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
	unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
};
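/*
 * Sizing note (illustrative): with SCHED_CAPACITY_SCALE == 1024,
 * bits_per(SCHED_CAPACITY_SCALE) is 11, so 'value' can hold any clamp value
 * in [0..SCHED_CAPACITY_SCALE] and 'tasks' gets the remaining
 * BITS_PER_LONG - 11 bits (53 on a 64-bit build), which bounds how many
 * RUNNABLE tasks a single bucket can count.
 */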
/*
 * struct uclamp_rq - rq's utilization clamp
 * @value: currently active clamp values for a rq
 * @bucket: utilization clamp buckets affecting a rq
 *
 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
 * A clamp value is affecting a rq when there is at least one task RUNNABLE
 * (or actually running) with that value.
 *
 * There are up to UCLAMP_CNT possible different clamp values, currently there
 * are only two: minimum utilization and maximum utilization.
 *
 * All utilization clamping values are MAX aggregated, since:
 * - for util_min: we want to run the CPU at least at the max of the minimum
 *   utilization required by its currently RUNNABLE tasks.
 * - for util_max: we want to allow the CPU to run up to the max of the
 *   maximum utilization allowed by its currently RUNNABLE tasks.
 *
 * Since on each system we expect only a limited number of different
 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
 * the metrics required to compute all the per-rq utilization clamp values.
 */
struct uclamp_rq {
	unsigned int value;
	struct uclamp_bucket bucket[UCLAMP_BUCKETS];
};

DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t		lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int		nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int		nr_numa_running;
	unsigned int		nr_preferred_running;
	unsigned int		numa_migrate_on;
#endif
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long		last_blocked_load_update_tick;
	unsigned int		has_blocked_load;
	call_single_data_t	nohz_csd;
#endif /* CONFIG_SMP */
	unsigned int		nohz_tick_stopped;
	atomic_t		nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_SMP
	unsigned int		ttwu_pending;
#endif
	u64			nr_switches;

#ifdef CONFIG_UCLAMP_TASK
	/* Utilization clamp values based on CPU's RUNNABLE tasks */
	struct uclamp_rq	uclamp[UCLAMP_CNT] ____cacheline_aligned;
	unsigned int		uclamp_flags;
#define UCLAMP_FLAG_IDLE 0x01
#endif

	struct cfs_rq		cfs;
	struct rt_rq		rt;
	struct dl_rq		dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this CPU: */
	struct list_head	leaf_cfs_rq_list;
	struct list_head	*tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long		nr_uninterruptible;

	struct task_struct __rcu	*curr;
	struct task_struct	*idle;
	struct task_struct	*stop;
	unsigned long		next_balance;
	struct mm_struct	*prev_mm;

	unsigned int		clock_update_flags;
	u64			clock;
	/* Ensure that all clocks are in the same cache line */
	u64			clock_task ____cacheline_aligned;
	u64			clock_pelt;
	unsigned long		lost_idle_time;

	atomic_t		nr_iowait;

#ifdef CONFIG_MEMBARRIER
	int membarrier_state;
#endif

#ifdef CONFIG_SMP
	struct root_domain		*rd;
	struct sched_domain __rcu	*sd;

	unsigned long		cpu_capacity;
	unsigned long		cpu_capacity_orig;

	struct callback_head	*balance_callback;

	unsigned char		nohz_idle_balance;
	unsigned char		idle_balance;

	unsigned long		misfit_task_load;

	/* For active balancing */
	int			active_balance;
	int			push_cpu;
	struct cpu_stop_work	active_balance_work;

	/* CPU of this runqueue: */
	int			cpu;
	int			online;

	struct list_head cfs_tasks;

	struct sched_avg	avg_rt;
	struct sched_avg	avg_dl;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	struct sched_avg	avg_irq;
#endif
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
	struct sched_avg	avg_thermal;
#endif
	u64			idle_stamp;
	u64			avg_idle;

	/* This is used to determine avg_idle's max value */
	u64			max_idle_balance_cost;
#endif /* CONFIG_SMP */

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64			prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64			prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64			prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long		calc_load_update;
	long			calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	call_single_data_t	hrtick_csd;
#endif
	struct hrtimer		hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info	rq_sched_info;
	unsigned long long	rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int		yld_count;

	/* schedule() stats */
	unsigned int		sched_count;
	unsigned int		sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int		ttwu_count;
	unsigned int		ttwu_local;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state	*idle_state;
#endif
};

#ifdef CONFIG_FAIR_GROUP_SCHED

/* CPU runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

#else

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}
#endif

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}


#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1093391e43daSPeter Zijlstra 1094518cd623SPeter Zijlstra #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 10954a32fea9SChristoph Lameter #define this_rq() this_cpu_ptr(&runqueues) 1096518cd623SPeter Zijlstra #define task_rq(p) cpu_rq(task_cpu(p)) 1097518cd623SPeter Zijlstra #define cpu_curr(cpu) (cpu_rq(cpu)->curr) 10984a32fea9SChristoph Lameter #define raw_rq() raw_cpu_ptr(&runqueues) 1099518cd623SPeter Zijlstra 11001f351d7fSJohannes Weiner extern void update_rq_clock(struct rq *rq); 11011f351d7fSJohannes Weiner 1102cebde6d6SPeter Zijlstra static inline u64 __rq_clock_broken(struct rq *rq) 1103cebde6d6SPeter Zijlstra { 1104316c1608SJason Low return READ_ONCE(rq->clock); 1105cebde6d6SPeter Zijlstra } 1106cebde6d6SPeter Zijlstra 1107cb42c9a3SMatt Fleming /* 1108cb42c9a3SMatt Fleming * rq::clock_update_flags bits 1109cb42c9a3SMatt Fleming * 1110cb42c9a3SMatt Fleming * %RQCF_REQ_SKIP - will request skipping of clock update on the next 1111cb42c9a3SMatt Fleming * call to __schedule(). This is an optimisation to avoid 1112cb42c9a3SMatt Fleming * neighbouring rq clock updates. 1113cb42c9a3SMatt Fleming * 1114cb42c9a3SMatt Fleming * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is 1115cb42c9a3SMatt Fleming * in effect and calls to update_rq_clock() are being ignored. 1116cb42c9a3SMatt Fleming * 1117cb42c9a3SMatt Fleming * %RQCF_UPDATED - is a debug flag that indicates whether a call has been 1118cb42c9a3SMatt Fleming * made to update_rq_clock() since the last time rq::lock was pinned. 1119cb42c9a3SMatt Fleming * 1120cb42c9a3SMatt Fleming * If inside of __schedule(), clock_update_flags will have been 1121cb42c9a3SMatt Fleming * shifted left (a left shift is a cheap operation for the fast path 1122cb42c9a3SMatt Fleming * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use, 1123cb42c9a3SMatt Fleming * 1124cb42c9a3SMatt Fleming * if (rq->clock_update_flags >= RQCF_UPDATED) 1125cb42c9a3SMatt Fleming * 1126cb42c9a3SMatt Fleming * to check if %RQCF_UPDATED is set. It'll never be shifted more than 1127cb42c9a3SMatt Fleming * one position though, because the next rq_unpin_lock() will shift it 1128cb42c9a3SMatt Fleming * back. 1129cb42c9a3SMatt Fleming */ 1130cb42c9a3SMatt Fleming #define RQCF_REQ_SKIP 0x01 1131cb42c9a3SMatt Fleming #define RQCF_ACT_SKIP 0x02 1132cb42c9a3SMatt Fleming #define RQCF_UPDATED 0x04 1133cb42c9a3SMatt Fleming 1134cb42c9a3SMatt Fleming static inline void assert_clock_updated(struct rq *rq) 1135cb42c9a3SMatt Fleming { 1136cb42c9a3SMatt Fleming /* 1137cb42c9a3SMatt Fleming * The only reason for not seeing a clock update since the 1138cb42c9a3SMatt Fleming * last rq_pin_lock() is if we're currently skipping updates.
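 *
 * A minimal pattern that satisfies this check (an illustrative sketch
 * built from helpers declared elsewhere in this file, not a quote from
 * any particular caller) is:
 *
 *	struct rq_flags rf;
 *
 *	rq_lock(rq, &rf);
 *	update_rq_clock(rq);
 *	... rq_clock(rq) and rq_clock_task(rq) are now safe to use ...
 *	rq_unlock(rq, &rf);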
1139cb42c9a3SMatt Fleming */ 1140cb42c9a3SMatt Fleming SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); 1141cb42c9a3SMatt Fleming } 1142cb42c9a3SMatt Fleming 114378becc27SFrederic Weisbecker static inline u64 rq_clock(struct rq *rq) 114478becc27SFrederic Weisbecker { 1145cebde6d6SPeter Zijlstra lockdep_assert_held(&rq->lock); 1146cb42c9a3SMatt Fleming assert_clock_updated(rq); 1147cb42c9a3SMatt Fleming 114878becc27SFrederic Weisbecker return rq->clock; 114978becc27SFrederic Weisbecker } 115078becc27SFrederic Weisbecker 115178becc27SFrederic Weisbecker static inline u64 rq_clock_task(struct rq *rq) 115278becc27SFrederic Weisbecker { 1153cebde6d6SPeter Zijlstra lockdep_assert_held(&rq->lock); 1154cb42c9a3SMatt Fleming assert_clock_updated(rq); 1155cb42c9a3SMatt Fleming 115678becc27SFrederic Weisbecker return rq->clock_task; 115778becc27SFrederic Weisbecker } 115878becc27SFrederic Weisbecker 115905289b90SThara Gopinath /** 116005289b90SThara Gopinath * By default the decay is the default pelt decay period. 116105289b90SThara Gopinath * The decay shift can change the decay period in 116205289b90SThara Gopinath * multiples of 32. 116305289b90SThara Gopinath * Decay shift Decay period(ms) 116405289b90SThara Gopinath * 0 32 116505289b90SThara Gopinath * 1 64 116605289b90SThara Gopinath * 2 128 116705289b90SThara Gopinath * 3 256 116805289b90SThara Gopinath * 4 512 116905289b90SThara Gopinath */ 117005289b90SThara Gopinath extern int sched_thermal_decay_shift; 117105289b90SThara Gopinath 117205289b90SThara Gopinath static inline u64 rq_clock_thermal(struct rq *rq) 117305289b90SThara Gopinath { 117405289b90SThara Gopinath return rq_clock_task(rq) >> sched_thermal_decay_shift; 117505289b90SThara Gopinath } 117605289b90SThara Gopinath 1177adcc8da8SDavidlohr Bueso static inline void rq_clock_skip_update(struct rq *rq) 11789edfbfedSPeter Zijlstra { 11799edfbfedSPeter Zijlstra lockdep_assert_held(&rq->lock); 1180cb42c9a3SMatt Fleming rq->clock_update_flags |= RQCF_REQ_SKIP; 1181adcc8da8SDavidlohr Bueso } 1182adcc8da8SDavidlohr Bueso 1183adcc8da8SDavidlohr Bueso /* 1184595058b6SDavidlohr Bueso * See rt task throttling, which is the only time a skip 1185adcc8da8SDavidlohr Bueso * request is cancelled. 1186adcc8da8SDavidlohr Bueso */ 1187adcc8da8SDavidlohr Bueso static inline void rq_clock_cancel_skipupdate(struct rq *rq) 1188adcc8da8SDavidlohr Bueso { 1189adcc8da8SDavidlohr Bueso lockdep_assert_held(&rq->lock); 1190cb42c9a3SMatt Fleming rq->clock_update_flags &= ~RQCF_REQ_SKIP; 11919edfbfedSPeter Zijlstra } 11929edfbfedSPeter Zijlstra 1193d8ac8971SMatt Fleming struct rq_flags { 1194d8ac8971SMatt Fleming unsigned long flags; 1195d8ac8971SMatt Fleming struct pin_cookie cookie; 1196cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG 1197cb42c9a3SMatt Fleming /* 1198cb42c9a3SMatt Fleming * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the 1199cb42c9a3SMatt Fleming * current pin context is stashed here in case it needs to be 1200cb42c9a3SMatt Fleming * restored in rq_repin_lock(). 1201cb42c9a3SMatt Fleming */ 1202cb42c9a3SMatt Fleming unsigned int clock_update_flags; 1203cb42c9a3SMatt Fleming #endif 1204d8ac8971SMatt Fleming }; 1205d8ac8971SMatt Fleming 120658877d34SPeter Zijlstra /* 120758877d34SPeter Zijlstra * Lockdep annotation that avoids accidental unlocks; it's like a 120858877d34SPeter Zijlstra * sticky/continuous lockdep_assert_held(). 
120958877d34SPeter Zijlstra * 121058877d34SPeter Zijlstra * This avoids code that has access to 'struct rq *rq' (basically everything in 121158877d34SPeter Zijlstra * the scheduler) from accidentally unlocking the rq if they do not also have a 121258877d34SPeter Zijlstra * copy of the (on-stack) 'struct rq_flags rf'. 121358877d34SPeter Zijlstra * 121458877d34SPeter Zijlstra * Also see Documentation/locking/lockdep-design.rst. 121558877d34SPeter Zijlstra */ 1216d8ac8971SMatt Fleming static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) 1217d8ac8971SMatt Fleming { 1218d8ac8971SMatt Fleming rf->cookie = lockdep_pin_lock(&rq->lock); 1219cb42c9a3SMatt Fleming 1220cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG 1221cb42c9a3SMatt Fleming rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 1222cb42c9a3SMatt Fleming rf->clock_update_flags = 0; 1223cb42c9a3SMatt Fleming #endif 1224d8ac8971SMatt Fleming } 1225d8ac8971SMatt Fleming 1226d8ac8971SMatt Fleming static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) 1227d8ac8971SMatt Fleming { 1228cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG 1229cb42c9a3SMatt Fleming if (rq->clock_update_flags > RQCF_ACT_SKIP) 1230cb42c9a3SMatt Fleming rf->clock_update_flags = RQCF_UPDATED; 1231cb42c9a3SMatt Fleming #endif 1232cb42c9a3SMatt Fleming 1233d8ac8971SMatt Fleming lockdep_unpin_lock(&rq->lock, rf->cookie); 1234d8ac8971SMatt Fleming } 1235d8ac8971SMatt Fleming 1236d8ac8971SMatt Fleming static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) 1237d8ac8971SMatt Fleming { 1238d8ac8971SMatt Fleming lockdep_repin_lock(&rq->lock, rf->cookie); 1239cb42c9a3SMatt Fleming 1240cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG 1241cb42c9a3SMatt Fleming /* 1242cb42c9a3SMatt Fleming * Restore the value we stashed in @rf for this pin context. 
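 *
 * The usual pairing, shown as an illustrative sketch of a caller that
 * must drop and retake rq->lock (not a quote from an existing function):
 *
 *	rq_unpin_lock(rq, &rf);
 *	raw_spin_unlock(&rq->lock);
 *	... work that needs rq->lock released ...
 *	raw_spin_lock(&rq->lock);
 *	rq_repin_lock(rq, &rf);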
1243cb42c9a3SMatt Fleming */ 1244cb42c9a3SMatt Fleming rq->clock_update_flags |= rf->clock_update_flags; 1245cb42c9a3SMatt Fleming #endif 1246d8ac8971SMatt Fleming } 1247d8ac8971SMatt Fleming 12481f351d7fSJohannes Weiner struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 12491f351d7fSJohannes Weiner __acquires(rq->lock); 12501f351d7fSJohannes Weiner 12511f351d7fSJohannes Weiner struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 12521f351d7fSJohannes Weiner __acquires(p->pi_lock) 12531f351d7fSJohannes Weiner __acquires(rq->lock); 12541f351d7fSJohannes Weiner 12551f351d7fSJohannes Weiner static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) 12561f351d7fSJohannes Weiner __releases(rq->lock) 12571f351d7fSJohannes Weiner { 12581f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 12591f351d7fSJohannes Weiner raw_spin_unlock(&rq->lock); 12601f351d7fSJohannes Weiner } 12611f351d7fSJohannes Weiner 12621f351d7fSJohannes Weiner static inline void 12631f351d7fSJohannes Weiner task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 12641f351d7fSJohannes Weiner __releases(rq->lock) 12651f351d7fSJohannes Weiner __releases(p->pi_lock) 12661f351d7fSJohannes Weiner { 12671f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 12681f351d7fSJohannes Weiner raw_spin_unlock(&rq->lock); 12691f351d7fSJohannes Weiner raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 12701f351d7fSJohannes Weiner } 12711f351d7fSJohannes Weiner 12721f351d7fSJohannes Weiner static inline void 12731f351d7fSJohannes Weiner rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) 12741f351d7fSJohannes Weiner __acquires(rq->lock) 12751f351d7fSJohannes Weiner { 12761f351d7fSJohannes Weiner raw_spin_lock_irqsave(&rq->lock, rf->flags); 12771f351d7fSJohannes Weiner rq_pin_lock(rq, rf); 12781f351d7fSJohannes Weiner } 12791f351d7fSJohannes Weiner 12801f351d7fSJohannes Weiner static inline void 12811f351d7fSJohannes Weiner rq_lock_irq(struct rq *rq, struct rq_flags *rf) 12821f351d7fSJohannes Weiner __acquires(rq->lock) 12831f351d7fSJohannes Weiner { 12841f351d7fSJohannes Weiner raw_spin_lock_irq(&rq->lock); 12851f351d7fSJohannes Weiner rq_pin_lock(rq, rf); 12861f351d7fSJohannes Weiner } 12871f351d7fSJohannes Weiner 12881f351d7fSJohannes Weiner static inline void 12891f351d7fSJohannes Weiner rq_lock(struct rq *rq, struct rq_flags *rf) 12901f351d7fSJohannes Weiner __acquires(rq->lock) 12911f351d7fSJohannes Weiner { 12921f351d7fSJohannes Weiner raw_spin_lock(&rq->lock); 12931f351d7fSJohannes Weiner rq_pin_lock(rq, rf); 12941f351d7fSJohannes Weiner } 12951f351d7fSJohannes Weiner 12961f351d7fSJohannes Weiner static inline void 12971f351d7fSJohannes Weiner rq_relock(struct rq *rq, struct rq_flags *rf) 12981f351d7fSJohannes Weiner __acquires(rq->lock) 12991f351d7fSJohannes Weiner { 13001f351d7fSJohannes Weiner raw_spin_lock(&rq->lock); 13011f351d7fSJohannes Weiner rq_repin_lock(rq, rf); 13021f351d7fSJohannes Weiner } 13031f351d7fSJohannes Weiner 13041f351d7fSJohannes Weiner static inline void 13051f351d7fSJohannes Weiner rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) 13061f351d7fSJohannes Weiner __releases(rq->lock) 13071f351d7fSJohannes Weiner { 13081f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 13091f351d7fSJohannes Weiner raw_spin_unlock_irqrestore(&rq->lock, rf->flags); 13101f351d7fSJohannes Weiner } 13111f351d7fSJohannes Weiner 13121f351d7fSJohannes Weiner static inline void 13131f351d7fSJohannes Weiner rq_unlock_irq(struct rq *rq, struct rq_flags *rf) 13141f351d7fSJohannes Weiner 
__releases(rq->lock) 13151f351d7fSJohannes Weiner { 13161f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 13171f351d7fSJohannes Weiner raw_spin_unlock_irq(&rq->lock); 13181f351d7fSJohannes Weiner } 13191f351d7fSJohannes Weiner 13201f351d7fSJohannes Weiner static inline void 13211f351d7fSJohannes Weiner rq_unlock(struct rq *rq, struct rq_flags *rf) 13221f351d7fSJohannes Weiner __releases(rq->lock) 13231f351d7fSJohannes Weiner { 13241f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 13251f351d7fSJohannes Weiner raw_spin_unlock(&rq->lock); 13261f351d7fSJohannes Weiner } 13271f351d7fSJohannes Weiner 1328246b3b33SJohannes Weiner static inline struct rq * 1329246b3b33SJohannes Weiner this_rq_lock_irq(struct rq_flags *rf) 1330246b3b33SJohannes Weiner __acquires(rq->lock) 1331246b3b33SJohannes Weiner { 1332246b3b33SJohannes Weiner struct rq *rq; 1333246b3b33SJohannes Weiner 1334246b3b33SJohannes Weiner local_irq_disable(); 1335246b3b33SJohannes Weiner rq = this_rq(); 1336246b3b33SJohannes Weiner rq_lock(rq, rf); 1337246b3b33SJohannes Weiner return rq; 1338246b3b33SJohannes Weiner } 1339246b3b33SJohannes Weiner 13409942f79bSRik van Riel #ifdef CONFIG_NUMA 1341e3fe70b1SRik van Riel enum numa_topology_type { 1342e3fe70b1SRik van Riel NUMA_DIRECT, 1343e3fe70b1SRik van Riel NUMA_GLUELESS_MESH, 1344e3fe70b1SRik van Riel NUMA_BACKPLANE, 1345e3fe70b1SRik van Riel }; 1346e3fe70b1SRik van Riel extern enum numa_topology_type sched_numa_topology_type; 13479942f79bSRik van Riel extern int sched_max_numa_distance; 13489942f79bSRik van Riel extern bool find_numa_distance(int distance); 1349f2cb1360SIngo Molnar extern void sched_init_numa(void); 1350f2cb1360SIngo Molnar extern void sched_domains_numa_masks_set(unsigned int cpu); 1351f2cb1360SIngo Molnar extern void sched_domains_numa_masks_clear(unsigned int cpu); 1352e0e8d491SWanpeng Li extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); 1353f2cb1360SIngo Molnar #else 1354f2cb1360SIngo Molnar static inline void sched_init_numa(void) { } 1355f2cb1360SIngo Molnar static inline void sched_domains_numa_masks_set(unsigned int cpu) { } 1356f2cb1360SIngo Molnar static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } 1357e0e8d491SWanpeng Li static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 1358e0e8d491SWanpeng Li { 1359e0e8d491SWanpeng Li return nr_cpu_ids; 1360e0e8d491SWanpeng Li } 1361f2cb1360SIngo Molnar #endif 1362f2cb1360SIngo Molnar 1363f809ca9aSMel Gorman #ifdef CONFIG_NUMA_BALANCING 136444dba3d5SIulia Manda /* The regions in numa_faults array from task_struct */ 136544dba3d5SIulia Manda enum numa_faults_stats { 136644dba3d5SIulia Manda NUMA_MEM = 0, 136744dba3d5SIulia Manda NUMA_CPU, 136844dba3d5SIulia Manda NUMA_MEMBUF, 136944dba3d5SIulia Manda NUMA_CPUBUF 137044dba3d5SIulia Manda }; 13710ec8aa00SPeter Zijlstra extern void sched_setnuma(struct task_struct *p, int node); 1372e6628d5bSMel Gorman extern int migrate_task_to(struct task_struct *p, int cpu); 13730ad4e3dfSSrikar Dronamraju extern int migrate_swap(struct task_struct *p, struct task_struct *t, 13740ad4e3dfSSrikar Dronamraju int cpu, int scpu); 137513784475SMel Gorman extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); 137613784475SMel Gorman #else 137713784475SMel Gorman static inline void 137813784475SMel Gorman init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 137913784475SMel Gorman { 138013784475SMel Gorman } 1381f809ca9aSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 1382f809ca9aSMel 
Gorman 1383518cd623SPeter Zijlstra #ifdef CONFIG_SMP 1384518cd623SPeter Zijlstra 1385e3fca9e7SPeter Zijlstra static inline void 1386e3fca9e7SPeter Zijlstra queue_balance_callback(struct rq *rq, 1387e3fca9e7SPeter Zijlstra struct callback_head *head, 1388e3fca9e7SPeter Zijlstra void (*func)(struct rq *rq)) 1389e3fca9e7SPeter Zijlstra { 1390e3fca9e7SPeter Zijlstra lockdep_assert_held(&rq->lock); 1391e3fca9e7SPeter Zijlstra 1392e3fca9e7SPeter Zijlstra if (unlikely(head->next)) 1393e3fca9e7SPeter Zijlstra return; 1394e3fca9e7SPeter Zijlstra 1395e3fca9e7SPeter Zijlstra head->func = (void (*)(struct callback_head *))func; 1396e3fca9e7SPeter Zijlstra head->next = rq->balance_callback; 1397e3fca9e7SPeter Zijlstra rq->balance_callback = head; 1398e3fca9e7SPeter Zijlstra } 1399e3fca9e7SPeter Zijlstra 1400391e43daSPeter Zijlstra #define rcu_dereference_check_sched_domain(p) \ 1401391e43daSPeter Zijlstra rcu_dereference_check((p), \ 1402391e43daSPeter Zijlstra lockdep_is_held(&sched_domains_mutex)) 1403391e43daSPeter Zijlstra 1404391e43daSPeter Zijlstra /* 1405391e43daSPeter Zijlstra * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 1406337e9b07SPaul E. McKenney * See destroy_sched_domains: call_rcu for details. 1407391e43daSPeter Zijlstra * 1408391e43daSPeter Zijlstra * The domain tree of any CPU may only be accessed from within 1409391e43daSPeter Zijlstra * preempt-disabled sections. 1410391e43daSPeter Zijlstra */ 1411391e43daSPeter Zijlstra #define for_each_domain(cpu, __sd) \ 1412518cd623SPeter Zijlstra for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ 1413518cd623SPeter Zijlstra __sd; __sd = __sd->parent) 1414391e43daSPeter Zijlstra 1415518cd623SPeter Zijlstra /** 1416518cd623SPeter Zijlstra * highest_flag_domain - Return highest sched_domain containing flag. 141797fb7a0aSIngo Molnar * @cpu: The CPU whose highest level of sched domain is to 1418518cd623SPeter Zijlstra * be returned. 1419518cd623SPeter Zijlstra * @flag: The flag to check for the highest sched_domain 142097fb7a0aSIngo Molnar * for the given CPU. 1421518cd623SPeter Zijlstra * 142297fb7a0aSIngo Molnar * Returns the highest sched_domain of a CPU which contains the given flag. 
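 *
 * Illustrative use (a sketch of roughly how the per-CPU sd_llc pointers
 * declared below get set up during a domain rebuild; SD_SHARE_PKG_RESOURCES
 * is a topology flag assumed here, it is not defined in this file):
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);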
1423518cd623SPeter Zijlstra */ 1424518cd623SPeter Zijlstra static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 1425518cd623SPeter Zijlstra { 1426518cd623SPeter Zijlstra struct sched_domain *sd, *hsd = NULL; 1427518cd623SPeter Zijlstra 1428518cd623SPeter Zijlstra for_each_domain(cpu, sd) { 1429518cd623SPeter Zijlstra if (!(sd->flags & flag)) 1430518cd623SPeter Zijlstra break; 1431518cd623SPeter Zijlstra hsd = sd; 1432518cd623SPeter Zijlstra } 1433518cd623SPeter Zijlstra 1434518cd623SPeter Zijlstra return hsd; 1435518cd623SPeter Zijlstra } 1436518cd623SPeter Zijlstra 1437fb13c7eeSMel Gorman static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) 1438fb13c7eeSMel Gorman { 1439fb13c7eeSMel Gorman struct sched_domain *sd; 1440fb13c7eeSMel Gorman 1441fb13c7eeSMel Gorman for_each_domain(cpu, sd) { 1442fb13c7eeSMel Gorman if (sd->flags & flag) 1443fb13c7eeSMel Gorman break; 1444fb13c7eeSMel Gorman } 1445fb13c7eeSMel Gorman 1446fb13c7eeSMel Gorman return sd; 1447fb13c7eeSMel Gorman } 1448fb13c7eeSMel Gorman 1449994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); 14507d9ffa89SPeter Zijlstra DECLARE_PER_CPU(int, sd_llc_size); 1451518cd623SPeter Zijlstra DECLARE_PER_CPU(int, sd_llc_id); 1452994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 1453994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); 1454994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 1455994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 1456df054e84SMorten Rasmussen extern struct static_key_false sched_asym_cpucapacity; 1457518cd623SPeter Zijlstra 145863b2ca30SNicolas Pitre struct sched_group_capacity { 14595e6521eaSLi Zefan atomic_t ref; 14605e6521eaSLi Zefan /* 1461172895e6SYuyang Du * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity 146263b2ca30SNicolas Pitre * for a single CPU. 14635e6521eaSLi Zefan */ 1464bf475ce0SMorten Rasmussen unsigned long capacity; 1465bf475ce0SMorten Rasmussen unsigned long min_capacity; /* Min per-CPU capacity in group */ 1466e3d6d0cbSMorten Rasmussen unsigned long max_capacity; /* Max per-CPU capacity in group */ 14675e6521eaSLi Zefan unsigned long next_update; 146863b2ca30SNicolas Pitre int imbalance; /* XXX unrelated to capacity but shared group state */ 14695e6521eaSLi Zefan 1470005f874dSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 1471005f874dSPeter Zijlstra int id; 1472005f874dSPeter Zijlstra #endif 1473005f874dSPeter Zijlstra 147497fb7a0aSIngo Molnar unsigned long cpumask[0]; /* Balance mask */ 14755e6521eaSLi Zefan }; 14765e6521eaSLi Zefan 14775e6521eaSLi Zefan struct sched_group { 14785e6521eaSLi Zefan struct sched_group *next; /* Must be a circular list */ 14795e6521eaSLi Zefan atomic_t ref; 14805e6521eaSLi Zefan 14815e6521eaSLi Zefan unsigned int group_weight; 148263b2ca30SNicolas Pitre struct sched_group_capacity *sgc; 148397fb7a0aSIngo Molnar int asym_prefer_cpu; /* CPU of highest priority in group */ 14845e6521eaSLi Zefan 14855e6521eaSLi Zefan /* 14865e6521eaSLi Zefan * The CPUs this group covers. 14875e6521eaSLi Zefan * 14885e6521eaSLi Zefan * NOTE: this field is variable length. (Allocated dynamically 14895e6521eaSLi Zefan * by attaching extra space to the end of the structure, 14905e6521eaSLi Zefan * depending on how many CPUs the kernel has booted up with) 14915e6521eaSLi Zefan */ 149204f5c362SGustavo A. R. 
Silva unsigned long cpumask[]; 14935e6521eaSLi Zefan }; 14945e6521eaSLi Zefan 1495ae4df9d6SPeter Zijlstra static inline struct cpumask *sched_group_span(struct sched_group *sg) 14965e6521eaSLi Zefan { 14975e6521eaSLi Zefan return to_cpumask(sg->cpumask); 14985e6521eaSLi Zefan } 14995e6521eaSLi Zefan 15005e6521eaSLi Zefan /* 1501e5c14b1fSPeter Zijlstra * See build_balance_mask(). 15025e6521eaSLi Zefan */ 1503e5c14b1fSPeter Zijlstra static inline struct cpumask *group_balance_mask(struct sched_group *sg) 15045e6521eaSLi Zefan { 150563b2ca30SNicolas Pitre return to_cpumask(sg->sgc->cpumask); 15065e6521eaSLi Zefan } 15075e6521eaSLi Zefan 15085e6521eaSLi Zefan /** 150997fb7a0aSIngo Molnar * group_first_cpu - Returns the first CPU in the cpumask of a sched_group. 151097fb7a0aSIngo Molnar * @group: The group whose first CPU is to be returned. 15115e6521eaSLi Zefan */ 15125e6521eaSLi Zefan static inline unsigned int group_first_cpu(struct sched_group *group) 15135e6521eaSLi Zefan { 1514ae4df9d6SPeter Zijlstra return cpumask_first(sched_group_span(group)); 15155e6521eaSLi Zefan } 15165e6521eaSLi Zefan 1517c1174876SPeter Zijlstra extern int group_balance_cpu(struct sched_group *sg); 1518c1174876SPeter Zijlstra 15193866e845SSteven Rostedt (Red Hat) #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 15203866e845SSteven Rostedt (Red Hat) void register_sched_domain_sysctl(void); 1521bbdacdfeSPeter Zijlstra void dirty_sched_domain_sysctl(int cpu); 15223866e845SSteven Rostedt (Red Hat) void unregister_sched_domain_sysctl(void); 15233866e845SSteven Rostedt (Red Hat) #else 15243866e845SSteven Rostedt (Red Hat) static inline void register_sched_domain_sysctl(void) 15253866e845SSteven Rostedt (Red Hat) { 15263866e845SSteven Rostedt (Red Hat) } 1527bbdacdfeSPeter Zijlstra static inline void dirty_sched_domain_sysctl(int cpu) 1528bbdacdfeSPeter Zijlstra { 1529bbdacdfeSPeter Zijlstra } 15303866e845SSteven Rostedt (Red Hat) static inline void unregister_sched_domain_sysctl(void) 15313866e845SSteven Rostedt (Red Hat) { 15323866e845SSteven Rostedt (Red Hat) } 15333866e845SSteven Rostedt (Red Hat) #endif 15343866e845SSteven Rostedt (Red Hat) 1535b2a02fc4SPeter Zijlstra extern void flush_smp_call_function_from_idle(void); 1536e3baac47SPeter Zijlstra 1537b2a02fc4SPeter Zijlstra #else /* !CONFIG_SMP: */ 1538b2a02fc4SPeter Zijlstra static inline void flush_smp_call_function_from_idle(void) { } 1539b2a02fc4SPeter Zijlstra #endif 1540391e43daSPeter Zijlstra 1541391e43daSPeter Zijlstra #include "stats.h" 15421051408fSIngo Molnar #include "autogroup.h" 1543391e43daSPeter Zijlstra 1544391e43daSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED 1545391e43daSPeter Zijlstra 1546391e43daSPeter Zijlstra /* 1547391e43daSPeter Zijlstra * Return the group to which this tasks belongs. 1548391e43daSPeter Zijlstra * 15498af01f56STejun Heo * We cannot use task_css() and friends because the cgroup subsystem 15508af01f56STejun Heo * changes that value before the cgroup_subsys::attach() method is called, 15518af01f56STejun Heo * therefore we cannot pin it and might observe the wrong value. 15528323f26cSPeter Zijlstra * 15538323f26cSPeter Zijlstra * The same is true for autogroup's p->signal->autogroup->tg, the autogroup 15548323f26cSPeter Zijlstra * core changes this before calling sched_move_task(). 15558323f26cSPeter Zijlstra * 15568323f26cSPeter Zijlstra * Instead we use a 'copy' which is updated from sched_move_task() while 15578323f26cSPeter Zijlstra * holding both task_struct::pi_lock and rq::lock. 
1558391e43daSPeter Zijlstra */ 1559391e43daSPeter Zijlstra static inline struct task_group *task_group(struct task_struct *p) 1560391e43daSPeter Zijlstra { 15618323f26cSPeter Zijlstra return p->sched_task_group; 1562391e43daSPeter Zijlstra } 1563391e43daSPeter Zijlstra 1564391e43daSPeter Zijlstra /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 1565391e43daSPeter Zijlstra static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 1566391e43daSPeter Zijlstra { 1567391e43daSPeter Zijlstra #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 1568391e43daSPeter Zijlstra struct task_group *tg = task_group(p); 1569391e43daSPeter Zijlstra #endif 1570391e43daSPeter Zijlstra 1571391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED 1572ad936d86SByungchul Park set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 1573391e43daSPeter Zijlstra p->se.cfs_rq = tg->cfs_rq[cpu]; 1574391e43daSPeter Zijlstra p->se.parent = tg->se[cpu]; 1575391e43daSPeter Zijlstra #endif 1576391e43daSPeter Zijlstra 1577391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 1578391e43daSPeter Zijlstra p->rt.rt_rq = tg->rt_rq[cpu]; 1579391e43daSPeter Zijlstra p->rt.parent = tg->rt_se[cpu]; 1580391e43daSPeter Zijlstra #endif 1581391e43daSPeter Zijlstra } 1582391e43daSPeter Zijlstra 1583391e43daSPeter Zijlstra #else /* CONFIG_CGROUP_SCHED */ 1584391e43daSPeter Zijlstra 1585391e43daSPeter Zijlstra static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 1586391e43daSPeter Zijlstra static inline struct task_group *task_group(struct task_struct *p) 1587391e43daSPeter Zijlstra { 1588391e43daSPeter Zijlstra return NULL; 1589391e43daSPeter Zijlstra } 1590391e43daSPeter Zijlstra 1591391e43daSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */ 1592391e43daSPeter Zijlstra 1593391e43daSPeter Zijlstra static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 1594391e43daSPeter Zijlstra { 1595391e43daSPeter Zijlstra set_task_rq(p, cpu); 1596391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1597391e43daSPeter Zijlstra /* 1598391e43daSPeter Zijlstra * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 1599dfcb245eSIngo Molnar * successfully executed on another CPU. We must ensure that updates of 1600391e43daSPeter Zijlstra * per-task data have been completed by this moment. 
1601391e43daSPeter Zijlstra */ 1602391e43daSPeter Zijlstra smp_wmb(); 1603c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK 1604c546951dSAndrea Parri WRITE_ONCE(p->cpu, cpu); 1605c65eacbeSAndy Lutomirski #else 1606c546951dSAndrea Parri WRITE_ONCE(task_thread_info(p)->cpu, cpu); 1607c65eacbeSAndy Lutomirski #endif 1608ac66f547SPeter Zijlstra p->wake_cpu = cpu; 1609391e43daSPeter Zijlstra #endif 1610391e43daSPeter Zijlstra } 1611391e43daSPeter Zijlstra 1612391e43daSPeter Zijlstra /* 1613391e43daSPeter Zijlstra * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 1614391e43daSPeter Zijlstra */ 1615391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 1616c5905afbSIngo Molnar # include <linux/static_key.h> 1617391e43daSPeter Zijlstra # define const_debug __read_mostly 1618391e43daSPeter Zijlstra #else 1619391e43daSPeter Zijlstra # define const_debug const 1620391e43daSPeter Zijlstra #endif 1621391e43daSPeter Zijlstra 1622391e43daSPeter Zijlstra #define SCHED_FEAT(name, enabled) \ 1623391e43daSPeter Zijlstra __SCHED_FEAT_##name , 1624391e43daSPeter Zijlstra 1625391e43daSPeter Zijlstra enum { 1626391e43daSPeter Zijlstra #include "features.h" 1627f8b6d1ccSPeter Zijlstra __SCHED_FEAT_NR, 1628391e43daSPeter Zijlstra }; 1629391e43daSPeter Zijlstra 1630391e43daSPeter Zijlstra #undef SCHED_FEAT 1631391e43daSPeter Zijlstra 1632e9666d10SMasahiro Yamada #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) 1633765cc3a4SPatrick Bellasi 1634765cc3a4SPatrick Bellasi /* 1635765cc3a4SPatrick Bellasi * To support run-time toggling of sched features, all the translation units 1636765cc3a4SPatrick Bellasi * (but core.c) reference the sysctl_sched_features defined in core.c. 1637765cc3a4SPatrick Bellasi */ 1638765cc3a4SPatrick Bellasi extern const_debug unsigned int sysctl_sched_features; 1639765cc3a4SPatrick Bellasi 1640f8b6d1ccSPeter Zijlstra #define SCHED_FEAT(name, enabled) \ 1641c5905afbSIngo Molnar static __always_inline bool static_branch_##name(struct static_key *key) \ 1642f8b6d1ccSPeter Zijlstra { \ 16436e76ea8aSJason Baron return static_key_##enabled(key); \ 1644f8b6d1ccSPeter Zijlstra } 1645f8b6d1ccSPeter Zijlstra 1646f8b6d1ccSPeter Zijlstra #include "features.h" 1647f8b6d1ccSPeter Zijlstra #undef SCHED_FEAT 1648f8b6d1ccSPeter Zijlstra 1649c5905afbSIngo Molnar extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 1650f8b6d1ccSPeter Zijlstra #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 1651765cc3a4SPatrick Bellasi 1652e9666d10SMasahiro Yamada #else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */ 1653765cc3a4SPatrick Bellasi 1654765cc3a4SPatrick Bellasi /* 1655765cc3a4SPatrick Bellasi * Each translation unit has its own copy of sysctl_sched_features to allow 1656765cc3a4SPatrick Bellasi * constants propagation at compile time and compiler optimization based on 1657765cc3a4SPatrick Bellasi * features default. 
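 *
 * For instance, with the sched_feat() macro defined just below, a test
 * such as sched_feat(HRTICK) (HRTICK being one of the entries generated
 * from features.h) expands to
 *
 *	!!(sysctl_sched_features & (1UL << __SCHED_FEAT_HRTICK))
 *
 * which the compiler can fold to a constant when const_debug resolves
 * to const.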
1658765cc3a4SPatrick Bellasi */ 1659765cc3a4SPatrick Bellasi #define SCHED_FEAT(name, enabled) \ 1660765cc3a4SPatrick Bellasi (1UL << __SCHED_FEAT_##name) * enabled | 1661765cc3a4SPatrick Bellasi static const_debug __maybe_unused unsigned int sysctl_sched_features = 1662765cc3a4SPatrick Bellasi #include "features.h" 1663765cc3a4SPatrick Bellasi 0; 1664765cc3a4SPatrick Bellasi #undef SCHED_FEAT 1665765cc3a4SPatrick Bellasi 16667e6f4c5dSPeter Zijlstra #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1667765cc3a4SPatrick Bellasi 1668e9666d10SMasahiro Yamada #endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */ 1669391e43daSPeter Zijlstra 16702a595721SSrikar Dronamraju extern struct static_key_false sched_numa_balancing; 1671cb251765SMel Gorman extern struct static_key_false sched_schedstats; 1672cbee9f88SPeter Zijlstra 1673391e43daSPeter Zijlstra static inline u64 global_rt_period(void) 1674391e43daSPeter Zijlstra { 1675391e43daSPeter Zijlstra return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 1676391e43daSPeter Zijlstra } 1677391e43daSPeter Zijlstra 1678391e43daSPeter Zijlstra static inline u64 global_rt_runtime(void) 1679391e43daSPeter Zijlstra { 1680391e43daSPeter Zijlstra if (sysctl_sched_rt_runtime < 0) 1681391e43daSPeter Zijlstra return RUNTIME_INF; 1682391e43daSPeter Zijlstra 1683391e43daSPeter Zijlstra return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 1684391e43daSPeter Zijlstra } 1685391e43daSPeter Zijlstra 1686391e43daSPeter Zijlstra static inline int task_current(struct rq *rq, struct task_struct *p) 1687391e43daSPeter Zijlstra { 1688391e43daSPeter Zijlstra return rq->curr == p; 1689391e43daSPeter Zijlstra } 1690391e43daSPeter Zijlstra 1691391e43daSPeter Zijlstra static inline int task_running(struct rq *rq, struct task_struct *p) 1692391e43daSPeter Zijlstra { 1693391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1694391e43daSPeter Zijlstra return p->on_cpu; 1695391e43daSPeter Zijlstra #else 1696391e43daSPeter Zijlstra return task_current(rq, p); 1697391e43daSPeter Zijlstra #endif 1698391e43daSPeter Zijlstra } 1699391e43daSPeter Zijlstra 1700da0c1e65SKirill Tkhai static inline int task_on_rq_queued(struct task_struct *p) 1701da0c1e65SKirill Tkhai { 1702da0c1e65SKirill Tkhai return p->on_rq == TASK_ON_RQ_QUEUED; 1703da0c1e65SKirill Tkhai } 1704391e43daSPeter Zijlstra 1705cca26e80SKirill Tkhai static inline int task_on_rq_migrating(struct task_struct *p) 1706cca26e80SKirill Tkhai { 1707c546951dSAndrea Parri return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; 1708cca26e80SKirill Tkhai } 1709cca26e80SKirill Tkhai 1710b13095f0SLi Zefan /* 1711b13095f0SLi Zefan * wake flags 1712b13095f0SLi Zefan */ 171397fb7a0aSIngo Molnar #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ 171497fb7a0aSIngo Molnar #define WF_FORK 0x02 /* Child wakeup after fork */ 17152ebb1771SMel Gorman #define WF_MIGRATED 0x04 /* Internal use, task got migrated */ 1716739f70b4SPeter Zijlstra #define WF_ON_CPU 0x08 /* Wakee is on_cpu */ 1717b13095f0SLi Zefan 1718391e43daSPeter Zijlstra /* 1719391e43daSPeter Zijlstra * To aid in avoiding the subversion of "niceness" due to uneven distribution 1720391e43daSPeter Zijlstra * of tasks with abnormal "nice" values across CPUs the contribution that 1721391e43daSPeter Zijlstra * each task makes to its run queue's load is weighted according to its 1722391e43daSPeter Zijlstra * scheduling class and "nice" value. 
For SCHED_NORMAL tasks this is just a 1723391e43daSPeter Zijlstra * scaled version of the new time slice allocation that they receive on time 1724391e43daSPeter Zijlstra * slice expiry etc. 1725391e43daSPeter Zijlstra */ 1726391e43daSPeter Zijlstra 1727391e43daSPeter Zijlstra #define WEIGHT_IDLEPRIO 3 1728391e43daSPeter Zijlstra #define WMULT_IDLEPRIO 1431655765 1729391e43daSPeter Zijlstra 1730ed82b8a1SAndi Kleen extern const int sched_prio_to_weight[40]; 1731ed82b8a1SAndi Kleen extern const u32 sched_prio_to_wmult[40]; 1732391e43daSPeter Zijlstra 1733ff77e468SPeter Zijlstra /* 1734ff77e468SPeter Zijlstra * {de,en}queue flags: 1735ff77e468SPeter Zijlstra * 1736ff77e468SPeter Zijlstra * DEQUEUE_SLEEP - task is no longer runnable 1737ff77e468SPeter Zijlstra * ENQUEUE_WAKEUP - task just became runnable 1738ff77e468SPeter Zijlstra * 1739ff77e468SPeter Zijlstra * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 1740ff77e468SPeter Zijlstra * are in a known state which allows modification. Such pairs 1741ff77e468SPeter Zijlstra * should preserve as much state as possible. 1742ff77e468SPeter Zijlstra * 1743ff77e468SPeter Zijlstra * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 1744ff77e468SPeter Zijlstra * in the runqueue. 1745ff77e468SPeter Zijlstra * 1746ff77e468SPeter Zijlstra * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 1747ff77e468SPeter Zijlstra * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 174859efa0baSPeter Zijlstra * ENQUEUE_MIGRATED - the task was migrated during wakeup 1749ff77e468SPeter Zijlstra * 1750ff77e468SPeter Zijlstra */ 1751ff77e468SPeter Zijlstra 1752ff77e468SPeter Zijlstra #define DEQUEUE_SLEEP 0x01 175397fb7a0aSIngo Molnar #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 175497fb7a0aSIngo Molnar #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 175597fb7a0aSIngo Molnar #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 1756ff77e468SPeter Zijlstra 17571de64443SPeter Zijlstra #define ENQUEUE_WAKEUP 0x01 1758ff77e468SPeter Zijlstra #define ENQUEUE_RESTORE 0x02 1759ff77e468SPeter Zijlstra #define ENQUEUE_MOVE 0x04 17600a67d1eeSPeter Zijlstra #define ENQUEUE_NOCLOCK 0x08 1761ff77e468SPeter Zijlstra 17620a67d1eeSPeter Zijlstra #define ENQUEUE_HEAD 0x10 17630a67d1eeSPeter Zijlstra #define ENQUEUE_REPLENISH 0x20 1764c82ba9faSLi Zefan #ifdef CONFIG_SMP 17650a67d1eeSPeter Zijlstra #define ENQUEUE_MIGRATED 0x40 1766c82ba9faSLi Zefan #else 176759efa0baSPeter Zijlstra #define ENQUEUE_MIGRATED 0x00 1768c82ba9faSLi Zefan #endif 1769c82ba9faSLi Zefan 177037e117c0SPeter Zijlstra #define RETRY_TASK ((void *)-1UL) 177137e117c0SPeter Zijlstra 1772c82ba9faSLi Zefan struct sched_class { 1773c82ba9faSLi Zefan 177469842cbaSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK 177569842cbaSPatrick Bellasi int uclamp_enabled; 177669842cbaSPatrick Bellasi #endif 177769842cbaSPatrick Bellasi 1778c82ba9faSLi Zefan void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 1779c82ba9faSLi Zefan void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 1780c82ba9faSLi Zefan void (*yield_task) (struct rq *rq); 17810900acf2SDietmar Eggemann bool (*yield_to_task)(struct rq *rq, struct task_struct *p); 1782c82ba9faSLi Zefan 1783c82ba9faSLi Zefan void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); 1784c82ba9faSLi Zefan 178598c2f700SPeter Zijlstra struct task_struct *(*pick_next_task)(struct rq *rq); 178698c2f700SPeter Zijlstra 17876e2df058SPeter Zijlstra 
void (*put_prev_task)(struct rq *rq, struct task_struct *p); 1788a0e813f2SPeter Zijlstra void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); 1789c82ba9faSLi Zefan 1790c82ba9faSLi Zefan #ifdef CONFIG_SMP 17916e2df058SPeter Zijlstra int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 1792ac66f547SPeter Zijlstra int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); 17931327237aSSrikar Dronamraju void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 1794c82ba9faSLi Zefan 1795c82ba9faSLi Zefan void (*task_woken)(struct rq *this_rq, struct task_struct *task); 1796c82ba9faSLi Zefan 1797c82ba9faSLi Zefan void (*set_cpus_allowed)(struct task_struct *p, 1798c82ba9faSLi Zefan const struct cpumask *newmask); 1799c82ba9faSLi Zefan 1800c82ba9faSLi Zefan void (*rq_online)(struct rq *rq); 1801c82ba9faSLi Zefan void (*rq_offline)(struct rq *rq); 1802c82ba9faSLi Zefan #endif 1803c82ba9faSLi Zefan 1804c82ba9faSLi Zefan void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 1805c82ba9faSLi Zefan void (*task_fork)(struct task_struct *p); 1806e6c390f2SDario Faggioli void (*task_dead)(struct task_struct *p); 1807c82ba9faSLi Zefan 180867dfa1b7SKirill Tkhai /* 180967dfa1b7SKirill Tkhai * The switched_from() call is allowed to drop rq->lock, therefore we 181067dfa1b7SKirill Tkhai * cannot assume the switched_from/switched_to pair is serialized by 181167dfa1b7SKirill Tkhai * rq->lock. They are however serialized by p->pi_lock. 181267dfa1b7SKirill Tkhai */ 1813c82ba9faSLi Zefan void (*switched_from)(struct rq *this_rq, struct task_struct *task); 1814c82ba9faSLi Zefan void (*switched_to) (struct rq *this_rq, struct task_struct *task); 1815c82ba9faSLi Zefan void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 1816c82ba9faSLi Zefan int oldprio); 1817c82ba9faSLi Zefan 1818c82ba9faSLi Zefan unsigned int (*get_rr_interval)(struct rq *rq, 1819c82ba9faSLi Zefan struct task_struct *task); 1820c82ba9faSLi Zefan 18216e998916SStanislaw Gruszka void (*update_curr)(struct rq *rq); 18226e998916SStanislaw Gruszka 1823ea86cb4bSVincent Guittot #define TASK_SET_GROUP 0 1824ea86cb4bSVincent Guittot #define TASK_MOVE_GROUP 1 1825ea86cb4bSVincent Guittot 1826c82ba9faSLi Zefan #ifdef CONFIG_FAIR_GROUP_SCHED 1827ea86cb4bSVincent Guittot void (*task_change_group)(struct task_struct *p, int type); 1828c82ba9faSLi Zefan #endif 182985c2ce91SPeter Zijlstra } __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */ 1830391e43daSPeter Zijlstra 18313f1d2a31SPeter Zijlstra static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 18323f1d2a31SPeter Zijlstra { 183310e7071bSPeter Zijlstra WARN_ON_ONCE(rq->curr != prev); 18346e2df058SPeter Zijlstra prev->sched_class->put_prev_task(rq, prev); 18353f1d2a31SPeter Zijlstra } 18363f1d2a31SPeter Zijlstra 183703b7fad1SPeter Zijlstra static inline void set_next_task(struct rq *rq, struct task_struct *next) 1838b2bf6c31SPeter Zijlstra { 183903b7fad1SPeter Zijlstra WARN_ON_ONCE(rq->curr != next); 1840a0e813f2SPeter Zijlstra next->sched_class->set_next_task(rq, next, false); 1841b2bf6c31SPeter Zijlstra } 1842b2bf6c31SPeter Zijlstra 1843c3a340f7SSteven Rostedt (VMware) /* Defined in include/asm-generic/vmlinux.lds.h */ 1844c3a340f7SSteven Rostedt (VMware) extern struct sched_class __begin_sched_classes[]; 1845c3a340f7SSteven Rostedt (VMware) extern struct sched_class __end_sched_classes[]; 1846c3a340f7SSteven Rostedt (VMware) 1847c3a340f7SSteven Rostedt (VMware) #define
sched_class_highest (__end_sched_classes - 1) 1848c3a340f7SSteven Rostedt (VMware) #define sched_class_lowest (__begin_sched_classes - 1) 18496e2df058SPeter Zijlstra 18506e2df058SPeter Zijlstra #define for_class_range(class, _from, _to) \ 1851c3a340f7SSteven Rostedt (VMware) for (class = (_from); class != (_to); class--) 18526e2df058SPeter Zijlstra 1853391e43daSPeter Zijlstra #define for_each_class(class) \ 1854c3a340f7SSteven Rostedt (VMware) for_class_range(class, sched_class_highest, sched_class_lowest) 1855391e43daSPeter Zijlstra 1856391e43daSPeter Zijlstra extern const struct sched_class stop_sched_class; 1857aab03e05SDario Faggioli extern const struct sched_class dl_sched_class; 1858391e43daSPeter Zijlstra extern const struct sched_class rt_sched_class; 1859391e43daSPeter Zijlstra extern const struct sched_class fair_sched_class; 1860391e43daSPeter Zijlstra extern const struct sched_class idle_sched_class; 1861391e43daSPeter Zijlstra 18626e2df058SPeter Zijlstra static inline bool sched_stop_runnable(struct rq *rq) 18636e2df058SPeter Zijlstra { 18646e2df058SPeter Zijlstra return rq->stop && task_on_rq_queued(rq->stop); 18656e2df058SPeter Zijlstra } 18666e2df058SPeter Zijlstra 18676e2df058SPeter Zijlstra static inline bool sched_dl_runnable(struct rq *rq) 18686e2df058SPeter Zijlstra { 18696e2df058SPeter Zijlstra return rq->dl.dl_nr_running > 0; 18706e2df058SPeter Zijlstra } 18716e2df058SPeter Zijlstra 18726e2df058SPeter Zijlstra static inline bool sched_rt_runnable(struct rq *rq) 18736e2df058SPeter Zijlstra { 18746e2df058SPeter Zijlstra return rq->rt.rt_queued > 0; 18756e2df058SPeter Zijlstra } 18766e2df058SPeter Zijlstra 18776e2df058SPeter Zijlstra static inline bool sched_fair_runnable(struct rq *rq) 18786e2df058SPeter Zijlstra { 18796e2df058SPeter Zijlstra return rq->cfs.nr_running > 0; 18806e2df058SPeter Zijlstra } 1881391e43daSPeter Zijlstra 18825d7d6056SPeter Zijlstra extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 188398c2f700SPeter Zijlstra extern struct task_struct *pick_next_task_idle(struct rq *rq); 18845d7d6056SPeter Zijlstra 1885391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1886391e43daSPeter Zijlstra 188763b2ca30SNicolas Pitre extern void update_group_capacity(struct sched_domain *sd, int cpu); 1888b719203bSLi Zefan 18897caff66fSDaniel Lezcano extern void trigger_load_balance(struct rq *rq); 1890391e43daSPeter Zijlstra 1891c5b28038SPeter Zijlstra extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); 1892c5b28038SPeter Zijlstra 1893391e43daSPeter Zijlstra #endif 1894391e43daSPeter Zijlstra 1895442bf3aaSDaniel Lezcano #ifdef CONFIG_CPU_IDLE 1896442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq, 1897442bf3aaSDaniel Lezcano struct cpuidle_state *idle_state) 1898442bf3aaSDaniel Lezcano { 1899442bf3aaSDaniel Lezcano rq->idle_state = idle_state; 1900442bf3aaSDaniel Lezcano } 1901442bf3aaSDaniel Lezcano 1902442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1903442bf3aaSDaniel Lezcano { 19049148a3a1SPeter Zijlstra SCHED_WARN_ON(!rcu_read_lock_held()); 190597fb7a0aSIngo Molnar 1906442bf3aaSDaniel Lezcano return rq->idle_state; 1907442bf3aaSDaniel Lezcano } 1908442bf3aaSDaniel Lezcano #else 1909442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq, 1910442bf3aaSDaniel Lezcano struct cpuidle_state *idle_state) 1911442bf3aaSDaniel Lezcano { 1912442bf3aaSDaniel Lezcano } 1913442bf3aaSDaniel Lezcano 
1914442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1915442bf3aaSDaniel Lezcano { 1916442bf3aaSDaniel Lezcano return NULL; 1917442bf3aaSDaniel Lezcano } 1918442bf3aaSDaniel Lezcano #endif 1919442bf3aaSDaniel Lezcano 19208663effbSSteven Rostedt (VMware) extern void schedule_idle(void); 19218663effbSSteven Rostedt (VMware) 1922391e43daSPeter Zijlstra extern void sysrq_sched_debug_show(void); 1923391e43daSPeter Zijlstra extern void sched_init_granularity(void); 1924391e43daSPeter Zijlstra extern void update_max_interval(void); 19251baca4ceSJuri Lelli 19261baca4ceSJuri Lelli extern void init_sched_dl_class(void); 1927391e43daSPeter Zijlstra extern void init_sched_rt_class(void); 1928391e43daSPeter Zijlstra extern void init_sched_fair_class(void); 1929391e43daSPeter Zijlstra 19309059393eSVincent Guittot extern void reweight_task(struct task_struct *p, int prio); 19319059393eSVincent Guittot 19328875125eSKirill Tkhai extern void resched_curr(struct rq *rq); 1933391e43daSPeter Zijlstra extern void resched_cpu(int cpu); 1934391e43daSPeter Zijlstra 1935391e43daSPeter Zijlstra extern struct rt_bandwidth def_rt_bandwidth; 1936391e43daSPeter Zijlstra extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); 1937391e43daSPeter Zijlstra 1938332ac17eSDario Faggioli extern struct dl_bandwidth def_dl_bandwidth; 1939332ac17eSDario Faggioli extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); 1940aab03e05SDario Faggioli extern void init_dl_task_timer(struct sched_dl_entity *dl_se); 1941209a0cbdSLuca Abeni extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); 1942aab03e05SDario Faggioli 1943c52f14d3SLuca Abeni #define BW_SHIFT 20 1944c52f14d3SLuca Abeni #define BW_UNIT (1 << BW_SHIFT) 19454da3abceSLuca Abeni #define RATIO_SHIFT 8 1946d505b8afSHuaixin Chang #define MAX_BW_BITS (64 - BW_SHIFT) 1947d505b8afSHuaixin Chang #define MAX_BW ((1ULL << MAX_BW_BITS) - 1) 1948332ac17eSDario Faggioli unsigned long to_ratio(u64 period, u64 runtime); 1949332ac17eSDario Faggioli 1950540247fbSYuyang Du extern void init_entity_runnable_average(struct sched_entity *se); 1951d0fe0b9cSDietmar Eggemann extern void post_init_entity_util_avg(struct task_struct *p); 1952a75cdaa9SAlex Shi 195376d92ac3SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL 195476d92ac3SFrederic Weisbecker extern bool sched_can_stop_tick(struct rq *rq); 1955d84b3131SFrederic Weisbecker extern int __init sched_tick_offload_init(void); 195676d92ac3SFrederic Weisbecker 195776d92ac3SFrederic Weisbecker /* 195876d92ac3SFrederic Weisbecker * Tick may be needed by tasks in the runqueue depending on their policy and 195976d92ac3SFrederic Weisbecker * requirements. If tick is needed, lets send the target an IPI to kick it out of 196076d92ac3SFrederic Weisbecker * nohz mode if necessary. 
196176d92ac3SFrederic Weisbecker */ 196276d92ac3SFrederic Weisbecker static inline void sched_update_tick_dependency(struct rq *rq) 196376d92ac3SFrederic Weisbecker { 196421a6ee14SMiaohe Lin int cpu = cpu_of(rq); 196576d92ac3SFrederic Weisbecker 196676d92ac3SFrederic Weisbecker if (!tick_nohz_full_cpu(cpu)) 196776d92ac3SFrederic Weisbecker return; 196876d92ac3SFrederic Weisbecker 196976d92ac3SFrederic Weisbecker if (sched_can_stop_tick(rq)) 197076d92ac3SFrederic Weisbecker tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 197176d92ac3SFrederic Weisbecker else 197276d92ac3SFrederic Weisbecker tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 197376d92ac3SFrederic Weisbecker } 197476d92ac3SFrederic Weisbecker #else 1975d84b3131SFrederic Weisbecker static inline int sched_tick_offload_init(void) { return 0; } 197676d92ac3SFrederic Weisbecker static inline void sched_update_tick_dependency(struct rq *rq) { } 197776d92ac3SFrederic Weisbecker #endif 197876d92ac3SFrederic Weisbecker 197972465447SKirill Tkhai static inline void add_nr_running(struct rq *rq, unsigned count) 1980391e43daSPeter Zijlstra { 198172465447SKirill Tkhai unsigned prev_nr = rq->nr_running; 198272465447SKirill Tkhai 198372465447SKirill Tkhai rq->nr_running = prev_nr + count; 19849d246053SPhil Auld if (trace_sched_update_nr_running_tp_enabled()) { 19859d246053SPhil Auld call_trace_sched_update_nr_running(rq, count); 19869d246053SPhil Auld } 19879f3660c2SFrederic Weisbecker 19884486edd1STim Chen #ifdef CONFIG_SMP 19893e184501SViresh Kumar if (prev_nr < 2 && rq->nr_running >= 2) { 1990e90c8fe1SValentin Schneider if (!READ_ONCE(rq->rd->overload)) 1991e90c8fe1SValentin Schneider WRITE_ONCE(rq->rd->overload, 1); 199276d92ac3SFrederic Weisbecker } 19933e184501SViresh Kumar #endif 19944486edd1STim Chen 199576d92ac3SFrederic Weisbecker sched_update_tick_dependency(rq); 19964486edd1STim Chen } 1997391e43daSPeter Zijlstra 199872465447SKirill Tkhai static inline void sub_nr_running(struct rq *rq, unsigned count) 1999391e43daSPeter Zijlstra { 200072465447SKirill Tkhai rq->nr_running -= count; 20019d246053SPhil Auld if (trace_sched_update_nr_running_tp_enabled()) { 20029d246053SPhil Auld call_trace_sched_update_nr_running(rq, count); 20039d246053SPhil Auld } 20049d246053SPhil Auld 200576d92ac3SFrederic Weisbecker /* Check if we still need preemption */ 200676d92ac3SFrederic Weisbecker sched_update_tick_dependency(rq); 2007391e43daSPeter Zijlstra } 2008391e43daSPeter Zijlstra 2009391e43daSPeter Zijlstra extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 2010391e43daSPeter Zijlstra extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 2011391e43daSPeter Zijlstra 2012391e43daSPeter Zijlstra extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); 2013391e43daSPeter Zijlstra 2014391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_nr_migrate; 2015391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_migration_cost; 2016391e43daSPeter Zijlstra 2017391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_HRTICK 2018391e43daSPeter Zijlstra 2019391e43daSPeter Zijlstra /* 2020391e43daSPeter Zijlstra * Use hrtick when: 2021391e43daSPeter Zijlstra * - enabled by features 2022391e43daSPeter Zijlstra * - hrtimer is actually high res 2023391e43daSPeter Zijlstra */ 2024391e43daSPeter Zijlstra static inline int hrtick_enabled(struct rq *rq) 2025391e43daSPeter Zijlstra { 2026391e43daSPeter Zijlstra if (!sched_feat(HRTICK)) 2027391e43daSPeter Zijlstra return 0; 
2028391e43daSPeter Zijlstra if (!cpu_active(cpu_of(rq))) 2029391e43daSPeter Zijlstra return 0; 2030391e43daSPeter Zijlstra return hrtimer_is_hres_active(&rq->hrtick_timer); 2031391e43daSPeter Zijlstra } 2032391e43daSPeter Zijlstra 2033391e43daSPeter Zijlstra void hrtick_start(struct rq *rq, u64 delay); 2034391e43daSPeter Zijlstra 2035b39e66eaSMike Galbraith #else 2036b39e66eaSMike Galbraith 2037b39e66eaSMike Galbraith static inline int hrtick_enabled(struct rq *rq) 2038b39e66eaSMike Galbraith { 2039b39e66eaSMike Galbraith return 0; 2040b39e66eaSMike Galbraith } 2041b39e66eaSMike Galbraith 2042391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_HRTICK */ 2043391e43daSPeter Zijlstra 20441567c3e3SGiovanni Gherdovich #ifndef arch_scale_freq_tick 20451567c3e3SGiovanni Gherdovich static __always_inline 20461567c3e3SGiovanni Gherdovich void arch_scale_freq_tick(void) 20471567c3e3SGiovanni Gherdovich { 20481567c3e3SGiovanni Gherdovich } 20491567c3e3SGiovanni Gherdovich #endif 20501567c3e3SGiovanni Gherdovich 2051dfbca41fSPeter Zijlstra #ifndef arch_scale_freq_capacity 2052dfbca41fSPeter Zijlstra static __always_inline 20537673c8a4SJuri Lelli unsigned long arch_scale_freq_capacity(int cpu) 2054dfbca41fSPeter Zijlstra { 2055dfbca41fSPeter Zijlstra return SCHED_CAPACITY_SCALE; 2056dfbca41fSPeter Zijlstra } 2057dfbca41fSPeter Zijlstra #endif 2058b5b4860dSVincent Guittot 20597e1a9208SJuri Lelli #ifdef CONFIG_SMP 2060c1a280b6SThomas Gleixner #ifdef CONFIG_PREEMPTION 2061391e43daSPeter Zijlstra 2062391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); 2063391e43daSPeter Zijlstra 2064391e43daSPeter Zijlstra /* 2065391e43daSPeter Zijlstra * fair double_lock_balance: Safely acquires both rq->locks in a fair 2066391e43daSPeter Zijlstra * way at the expense of forcing extra atomic operations in all 2067391e43daSPeter Zijlstra * invocations. This assures that the double_lock is acquired using the 2068391e43daSPeter Zijlstra * same underlying policy as the spinlock_t on this architecture, which 2069391e43daSPeter Zijlstra * reduces latency compared to the unfair variant below. However, it 2070391e43daSPeter Zijlstra * also adds more overhead and therefore may reduce throughput. 2071391e43daSPeter Zijlstra */ 2072391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2073391e43daSPeter Zijlstra __releases(this_rq->lock) 2074391e43daSPeter Zijlstra __acquires(busiest->lock) 2075391e43daSPeter Zijlstra __acquires(this_rq->lock) 2076391e43daSPeter Zijlstra { 2077391e43daSPeter Zijlstra raw_spin_unlock(&this_rq->lock); 2078391e43daSPeter Zijlstra double_rq_lock(this_rq, busiest); 2079391e43daSPeter Zijlstra 2080391e43daSPeter Zijlstra return 1; 2081391e43daSPeter Zijlstra } 2082391e43daSPeter Zijlstra 2083391e43daSPeter Zijlstra #else 2084391e43daSPeter Zijlstra /* 2085391e43daSPeter Zijlstra * Unfair double_lock_balance: Optimizes throughput at the expense of 2086391e43daSPeter Zijlstra * latency by eliminating extra atomic operations when the locks are 208797fb7a0aSIngo Molnar * already in proper order on entry. This favors lower CPU-ids and will 208897fb7a0aSIngo Molnar * grant the double lock to lower CPUs over higher ids under contention, 2089391e43daSPeter Zijlstra * regardless of entry order into the function. 
2090391e43daSPeter Zijlstra */ 2091391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2092391e43daSPeter Zijlstra __releases(this_rq->lock) 2093391e43daSPeter Zijlstra __acquires(busiest->lock) 2094391e43daSPeter Zijlstra __acquires(this_rq->lock) 2095391e43daSPeter Zijlstra { 2096391e43daSPeter Zijlstra int ret = 0; 2097391e43daSPeter Zijlstra 2098391e43daSPeter Zijlstra if (unlikely(!raw_spin_trylock(&busiest->lock))) { 2099391e43daSPeter Zijlstra if (busiest < this_rq) { 2100391e43daSPeter Zijlstra raw_spin_unlock(&this_rq->lock); 2101391e43daSPeter Zijlstra raw_spin_lock(&busiest->lock); 2102391e43daSPeter Zijlstra raw_spin_lock_nested(&this_rq->lock, 2103391e43daSPeter Zijlstra SINGLE_DEPTH_NESTING); 2104391e43daSPeter Zijlstra ret = 1; 2105391e43daSPeter Zijlstra } else 2106391e43daSPeter Zijlstra raw_spin_lock_nested(&busiest->lock, 2107391e43daSPeter Zijlstra SINGLE_DEPTH_NESTING); 2108391e43daSPeter Zijlstra } 2109391e43daSPeter Zijlstra return ret; 2110391e43daSPeter Zijlstra } 2111391e43daSPeter Zijlstra 2112c1a280b6SThomas Gleixner #endif /* CONFIG_PREEMPTION */ 2113391e43daSPeter Zijlstra 2114391e43daSPeter Zijlstra /* 2115391e43daSPeter Zijlstra * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 2116391e43daSPeter Zijlstra */ 2117391e43daSPeter Zijlstra static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 2118391e43daSPeter Zijlstra { 2119391e43daSPeter Zijlstra if (unlikely(!irqs_disabled())) { 212097fb7a0aSIngo Molnar /* printk() doesn't work well under rq->lock */ 2121391e43daSPeter Zijlstra raw_spin_unlock(&this_rq->lock); 2122391e43daSPeter Zijlstra BUG_ON(1); 2123391e43daSPeter Zijlstra } 2124391e43daSPeter Zijlstra 2125391e43daSPeter Zijlstra return _double_lock_balance(this_rq, busiest); 2126391e43daSPeter Zijlstra } 2127391e43daSPeter Zijlstra 2128391e43daSPeter Zijlstra static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 2129391e43daSPeter Zijlstra __releases(busiest->lock) 2130391e43daSPeter Zijlstra { 2131391e43daSPeter Zijlstra raw_spin_unlock(&busiest->lock); 2132391e43daSPeter Zijlstra lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 2133391e43daSPeter Zijlstra } 2134391e43daSPeter Zijlstra 213574602315SPeter Zijlstra static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 213674602315SPeter Zijlstra { 213774602315SPeter Zijlstra if (l1 > l2) 213874602315SPeter Zijlstra swap(l1, l2); 213974602315SPeter Zijlstra 214074602315SPeter Zijlstra spin_lock(l1); 214174602315SPeter Zijlstra spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 214274602315SPeter Zijlstra } 214374602315SPeter Zijlstra 214460e69eedSMike Galbraith static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 214560e69eedSMike Galbraith { 214660e69eedSMike Galbraith if (l1 > l2) 214760e69eedSMike Galbraith swap(l1, l2); 214860e69eedSMike Galbraith 214960e69eedSMike Galbraith spin_lock_irq(l1); 215060e69eedSMike Galbraith spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 215160e69eedSMike Galbraith } 215260e69eedSMike Galbraith 215374602315SPeter Zijlstra static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 215474602315SPeter Zijlstra { 215574602315SPeter Zijlstra if (l1 > l2) 215674602315SPeter Zijlstra swap(l1, l2); 215774602315SPeter Zijlstra 215874602315SPeter Zijlstra raw_spin_lock(l1); 215974602315SPeter Zijlstra raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 216074602315SPeter Zijlstra } 216174602315SPeter Zijlstra 
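/*
 * Minimal usage sketch: sketch_pull_one_task() is a hypothetical function,
 * not part of this header, showing the double_lock_balance() protocol. The
 * call may drop this_rq->lock, so any state observed beforehand must be
 * revalidated once both locks are held.
 */
static inline int sketch_pull_one_task(struct rq *this_rq, struct rq *busiest)
{
	int moved = 0;

	lockdep_assert_held(&this_rq->lock);

	double_lock_balance(this_rq, busiest);

	/* ... re-check state, pick a task on busiest and move it here ... */

	double_unlock_balance(this_rq, busiest);

	return moved;
}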
2162391e43daSPeter Zijlstra /* 2163391e43daSPeter Zijlstra * double_rq_lock - safely lock two runqueues 2164391e43daSPeter Zijlstra * 2165391e43daSPeter Zijlstra * Note this does not disable interrupts like task_rq_lock, 2166391e43daSPeter Zijlstra * you need to do so manually before calling. 2167391e43daSPeter Zijlstra */ 2168391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2169391e43daSPeter Zijlstra __acquires(rq1->lock) 2170391e43daSPeter Zijlstra __acquires(rq2->lock) 2171391e43daSPeter Zijlstra { 2172391e43daSPeter Zijlstra BUG_ON(!irqs_disabled()); 2173391e43daSPeter Zijlstra if (rq1 == rq2) { 2174391e43daSPeter Zijlstra raw_spin_lock(&rq1->lock); 2175391e43daSPeter Zijlstra __acquire(rq2->lock); /* Fake it out ;) */ 2176391e43daSPeter Zijlstra } else { 2177391e43daSPeter Zijlstra if (rq1 < rq2) { 2178391e43daSPeter Zijlstra raw_spin_lock(&rq1->lock); 2179391e43daSPeter Zijlstra raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 2180391e43daSPeter Zijlstra } else { 2181391e43daSPeter Zijlstra raw_spin_lock(&rq2->lock); 2182391e43daSPeter Zijlstra raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 2183391e43daSPeter Zijlstra } 2184391e43daSPeter Zijlstra } 2185391e43daSPeter Zijlstra } 2186391e43daSPeter Zijlstra 2187391e43daSPeter Zijlstra /* 2188391e43daSPeter Zijlstra * double_rq_unlock - safely unlock two runqueues 2189391e43daSPeter Zijlstra * 2190391e43daSPeter Zijlstra * Note this does not restore interrupts like task_rq_unlock, 2191391e43daSPeter Zijlstra * you need to do so manually after calling. 2192391e43daSPeter Zijlstra */ 2193391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2194391e43daSPeter Zijlstra __releases(rq1->lock) 2195391e43daSPeter Zijlstra __releases(rq2->lock) 2196391e43daSPeter Zijlstra { 2197391e43daSPeter Zijlstra raw_spin_unlock(&rq1->lock); 2198391e43daSPeter Zijlstra if (rq1 != rq2) 2199391e43daSPeter Zijlstra raw_spin_unlock(&rq2->lock); 2200391e43daSPeter Zijlstra else 2201391e43daSPeter Zijlstra __release(rq2->lock); 2202391e43daSPeter Zijlstra } 2203391e43daSPeter Zijlstra 2204f2cb1360SIngo Molnar extern void set_rq_online (struct rq *rq); 2205f2cb1360SIngo Molnar extern void set_rq_offline(struct rq *rq); 2206f2cb1360SIngo Molnar extern bool sched_smp_initialized; 2207f2cb1360SIngo Molnar 2208391e43daSPeter Zijlstra #else /* CONFIG_SMP */ 2209391e43daSPeter Zijlstra 2210391e43daSPeter Zijlstra /* 2211391e43daSPeter Zijlstra * double_rq_lock - safely lock two runqueues 2212391e43daSPeter Zijlstra * 2213391e43daSPeter Zijlstra * Note this does not disable interrupts like task_rq_lock, 2214391e43daSPeter Zijlstra * you need to do so manually before calling. 
2215391e43daSPeter Zijlstra */ 2216391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2217391e43daSPeter Zijlstra __acquires(rq1->lock) 2218391e43daSPeter Zijlstra __acquires(rq2->lock) 2219391e43daSPeter Zijlstra { 2220391e43daSPeter Zijlstra BUG_ON(!irqs_disabled()); 2221391e43daSPeter Zijlstra BUG_ON(rq1 != rq2); 2222391e43daSPeter Zijlstra raw_spin_lock(&rq1->lock); 2223391e43daSPeter Zijlstra __acquire(rq2->lock); /* Fake it out ;) */ 2224391e43daSPeter Zijlstra } 2225391e43daSPeter Zijlstra 2226391e43daSPeter Zijlstra /* 2227391e43daSPeter Zijlstra * double_rq_unlock - safely unlock two runqueues 2228391e43daSPeter Zijlstra * 2229391e43daSPeter Zijlstra * Note this does not restore interrupts like task_rq_unlock, 2230391e43daSPeter Zijlstra * you need to do so manually after calling. 2231391e43daSPeter Zijlstra */ 2232391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2233391e43daSPeter Zijlstra __releases(rq1->lock) 2234391e43daSPeter Zijlstra __releases(rq2->lock) 2235391e43daSPeter Zijlstra { 2236391e43daSPeter Zijlstra BUG_ON(rq1 != rq2); 2237391e43daSPeter Zijlstra raw_spin_unlock(&rq1->lock); 2238391e43daSPeter Zijlstra __release(rq2->lock); 2239391e43daSPeter Zijlstra } 2240391e43daSPeter Zijlstra 2241391e43daSPeter Zijlstra #endif 2242391e43daSPeter Zijlstra 2243391e43daSPeter Zijlstra extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2244391e43daSPeter Zijlstra extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 22456b55c965SSrikar Dronamraju 22466b55c965SSrikar Dronamraju #ifdef CONFIG_SCHED_DEBUG 22479469eb01SPeter Zijlstra extern bool sched_debug_enabled; 22489469eb01SPeter Zijlstra 2249391e43daSPeter Zijlstra extern void print_cfs_stats(struct seq_file *m, int cpu); 2250391e43daSPeter Zijlstra extern void print_rt_stats(struct seq_file *m, int cpu); 2251acb32132SWanpeng Li extern void print_dl_stats(struct seq_file *m, int cpu); 2252f6a34630SMathieu Malaterre extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2253f6a34630SMathieu Malaterre extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2254f6a34630SMathieu Malaterre extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2255397f2378SSrikar Dronamraju #ifdef CONFIG_NUMA_BALANCING 2256397f2378SSrikar Dronamraju extern void 2257397f2378SSrikar Dronamraju show_numa_stats(struct task_struct *p, struct seq_file *m); 2258397f2378SSrikar Dronamraju extern void 2259397f2378SSrikar Dronamraju print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 2260397f2378SSrikar Dronamraju unsigned long tpf, unsigned long gsf, unsigned long gpf); 2261397f2378SSrikar Dronamraju #endif /* CONFIG_NUMA_BALANCING */ 2262397f2378SSrikar Dronamraju #endif /* CONFIG_SCHED_DEBUG */ 2263391e43daSPeter Zijlstra 2264391e43daSPeter Zijlstra extern void init_cfs_rq(struct cfs_rq *cfs_rq); 226507c54f7aSAbel Vesa extern void init_rt_rq(struct rt_rq *rt_rq); 226607c54f7aSAbel Vesa extern void init_dl_rq(struct dl_rq *dl_rq); 2267391e43daSPeter Zijlstra 22681ee14e6cSBen Segall extern void cfs_bandwidth_usage_inc(void); 22691ee14e6cSBen Segall extern void cfs_bandwidth_usage_dec(void); 22701c792db7SSuresh Siddha 22713451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON 227200357f5eSPeter Zijlstra #define NOHZ_BALANCE_KICK_BIT 0 227300357f5eSPeter Zijlstra #define NOHZ_STATS_KICK_BIT 1 2274a22e47a4SPeter Zijlstra 2275a22e47a4SPeter Zijlstra #define 
NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 2276b7031a02SPeter Zijlstra #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 2277b7031a02SPeter Zijlstra 2278b7031a02SPeter Zijlstra #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK) 22791c792db7SSuresh Siddha 22801c792db7SSuresh Siddha #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 228120a5c8ccSThomas Gleixner 228200357f5eSPeter Zijlstra extern void nohz_balance_exit_idle(struct rq *rq); 228320a5c8ccSThomas Gleixner #else 228400357f5eSPeter Zijlstra static inline void nohz_balance_exit_idle(struct rq *rq) { } 22851c792db7SSuresh Siddha #endif 228673fbec60SFrederic Weisbecker 2287daec5798SLuca Abeni 2288daec5798SLuca Abeni #ifdef CONFIG_SMP 2289daec5798SLuca Abeni static inline 2290daec5798SLuca Abeni void __dl_update(struct dl_bw *dl_b, s64 bw) 2291daec5798SLuca Abeni { 2292daec5798SLuca Abeni struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); 2293daec5798SLuca Abeni int i; 2294daec5798SLuca Abeni 2295daec5798SLuca Abeni RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2296daec5798SLuca Abeni "sched RCU must be held"); 2297daec5798SLuca Abeni for_each_cpu_and(i, rd->span, cpu_active_mask) { 2298daec5798SLuca Abeni struct rq *rq = cpu_rq(i); 2299daec5798SLuca Abeni 2300daec5798SLuca Abeni rq->dl.extra_bw += bw; 2301daec5798SLuca Abeni } 2302daec5798SLuca Abeni } 2303daec5798SLuca Abeni #else 2304daec5798SLuca Abeni static inline 2305daec5798SLuca Abeni void __dl_update(struct dl_bw *dl_b, s64 bw) 2306daec5798SLuca Abeni { 2307daec5798SLuca Abeni struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); 2308daec5798SLuca Abeni 2309daec5798SLuca Abeni dl->extra_bw += bw; 2310daec5798SLuca Abeni } 2311daec5798SLuca Abeni #endif 2312daec5798SLuca Abeni 2313daec5798SLuca Abeni 231473fbec60SFrederic Weisbecker #ifdef CONFIG_IRQ_TIME_ACCOUNTING 231519d23dbfSFrederic Weisbecker struct irqtime { 231625e2d8c1SFrederic Weisbecker u64 total; 2317a499a5a1SFrederic Weisbecker u64 tick_delta; 231819d23dbfSFrederic Weisbecker u64 irq_start_time; 231919d23dbfSFrederic Weisbecker struct u64_stats_sync sync; 232019d23dbfSFrederic Weisbecker }; 232173fbec60SFrederic Weisbecker 232219d23dbfSFrederic Weisbecker DECLARE_PER_CPU(struct irqtime, cpu_irqtime); 232373fbec60SFrederic Weisbecker 232425e2d8c1SFrederic Weisbecker /* 232525e2d8c1SFrederic Weisbecker * Returns the irqtime minus the softirq time computed by ksoftirqd. 232625e2d8c1SFrederic Weisbecker * Otherwise ksoftirqd's own runtime would be subtracted from its sum_exec_runtime 232725e2d8c1SFrederic Weisbecker * and it would never move forward. 232825e2d8c1SFrederic Weisbecker */ 232973fbec60SFrederic Weisbecker static inline u64 irq_time_read(int cpu) 233073fbec60SFrederic Weisbecker { 233119d23dbfSFrederic Weisbecker struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 233219d23dbfSFrederic Weisbecker unsigned int seq; 233319d23dbfSFrederic Weisbecker u64 total; 233473fbec60SFrederic Weisbecker 233573fbec60SFrederic Weisbecker do { 233619d23dbfSFrederic Weisbecker seq = __u64_stats_fetch_begin(&irqtime->sync); 233725e2d8c1SFrederic Weisbecker total = irqtime->total; 233819d23dbfSFrederic Weisbecker } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 233973fbec60SFrederic Weisbecker 234019d23dbfSFrederic Weisbecker return total; 234173fbec60SFrederic Weisbecker } 234273fbec60SFrederic Weisbecker #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2343adaf9fcdSRafael J. Wysocki 2344adaf9fcdSRafael J.
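/*
 * Writer-side counterpart of the irq_time_read() retry loop above, shown
 * only to illustrate the u64_stats protocol; the real accounting update
 * lives in kernel/sched/cputime.c. The write section is bracketed so that
 * 32-bit readers never observe a torn 64-bit total; "delta" stands for the
 * newly accounted IRQ time:
 *
 *	u64_stats_update_begin(&irqtime->sync);
 *	irqtime->total += delta;
 *	irqtime->tick_delta += delta;
 *	u64_stats_update_end(&irqtime->sync);
 */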
Wysocki #ifdef CONFIG_CPU_FREQ 2345b10abd0aSJoel Fernandes (Google) DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); 2346adaf9fcdSRafael J. Wysocki 2347adaf9fcdSRafael J. Wysocki /** 2348adaf9fcdSRafael J. Wysocki * cpufreq_update_util - Take a note about CPU utilization changes. 234912bde33dSRafael J. Wysocki * @rq: Runqueue to carry out the update for. 235058919e83SRafael J. Wysocki * @flags: Update reason flags. 2351adaf9fcdSRafael J. Wysocki * 235258919e83SRafael J. Wysocki * This function is called by the scheduler on the CPU whose utilization is 235358919e83SRafael J. Wysocki * being updated. 2354adaf9fcdSRafael J. Wysocki * 2355adaf9fcdSRafael J. Wysocki * It can only be called from RCU-sched read-side critical sections. 2356adaf9fcdSRafael J. Wysocki * 2357adaf9fcdSRafael J. Wysocki * The way cpufreq is currently arranged requires it to evaluate the CPU 2358adaf9fcdSRafael J. Wysocki * performance state (frequency/voltage) on a regular basis to prevent it from 2359adaf9fcdSRafael J. Wysocki * being stuck in a completely inadequate performance level for too long. 2360e0367b12SJuri Lelli * That is not guaranteed to happen if the updates are only triggered from CFS 2361e0367b12SJuri Lelli * and DL, though, because they may not be coming in if only RT tasks are 2362e0367b12SJuri Lelli * active all the time (or there are RT tasks only). 2363adaf9fcdSRafael J. Wysocki * 2364e0367b12SJuri Lelli * As a workaround for that issue, this function is called periodically by the 2365e0367b12SJuri Lelli * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 2366adaf9fcdSRafael J. Wysocki * but that really is a band-aid. Going forward it should be replaced with 2367e0367b12SJuri Lelli * solutions targeted more specifically at RT tasks. 2368adaf9fcdSRafael J. Wysocki */ 236912bde33dSRafael J. Wysocki static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 2370adaf9fcdSRafael J. Wysocki { 237158919e83SRafael J. Wysocki struct update_util_data *data; 237258919e83SRafael J. Wysocki 2373674e7541SViresh Kumar data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, 2374674e7541SViresh Kumar cpu_of(rq))); 237558919e83SRafael J. Wysocki if (data) 237612bde33dSRafael J. Wysocki data->func(data, rq_clock(rq), flags); 237712bde33dSRafael J. Wysocki } 2378adaf9fcdSRafael J. Wysocki #else 237912bde33dSRafael J. Wysocki static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} 2380adaf9fcdSRafael J. Wysocki #endif /* CONFIG_CPU_FREQ */ 2381be53f58fSLinus Torvalds 2382982d9cdcSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK 2383686516b5SValentin Schneider unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 23849d20ad7dSPatrick Bellasi 238546609ce2SQais Yousef /** 238646609ce2SQais Yousef * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values. 238746609ce2SQais Yousef * @rq: The rq to clamp against. Must not be NULL. 238846609ce2SQais Yousef * @util: The util value to clamp. 238946609ce2SQais Yousef * @p: The task to clamp against. Can be NULL if you want to clamp 239046609ce2SQais Yousef * against @rq only. 239146609ce2SQais Yousef * 239246609ce2SQais Yousef * Clamps the passed @util to the max(@rq, @p) effective uclamp values. 
239346609ce2SQais Yousef * 239446609ce2SQais Yousef * If sched_uclamp_used static key is disabled, then just return the util 239546609ce2SQais Yousef * without any clamping since uclamp aggregation at the rq level in the fast 239646609ce2SQais Yousef * path is disabled, rendering this operation a NOP. 239746609ce2SQais Yousef * 239846609ce2SQais Yousef * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It 239946609ce2SQais Yousef * will return the correct effective uclamp value of the task even if the 240046609ce2SQais Yousef * static key is disabled. 240146609ce2SQais Yousef */ 24029d20ad7dSPatrick Bellasi static __always_inline 2403d2b58a28SValentin Schneider unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 24049d20ad7dSPatrick Bellasi struct task_struct *p) 2405982d9cdcSPatrick Bellasi { 240646609ce2SQais Yousef unsigned long min_util; 240746609ce2SQais Yousef unsigned long max_util; 240846609ce2SQais Yousef 240946609ce2SQais Yousef if (!static_branch_likely(&sched_uclamp_used)) 241046609ce2SQais Yousef return util; 241146609ce2SQais Yousef 241246609ce2SQais Yousef min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); 241346609ce2SQais Yousef max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); 2414982d9cdcSPatrick Bellasi 24159d20ad7dSPatrick Bellasi if (p) { 24169d20ad7dSPatrick Bellasi min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN)); 24179d20ad7dSPatrick Bellasi max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX)); 24189d20ad7dSPatrick Bellasi } 24199d20ad7dSPatrick Bellasi 2420982d9cdcSPatrick Bellasi /* 2421982d9cdcSPatrick Bellasi * Since CPU's {min,max}_util clamps are MAX aggregated considering 2422982d9cdcSPatrick Bellasi * RUNNABLE tasks with _different_ clamps, we can end up with an 2423982d9cdcSPatrick Bellasi * inversion. Fix it now when the clamps are applied. 2424982d9cdcSPatrick Bellasi */ 2425982d9cdcSPatrick Bellasi if (unlikely(min_util >= max_util)) 2426982d9cdcSPatrick Bellasi return min_util; 2427982d9cdcSPatrick Bellasi 2428982d9cdcSPatrick Bellasi return clamp(util, min_util, max_util); 2429982d9cdcSPatrick Bellasi } 243046609ce2SQais Yousef 243146609ce2SQais Yousef /* 243246609ce2SQais Yousef * When uclamp is compiled in, the aggregation at rq level is 'turned off' 243346609ce2SQais Yousef * by default in the fast path and only gets turned on once userspace performs 243446609ce2SQais Yousef * an operation that requires it. 243546609ce2SQais Yousef * 243646609ce2SQais Yousef * Returns true if userspace opted-in to use uclamp and aggregation at rq level 243746609ce2SQais Yousef * hence is active. 243846609ce2SQais Yousef */ 243946609ce2SQais Yousef static inline bool uclamp_is_used(void) 244046609ce2SQais Yousef { 244146609ce2SQais Yousef return static_branch_likely(&sched_uclamp_used); 244246609ce2SQais Yousef } 2443982d9cdcSPatrick Bellasi #else /* CONFIG_UCLAMP_TASK */ 2444d2b58a28SValentin Schneider static inline 2445d2b58a28SValentin Schneider unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 24469d20ad7dSPatrick Bellasi struct task_struct *p) 24479d20ad7dSPatrick Bellasi { 24489d20ad7dSPatrick Bellasi return util; 24499d20ad7dSPatrick Bellasi } 245046609ce2SQais Yousef 245146609ce2SQais Yousef static inline bool uclamp_is_used(void) 245246609ce2SQais Yousef { 245346609ce2SQais Yousef return false; 245446609ce2SQais Yousef } 2455982d9cdcSPatrick Bellasi #endif /* CONFIG_UCLAMP_TASK */ 2456982d9cdcSPatrick Bellasi 24579bdcb44eSRafael J. 
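/*
 * Worked example for uclamp_rq_util_with() above (numbers are arbitrary,
 * on the SCHED_CAPACITY_SCALE == 1024 scale): with rq clamps min = 200 and
 * max = 512, and a runnable task whose effective clamps are min = 300 and
 * max = 400, the aggregated window is [max(200, 300), max(512, 400)] =
 * [300, 512]. A raw util of 100 is then raised to 300, a raw util of 800
 * is cut down to 512, and values in between pass through unchanged. If the
 * aggregation ever inverts (min_util >= max_util), min_util wins.
 */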
Wysocki #ifdef arch_scale_freq_capacity 24589bdcb44eSRafael J. Wysocki # ifndef arch_scale_freq_invariant 245997fb7a0aSIngo Molnar # define arch_scale_freq_invariant() true 24609bdcb44eSRafael J. Wysocki # endif 246197fb7a0aSIngo Molnar #else 246297fb7a0aSIngo Molnar # define arch_scale_freq_invariant() false 24639bdcb44eSRafael J. Wysocki #endif 2464d4edd662SJuri Lelli 246510a35e68SVincent Guittot #ifdef CONFIG_SMP 246610a35e68SVincent Guittot static inline unsigned long capacity_orig_of(int cpu) 246710a35e68SVincent Guittot { 246810a35e68SVincent Guittot return cpu_rq(cpu)->cpu_capacity_orig; 246910a35e68SVincent Guittot } 247010a35e68SVincent Guittot #endif 247110a35e68SVincent Guittot 2472938e5e4bSQuentin Perret /** 2473938e5e4bSQuentin Perret * enum schedutil_type - CPU utilization type 2474938e5e4bSQuentin Perret * @FREQUENCY_UTIL: Utilization used to select frequency 2475938e5e4bSQuentin Perret * @ENERGY_UTIL: Utilization used during energy calculation 2476938e5e4bSQuentin Perret * 2477938e5e4bSQuentin Perret * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time 2478938e5e4bSQuentin Perret * need to be aggregated differently depending on the usage made of them. This 2479938e5e4bSQuentin Perret * enum is used within schedutil_cpu_util() to differentiate the types of 2480938e5e4bSQuentin Perret * utilization expected by the callers, and adjust the aggregation accordingly. 2481938e5e4bSQuentin Perret */ 2482938e5e4bSQuentin Perret enum schedutil_type { 2483938e5e4bSQuentin Perret FREQUENCY_UTIL, 2484938e5e4bSQuentin Perret ENERGY_UTIL, 2485938e5e4bSQuentin Perret }; 2486938e5e4bSQuentin Perret 2487af24bde8SPatrick Bellasi #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 2488938e5e4bSQuentin Perret 2489af24bde8SPatrick Bellasi unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, 2490af24bde8SPatrick Bellasi unsigned long max, enum schedutil_type type, 2491af24bde8SPatrick Bellasi struct task_struct *p); 2492938e5e4bSQuentin Perret 24938cc90515SVincent Guittot static inline unsigned long cpu_bw_dl(struct rq *rq) 2494d4edd662SJuri Lelli { 2495d4edd662SJuri Lelli return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 2496d4edd662SJuri Lelli } 2497d4edd662SJuri Lelli 24988cc90515SVincent Guittot static inline unsigned long cpu_util_dl(struct rq *rq) 24998cc90515SVincent Guittot { 25008cc90515SVincent Guittot return READ_ONCE(rq->avg_dl.util_avg); 25018cc90515SVincent Guittot } 25028cc90515SVincent Guittot 2503d4edd662SJuri Lelli static inline unsigned long cpu_util_cfs(struct rq *rq) 2504d4edd662SJuri Lelli { 2505a07630b8SPatrick Bellasi unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); 2506a07630b8SPatrick Bellasi 2507a07630b8SPatrick Bellasi if (sched_feat(UTIL_EST)) { 2508a07630b8SPatrick Bellasi util = max_t(unsigned long, util, 2509a07630b8SPatrick Bellasi READ_ONCE(rq->cfs.avg.util_est.enqueued)); 2510a07630b8SPatrick Bellasi } 2511a07630b8SPatrick Bellasi 2512a07630b8SPatrick Bellasi return util; 2513d4edd662SJuri Lelli } 2514371bf427SVincent Guittot 2515371bf427SVincent Guittot static inline unsigned long cpu_util_rt(struct rq *rq) 2516371bf427SVincent Guittot { 2517dfa444dcSVincent Guittot return READ_ONCE(rq->avg_rt.util_avg); 2518371bf427SVincent Guittot } 2519938e5e4bSQuentin Perret #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2520af24bde8SPatrick Bellasi static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, 2521af24bde8SPatrick Bellasi unsigned long max, enum schedutil_type type, 2522af24bde8SPatrick Bellasi struct
task_struct *p) 2523938e5e4bSQuentin Perret { 2524af24bde8SPatrick Bellasi return 0; 2525938e5e4bSQuentin Perret } 2526af24bde8SPatrick Bellasi #endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 25279033ea11SVincent Guittot 252811d4afd4SVincent Guittot #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 25299033ea11SVincent Guittot static inline unsigned long cpu_util_irq(struct rq *rq) 25309033ea11SVincent Guittot { 25319033ea11SVincent Guittot return rq->avg_irq.util_avg; 25329033ea11SVincent Guittot } 25332e62c474SVincent Guittot 25342e62c474SVincent Guittot static inline 25352e62c474SVincent Guittot unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 25362e62c474SVincent Guittot { 25372e62c474SVincent Guittot util *= (max - irq); 25382e62c474SVincent Guittot util /= max; 25392e62c474SVincent Guittot 25402e62c474SVincent Guittot return util; 25412e62c474SVincent Guittot 25422e62c474SVincent Guittot } 25439033ea11SVincent Guittot #else 25449033ea11SVincent Guittot static inline unsigned long cpu_util_irq(struct rq *rq) 25459033ea11SVincent Guittot { 25469033ea11SVincent Guittot return 0; 25479033ea11SVincent Guittot } 25489033ea11SVincent Guittot 25492e62c474SVincent Guittot static inline 25502e62c474SVincent Guittot unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 25512e62c474SVincent Guittot { 25522e62c474SVincent Guittot return util; 25532e62c474SVincent Guittot } 2554794a56ebSJuri Lelli #endif 25556aa140faSQuentin Perret 2556531b5c9fSQuentin Perret #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 25571f74de87SQuentin Perret 2558f8a696f2SPeter Zijlstra #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 2559f8a696f2SPeter Zijlstra 2560f8a696f2SPeter Zijlstra DECLARE_STATIC_KEY_FALSE(sched_energy_present); 2561f8a696f2SPeter Zijlstra 2562f8a696f2SPeter Zijlstra static inline bool sched_energy_enabled(void) 2563f8a696f2SPeter Zijlstra { 2564f8a696f2SPeter Zijlstra return static_branch_unlikely(&sched_energy_present); 2565f8a696f2SPeter Zijlstra } 2566f8a696f2SPeter Zijlstra 2567f8a696f2SPeter Zijlstra #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 2568f8a696f2SPeter Zijlstra 2569f8a696f2SPeter Zijlstra #define perf_domain_span(pd) NULL 2570f8a696f2SPeter Zijlstra static inline bool sched_energy_enabled(void) { return false; } 2571f8a696f2SPeter Zijlstra 2572f8a696f2SPeter Zijlstra #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2573227a4aadSMathieu Desnoyers 2574227a4aadSMathieu Desnoyers #ifdef CONFIG_MEMBARRIER 2575227a4aadSMathieu Desnoyers /* 2576227a4aadSMathieu Desnoyers * The scheduler provides memory barriers required by membarrier between: 2577227a4aadSMathieu Desnoyers * - prior user-space memory accesses and store to rq->membarrier_state, 2578227a4aadSMathieu Desnoyers * - store to rq->membarrier_state and following user-space memory accesses. 2579227a4aadSMathieu Desnoyers * In the same way it provides those guarantees around store to rq->curr. 
2580227a4aadSMathieu Desnoyers */ 2581227a4aadSMathieu Desnoyers static inline void membarrier_switch_mm(struct rq *rq, 2582227a4aadSMathieu Desnoyers struct mm_struct *prev_mm, 2583227a4aadSMathieu Desnoyers struct mm_struct *next_mm) 2584227a4aadSMathieu Desnoyers { 2585227a4aadSMathieu Desnoyers int membarrier_state; 2586227a4aadSMathieu Desnoyers 2587227a4aadSMathieu Desnoyers if (prev_mm == next_mm) 2588227a4aadSMathieu Desnoyers return; 2589227a4aadSMathieu Desnoyers 2590227a4aadSMathieu Desnoyers membarrier_state = atomic_read(&next_mm->membarrier_state); 2591227a4aadSMathieu Desnoyers if (READ_ONCE(rq->membarrier_state) == membarrier_state) 2592227a4aadSMathieu Desnoyers return; 2593227a4aadSMathieu Desnoyers 2594227a4aadSMathieu Desnoyers WRITE_ONCE(rq->membarrier_state, membarrier_state); 2595227a4aadSMathieu Desnoyers } 2596227a4aadSMathieu Desnoyers #else 2597227a4aadSMathieu Desnoyers static inline void membarrier_switch_mm(struct rq *rq, 2598227a4aadSMathieu Desnoyers struct mm_struct *prev_mm, 2599227a4aadSMathieu Desnoyers struct mm_struct *next_mm) 2600227a4aadSMathieu Desnoyers { 2601227a4aadSMathieu Desnoyers } 2602227a4aadSMathieu Desnoyers #endif 260352262ee5SMel Gorman 260452262ee5SMel Gorman #ifdef CONFIG_SMP 260552262ee5SMel Gorman static inline bool is_per_cpu_kthread(struct task_struct *p) 260652262ee5SMel Gorman { 260752262ee5SMel Gorman if (!(p->flags & PF_KTHREAD)) 260852262ee5SMel Gorman return false; 260952262ee5SMel Gorman 261052262ee5SMel Gorman if (p->nr_cpus_allowed != 1) 261152262ee5SMel Gorman return false; 261252262ee5SMel Gorman 261352262ee5SMel Gorman return true; 261452262ee5SMel Gorman } 261552262ee5SMel Gorman #endif 2616b3212fe2SThomas Gleixner 2617b3212fe2SThomas Gleixner void swake_up_all_locked(struct swait_queue_head *q); 2618b3212fe2SThomas Gleixner void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 2619
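/*
 * Arithmetic sketch for scale_irq_capacity() defined earlier (values are
 * arbitrary, on the SCHED_CAPACITY_SCALE == 1024 scale): with max = 1024
 * and an IRQ utilization of irq = 256, the remaining utilization is scaled
 * by (max - irq) / max = 768/1024, so util = 512 becomes
 * 512 * 768 / 1024 = 384, reflecting that the CPU time consumed by
 * IRQ/steal handling is not available to the CFS/RT/DL signals being
 * scaled.
 */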