1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */ 297fb7a0aSIngo Molnar /* 397fb7a0aSIngo Molnar * Scheduler internal types and methods: 497fb7a0aSIngo Molnar */ 5391e43daSPeter Zijlstra #include <linux/sched.h> 6325ea10cSIngo Molnar 7dfc3401aSIngo Molnar #include <linux/sched/autogroup.h> 8e6017571SIngo Molnar #include <linux/sched/clock.h> 9325ea10cSIngo Molnar #include <linux/sched/coredump.h> 1055687da1SIngo Molnar #include <linux/sched/cpufreq.h> 11325ea10cSIngo Molnar #include <linux/sched/cputime.h> 12325ea10cSIngo Molnar #include <linux/sched/deadline.h> 13b17b0153SIngo Molnar #include <linux/sched/debug.h> 14ef8bd77fSIngo Molnar #include <linux/sched/hotplug.h> 15325ea10cSIngo Molnar #include <linux/sched/idle.h> 16325ea10cSIngo Molnar #include <linux/sched/init.h> 17325ea10cSIngo Molnar #include <linux/sched/isolation.h> 18325ea10cSIngo Molnar #include <linux/sched/jobctl.h> 19325ea10cSIngo Molnar #include <linux/sched/loadavg.h> 20325ea10cSIngo Molnar #include <linux/sched/mm.h> 21325ea10cSIngo Molnar #include <linux/sched/nohz.h> 22325ea10cSIngo Molnar #include <linux/sched/numa_balancing.h> 23325ea10cSIngo Molnar #include <linux/sched/prio.h> 24325ea10cSIngo Molnar #include <linux/sched/rt.h> 25325ea10cSIngo Molnar #include <linux/sched/signal.h> 26321a874aSThomas Gleixner #include <linux/sched/smt.h> 27325ea10cSIngo Molnar #include <linux/sched/stat.h> 28325ea10cSIngo Molnar #include <linux/sched/sysctl.h> 2929930025SIngo Molnar #include <linux/sched/task.h> 3068db0cf1SIngo Molnar #include <linux/sched/task_stack.h> 31325ea10cSIngo Molnar #include <linux/sched/topology.h> 32325ea10cSIngo Molnar #include <linux/sched/user.h> 33325ea10cSIngo Molnar #include <linux/sched/wake_q.h> 34325ea10cSIngo Molnar #include <linux/sched/xacct.h> 35ef8bd77fSIngo Molnar 36325ea10cSIngo Molnar #include <uapi/linux/sched/types.h> 37325ea10cSIngo Molnar 383866e845SSteven Rostedt (Red Hat) #include <linux/binfmts.h> 39325ea10cSIngo Molnar #include <linux/blkdev.h> 40325ea10cSIngo Molnar #include <linux/compat.h> 41325ea10cSIngo Molnar #include <linux/context_tracking.h> 42325ea10cSIngo Molnar #include <linux/cpufreq.h> 43325ea10cSIngo Molnar #include <linux/cpuidle.h> 44325ea10cSIngo Molnar #include <linux/cpuset.h> 45325ea10cSIngo Molnar #include <linux/ctype.h> 46325ea10cSIngo Molnar #include <linux/debugfs.h> 47325ea10cSIngo Molnar #include <linux/delayacct.h> 486aa140faSQuentin Perret #include <linux/energy_model.h> 49325ea10cSIngo Molnar #include <linux/init_task.h> 50325ea10cSIngo Molnar #include <linux/kprobes.h> 51325ea10cSIngo Molnar #include <linux/kthread.h> 52325ea10cSIngo Molnar #include <linux/membarrier.h> 53325ea10cSIngo Molnar #include <linux/migrate.h> 54325ea10cSIngo Molnar #include <linux/mmu_context.h> 55325ea10cSIngo Molnar #include <linux/nmi.h> 56325ea10cSIngo Molnar #include <linux/proc_fs.h> 57325ea10cSIngo Molnar #include <linux/prefetch.h> 58325ea10cSIngo Molnar #include <linux/profile.h> 59eb414681SJohannes Weiner #include <linux/psi.h> 60325ea10cSIngo Molnar #include <linux/rcupdate_wait.h> 61325ea10cSIngo Molnar #include <linux/security.h> 62391e43daSPeter Zijlstra #include <linux/stop_machine.h> 63325ea10cSIngo Molnar #include <linux/suspend.h> 64325ea10cSIngo Molnar #include <linux/swait.h> 65325ea10cSIngo Molnar #include <linux/syscalls.h> 66325ea10cSIngo Molnar #include <linux/task_work.h> 67325ea10cSIngo Molnar #include <linux/tsacct_kern.h> 68325ea10cSIngo Molnar 69325ea10cSIngo Molnar #include <asm/tlb.h> 70391e43daSPeter Zijlstra 717fce777cSIngo Molnar #ifdef CONFIG_PARAVIRT 727fce777cSIngo Molnar # include <asm/paravirt.h> 737fce777cSIngo Molnar #endif 747fce777cSIngo Molnar 75391e43daSPeter Zijlstra #include "cpupri.h" 766bfd6d72SJuri Lelli #include "cpudeadline.h" 77391e43daSPeter Zijlstra 789148a3a1SPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 799148a3a1SPeter Zijlstra # define SCHED_WARN_ON(x) WARN_ONCE(x, #x) 809148a3a1SPeter Zijlstra #else 816d3aed3dSIngo Molnar # define SCHED_WARN_ON(x) ({ (void)(x), 0; }) 829148a3a1SPeter Zijlstra #endif 839148a3a1SPeter Zijlstra 8445ceebf7SPaul Gortmaker struct rq; 85442bf3aaSDaniel Lezcano struct cpuidle_state; 8645ceebf7SPaul Gortmaker 87da0c1e65SKirill Tkhai /* task_struct::on_rq states: */ 88da0c1e65SKirill Tkhai #define TASK_ON_RQ_QUEUED 1 89cca26e80SKirill Tkhai #define TASK_ON_RQ_MIGRATING 2 90da0c1e65SKirill Tkhai 91391e43daSPeter Zijlstra extern __read_mostly int scheduler_running; 92391e43daSPeter Zijlstra 9345ceebf7SPaul Gortmaker extern unsigned long calc_load_update; 9445ceebf7SPaul Gortmaker extern atomic_long_t calc_load_tasks; 9545ceebf7SPaul Gortmaker 963289bdb4SPeter Zijlstra extern void calc_global_load_tick(struct rq *this_rq); 97d60585c5SThomas Gleixner extern long calc_load_fold_active(struct rq *this_rq, long adjust); 983289bdb4SPeter Zijlstra 99391e43daSPeter Zijlstra /* 100391e43daSPeter Zijlstra * Helpers for converting nanosecond timing to jiffy resolution 101391e43daSPeter Zijlstra */ 102391e43daSPeter Zijlstra #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) 103391e43daSPeter Zijlstra 104cc1f4b1fSLi Zefan /* 105cc1f4b1fSLi Zefan * Increase resolution of nice-level calculations for 64-bit architectures. 106cc1f4b1fSLi Zefan * The extra resolution improves shares distribution and load balancing of 107cc1f4b1fSLi Zefan * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup 108cc1f4b1fSLi Zefan * hierarchies, especially on larger systems. This is not a user-visible change 109cc1f4b1fSLi Zefan * and does not change the user-interface for setting shares/weights. 110cc1f4b1fSLi Zefan * 111cc1f4b1fSLi Zefan * We increase resolution only if we have enough bits to allow this increased 11297fb7a0aSIngo Molnar * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit 11397fb7a0aSIngo Molnar * are pretty high and the returns do not justify the increased costs. 1142159197dSPeter Zijlstra * 11597fb7a0aSIngo Molnar * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to 11697fb7a0aSIngo Molnar * increase coverage and consistency always enable it on 64-bit platforms. 117cc1f4b1fSLi Zefan */ 1182159197dSPeter Zijlstra #ifdef CONFIG_64BIT 119172895e6SYuyang Du # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) 1206ecdd749SYuyang Du # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) 12126cf5222SMichael Wang # define scale_load_down(w) \ 12226cf5222SMichael Wang ({ \ 12326cf5222SMichael Wang unsigned long __w = (w); \ 12426cf5222SMichael Wang if (__w) \ 12526cf5222SMichael Wang __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ 12626cf5222SMichael Wang __w; \ 12726cf5222SMichael Wang }) 128cc1f4b1fSLi Zefan #else 129172895e6SYuyang Du # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) 130cc1f4b1fSLi Zefan # define scale_load(w) (w) 131cc1f4b1fSLi Zefan # define scale_load_down(w) (w) 132cc1f4b1fSLi Zefan #endif 133cc1f4b1fSLi Zefan 1346ecdd749SYuyang Du /* 135172895e6SYuyang Du * Task weight (visible to users) and its load (invisible to users) have 136172895e6SYuyang Du * independent resolution, but they should be well calibrated. We use 137172895e6SYuyang Du * scale_load() and scale_load_down(w) to convert between them. The 138172895e6SYuyang Du * following must be true: 139172895e6SYuyang Du * 140172895e6SYuyang Du * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD 141172895e6SYuyang Du * 1426ecdd749SYuyang Du */ 143172895e6SYuyang Du #define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT) 144391e43daSPeter Zijlstra 145391e43daSPeter Zijlstra /* 146332ac17eSDario Faggioli * Single value that decides SCHED_DEADLINE internal math precision. 147332ac17eSDario Faggioli * 10 -> just above 1us 148332ac17eSDario Faggioli * 9 -> just above 0.5us 149332ac17eSDario Faggioli */ 15097fb7a0aSIngo Molnar #define DL_SCALE 10 151332ac17eSDario Faggioli 152332ac17eSDario Faggioli /* 15397fb7a0aSIngo Molnar * Single value that denotes runtime == period, ie unlimited time. 154391e43daSPeter Zijlstra */ 155391e43daSPeter Zijlstra #define RUNTIME_INF ((u64)~0ULL) 156391e43daSPeter Zijlstra 15720f9cd2aSHenrik Austad static inline int idle_policy(int policy) 15820f9cd2aSHenrik Austad { 15920f9cd2aSHenrik Austad return policy == SCHED_IDLE; 16020f9cd2aSHenrik Austad } 161d50dde5aSDario Faggioli static inline int fair_policy(int policy) 162d50dde5aSDario Faggioli { 163d50dde5aSDario Faggioli return policy == SCHED_NORMAL || policy == SCHED_BATCH; 164d50dde5aSDario Faggioli } 165d50dde5aSDario Faggioli 166391e43daSPeter Zijlstra static inline int rt_policy(int policy) 167391e43daSPeter Zijlstra { 168d50dde5aSDario Faggioli return policy == SCHED_FIFO || policy == SCHED_RR; 169391e43daSPeter Zijlstra } 170391e43daSPeter Zijlstra 171aab03e05SDario Faggioli static inline int dl_policy(int policy) 172aab03e05SDario Faggioli { 173aab03e05SDario Faggioli return policy == SCHED_DEADLINE; 174aab03e05SDario Faggioli } 17520f9cd2aSHenrik Austad static inline bool valid_policy(int policy) 17620f9cd2aSHenrik Austad { 17720f9cd2aSHenrik Austad return idle_policy(policy) || fair_policy(policy) || 17820f9cd2aSHenrik Austad rt_policy(policy) || dl_policy(policy); 17920f9cd2aSHenrik Austad } 180aab03e05SDario Faggioli 1811da1843fSViresh Kumar static inline int task_has_idle_policy(struct task_struct *p) 1821da1843fSViresh Kumar { 1831da1843fSViresh Kumar return idle_policy(p->policy); 1841da1843fSViresh Kumar } 1851da1843fSViresh Kumar 186391e43daSPeter Zijlstra static inline int task_has_rt_policy(struct task_struct *p) 187391e43daSPeter Zijlstra { 188391e43daSPeter Zijlstra return rt_policy(p->policy); 189391e43daSPeter Zijlstra } 190391e43daSPeter Zijlstra 191aab03e05SDario Faggioli static inline int task_has_dl_policy(struct task_struct *p) 192aab03e05SDario Faggioli { 193aab03e05SDario Faggioli return dl_policy(p->policy); 194aab03e05SDario Faggioli } 195aab03e05SDario Faggioli 19607881166SJuri Lelli #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) 19707881166SJuri Lelli 198d76343c6SValentin Schneider static inline void update_avg(u64 *avg, u64 sample) 199d76343c6SValentin Schneider { 200d76343c6SValentin Schneider s64 diff = sample - *avg; 201d76343c6SValentin Schneider *avg += diff / 8; 202d76343c6SValentin Schneider } 203d76343c6SValentin Schneider 2042d3d891dSDario Faggioli /* 205794a56ebSJuri Lelli * !! For sched_setattr_nocheck() (kernel) only !! 206794a56ebSJuri Lelli * 207794a56ebSJuri Lelli * This is actually gross. :( 208794a56ebSJuri Lelli * 209794a56ebSJuri Lelli * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE 210794a56ebSJuri Lelli * tasks, but still be able to sleep. We need this on platforms that cannot 211794a56ebSJuri Lelli * atomically change clock frequency. Remove once fast switching will be 212794a56ebSJuri Lelli * available on such platforms. 213794a56ebSJuri Lelli * 214794a56ebSJuri Lelli * SUGOV stands for SchedUtil GOVernor. 215794a56ebSJuri Lelli */ 216794a56ebSJuri Lelli #define SCHED_FLAG_SUGOV 0x10000000 217794a56ebSJuri Lelli 218794a56ebSJuri Lelli static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se) 219794a56ebSJuri Lelli { 220794a56ebSJuri Lelli #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 221794a56ebSJuri Lelli return unlikely(dl_se->flags & SCHED_FLAG_SUGOV); 222794a56ebSJuri Lelli #else 223794a56ebSJuri Lelli return false; 224794a56ebSJuri Lelli #endif 225794a56ebSJuri Lelli } 226794a56ebSJuri Lelli 227794a56ebSJuri Lelli /* 2282d3d891dSDario Faggioli * Tells if entity @a should preempt entity @b. 2292d3d891dSDario Faggioli */ 230332ac17eSDario Faggioli static inline bool 231332ac17eSDario Faggioli dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b) 2322d3d891dSDario Faggioli { 233794a56ebSJuri Lelli return dl_entity_is_special(a) || 234794a56ebSJuri Lelli dl_time_before(a->deadline, b->deadline); 2352d3d891dSDario Faggioli } 2362d3d891dSDario Faggioli 237391e43daSPeter Zijlstra /* 238391e43daSPeter Zijlstra * This is the priority-queue data structure of the RT scheduling class: 239391e43daSPeter Zijlstra */ 240391e43daSPeter Zijlstra struct rt_prio_array { 241391e43daSPeter Zijlstra DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */ 242391e43daSPeter Zijlstra struct list_head queue[MAX_RT_PRIO]; 243391e43daSPeter Zijlstra }; 244391e43daSPeter Zijlstra 245391e43daSPeter Zijlstra struct rt_bandwidth { 246391e43daSPeter Zijlstra /* nests inside the rq lock: */ 247391e43daSPeter Zijlstra raw_spinlock_t rt_runtime_lock; 248391e43daSPeter Zijlstra ktime_t rt_period; 249391e43daSPeter Zijlstra u64 rt_runtime; 250391e43daSPeter Zijlstra struct hrtimer rt_period_timer; 2514cfafd30SPeter Zijlstra unsigned int rt_period_active; 252391e43daSPeter Zijlstra }; 253a5e7be3bSJuri Lelli 254a5e7be3bSJuri Lelli void __dl_clear_params(struct task_struct *p); 255a5e7be3bSJuri Lelli 256332ac17eSDario Faggioli /* 257332ac17eSDario Faggioli * To keep the bandwidth of -deadline tasks and groups under control 258332ac17eSDario Faggioli * we need some place where: 259332ac17eSDario Faggioli * - store the maximum -deadline bandwidth of the system (the group); 260332ac17eSDario Faggioli * - cache the fraction of that bandwidth that is currently allocated. 261332ac17eSDario Faggioli * 262332ac17eSDario Faggioli * This is all done in the data structure below. It is similar to the 263332ac17eSDario Faggioli * one used for RT-throttling (rt_bandwidth), with the main difference 264332ac17eSDario Faggioli * that, since here we are only interested in admission control, we 265332ac17eSDario Faggioli * do not decrease any runtime while the group "executes", neither we 266332ac17eSDario Faggioli * need a timer to replenish it. 267332ac17eSDario Faggioli * 268332ac17eSDario Faggioli * With respect to SMP, the bandwidth is given on a per-CPU basis, 269332ac17eSDario Faggioli * meaning that: 270332ac17eSDario Faggioli * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU; 271332ac17eSDario Faggioli * - dl_total_bw array contains, in the i-eth element, the currently 272332ac17eSDario Faggioli * allocated bandwidth on the i-eth CPU. 273332ac17eSDario Faggioli * Moreover, groups consume bandwidth on each CPU, while tasks only 274332ac17eSDario Faggioli * consume bandwidth on the CPU they're running on. 275332ac17eSDario Faggioli * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw 276332ac17eSDario Faggioli * that will be shown the next time the proc or cgroup controls will 277332ac17eSDario Faggioli * be red. It on its turn can be changed by writing on its own 278332ac17eSDario Faggioli * control. 279332ac17eSDario Faggioli */ 280332ac17eSDario Faggioli struct dl_bandwidth { 281332ac17eSDario Faggioli raw_spinlock_t dl_runtime_lock; 282332ac17eSDario Faggioli u64 dl_runtime; 283332ac17eSDario Faggioli u64 dl_period; 284332ac17eSDario Faggioli }; 285332ac17eSDario Faggioli 286332ac17eSDario Faggioli static inline int dl_bandwidth_enabled(void) 287332ac17eSDario Faggioli { 2881724813dSPeter Zijlstra return sysctl_sched_rt_runtime >= 0; 289332ac17eSDario Faggioli } 290332ac17eSDario Faggioli 291332ac17eSDario Faggioli struct dl_bw { 292332ac17eSDario Faggioli raw_spinlock_t lock; 29397fb7a0aSIngo Molnar u64 bw; 29497fb7a0aSIngo Molnar u64 total_bw; 295332ac17eSDario Faggioli }; 296332ac17eSDario Faggioli 297daec5798SLuca Abeni static inline void __dl_update(struct dl_bw *dl_b, s64 bw); 298daec5798SLuca Abeni 2997f51412aSJuri Lelli static inline 3008c0944ceSPeter Zijlstra void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 3017f51412aSJuri Lelli { 3027f51412aSJuri Lelli dl_b->total_bw -= tsk_bw; 303daec5798SLuca Abeni __dl_update(dl_b, (s32)tsk_bw / cpus); 3047f51412aSJuri Lelli } 3057f51412aSJuri Lelli 3067f51412aSJuri Lelli static inline 307daec5798SLuca Abeni void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 3087f51412aSJuri Lelli { 3097f51412aSJuri Lelli dl_b->total_bw += tsk_bw; 310daec5798SLuca Abeni __dl_update(dl_b, -((s32)tsk_bw / cpus)); 3117f51412aSJuri Lelli } 3127f51412aSJuri Lelli 3137f51412aSJuri Lelli static inline 3147f51412aSJuri Lelli bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw) 3157f51412aSJuri Lelli { 3167f51412aSJuri Lelli return dl_b->bw != -1 && 3177f51412aSJuri Lelli dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw; 3187f51412aSJuri Lelli } 3197f51412aSJuri Lelli 320f2cb1360SIngo Molnar extern void init_dl_bw(struct dl_bw *dl_b); 32106a76fe0SNicolas Pitre extern int sched_dl_global_validate(void); 32206a76fe0SNicolas Pitre extern void sched_dl_do_global(void); 32397fb7a0aSIngo Molnar extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr); 32406a76fe0SNicolas Pitre extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); 32506a76fe0SNicolas Pitre extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); 32606a76fe0SNicolas Pitre extern bool __checkparam_dl(const struct sched_attr *attr); 32706a76fe0SNicolas Pitre extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); 32897fb7a0aSIngo Molnar extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); 32997fb7a0aSIngo Molnar extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 33006a76fe0SNicolas Pitre extern bool dl_cpu_busy(unsigned int cpu); 331391e43daSPeter Zijlstra 332391e43daSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED 333391e43daSPeter Zijlstra 334391e43daSPeter Zijlstra #include <linux/cgroup.h> 335eb414681SJohannes Weiner #include <linux/psi.h> 336391e43daSPeter Zijlstra 337391e43daSPeter Zijlstra struct cfs_rq; 338391e43daSPeter Zijlstra struct rt_rq; 339391e43daSPeter Zijlstra 34035cf4e50SMike Galbraith extern struct list_head task_groups; 341391e43daSPeter Zijlstra 342391e43daSPeter Zijlstra struct cfs_bandwidth { 343391e43daSPeter Zijlstra #ifdef CONFIG_CFS_BANDWIDTH 344391e43daSPeter Zijlstra raw_spinlock_t lock; 345391e43daSPeter Zijlstra ktime_t period; 34697fb7a0aSIngo Molnar u64 quota; 34797fb7a0aSIngo Molnar u64 runtime; 3489c58c79aSZhihui Zhang s64 hierarchical_quota; 349391e43daSPeter Zijlstra 35066567fcbSbsegall@google.com u8 idle; 35166567fcbSbsegall@google.com u8 period_active; 35266567fcbSbsegall@google.com u8 slack_started; 35397fb7a0aSIngo Molnar struct hrtimer period_timer; 35497fb7a0aSIngo Molnar struct hrtimer slack_timer; 355391e43daSPeter Zijlstra struct list_head throttled_cfs_rq; 356391e43daSPeter Zijlstra 35797fb7a0aSIngo Molnar /* Statistics: */ 35897fb7a0aSIngo Molnar int nr_periods; 35997fb7a0aSIngo Molnar int nr_throttled; 360391e43daSPeter Zijlstra u64 throttled_time; 361391e43daSPeter Zijlstra #endif 362391e43daSPeter Zijlstra }; 363391e43daSPeter Zijlstra 36497fb7a0aSIngo Molnar /* Task group related information */ 365391e43daSPeter Zijlstra struct task_group { 366391e43daSPeter Zijlstra struct cgroup_subsys_state css; 367391e43daSPeter Zijlstra 368391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED 36997fb7a0aSIngo Molnar /* schedulable entities of this group on each CPU */ 370391e43daSPeter Zijlstra struct sched_entity **se; 37197fb7a0aSIngo Molnar /* runqueue "owned" by this group on each CPU */ 372391e43daSPeter Zijlstra struct cfs_rq **cfs_rq; 373391e43daSPeter Zijlstra unsigned long shares; 374391e43daSPeter Zijlstra 375fa6bddebSAlex Shi #ifdef CONFIG_SMP 376b0367629SWaiman Long /* 377b0367629SWaiman Long * load_avg can be heavily contended at clock tick time, so put 378b0367629SWaiman Long * it in its own cacheline separated from the fields above which 379b0367629SWaiman Long * will also be accessed at each tick. 380b0367629SWaiman Long */ 381b0367629SWaiman Long atomic_long_t load_avg ____cacheline_aligned; 382391e43daSPeter Zijlstra #endif 383fa6bddebSAlex Shi #endif 384391e43daSPeter Zijlstra 385391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 386391e43daSPeter Zijlstra struct sched_rt_entity **rt_se; 387391e43daSPeter Zijlstra struct rt_rq **rt_rq; 388391e43daSPeter Zijlstra 389391e43daSPeter Zijlstra struct rt_bandwidth rt_bandwidth; 390391e43daSPeter Zijlstra #endif 391391e43daSPeter Zijlstra 392391e43daSPeter Zijlstra struct rcu_head rcu; 393391e43daSPeter Zijlstra struct list_head list; 394391e43daSPeter Zijlstra 395391e43daSPeter Zijlstra struct task_group *parent; 396391e43daSPeter Zijlstra struct list_head siblings; 397391e43daSPeter Zijlstra struct list_head children; 398391e43daSPeter Zijlstra 399391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_AUTOGROUP 400391e43daSPeter Zijlstra struct autogroup *autogroup; 401391e43daSPeter Zijlstra #endif 402391e43daSPeter Zijlstra 403391e43daSPeter Zijlstra struct cfs_bandwidth cfs_bandwidth; 4042480c093SPatrick Bellasi 4052480c093SPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK_GROUP 4062480c093SPatrick Bellasi /* The two decimal precision [%] value requested from user-space */ 4072480c093SPatrick Bellasi unsigned int uclamp_pct[UCLAMP_CNT]; 4082480c093SPatrick Bellasi /* Clamp values requested for a task group */ 4092480c093SPatrick Bellasi struct uclamp_se uclamp_req[UCLAMP_CNT]; 4100b60ba2dSPatrick Bellasi /* Effective clamp values used for a task group */ 4110b60ba2dSPatrick Bellasi struct uclamp_se uclamp[UCLAMP_CNT]; 4122480c093SPatrick Bellasi #endif 4132480c093SPatrick Bellasi 414391e43daSPeter Zijlstra }; 415391e43daSPeter Zijlstra 416391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED 417391e43daSPeter Zijlstra #define ROOT_TASK_GROUP_LOAD NICE_0_LOAD 418391e43daSPeter Zijlstra 419391e43daSPeter Zijlstra /* 420391e43daSPeter Zijlstra * A weight of 0 or 1 can cause arithmetics problems. 421391e43daSPeter Zijlstra * A weight of a cfs_rq is the sum of weights of which entities 422391e43daSPeter Zijlstra * are queued on this cfs_rq, so a weight of a entity should not be 423391e43daSPeter Zijlstra * too large, so as the shares value of a task group. 424391e43daSPeter Zijlstra * (The default weight is 1024 - so there's no practical 425391e43daSPeter Zijlstra * limitation from this.) 426391e43daSPeter Zijlstra */ 427391e43daSPeter Zijlstra #define MIN_SHARES (1UL << 1) 428391e43daSPeter Zijlstra #define MAX_SHARES (1UL << 18) 429391e43daSPeter Zijlstra #endif 430391e43daSPeter Zijlstra 431391e43daSPeter Zijlstra typedef int (*tg_visitor)(struct task_group *, void *); 432391e43daSPeter Zijlstra 433391e43daSPeter Zijlstra extern int walk_tg_tree_from(struct task_group *from, 434391e43daSPeter Zijlstra tg_visitor down, tg_visitor up, void *data); 435391e43daSPeter Zijlstra 436391e43daSPeter Zijlstra /* 437391e43daSPeter Zijlstra * Iterate the full tree, calling @down when first entering a node and @up when 438391e43daSPeter Zijlstra * leaving it for the final time. 439391e43daSPeter Zijlstra * 440391e43daSPeter Zijlstra * Caller must hold rcu_lock or sufficient equivalent. 441391e43daSPeter Zijlstra */ 442391e43daSPeter Zijlstra static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) 443391e43daSPeter Zijlstra { 444391e43daSPeter Zijlstra return walk_tg_tree_from(&root_task_group, down, up, data); 445391e43daSPeter Zijlstra } 446391e43daSPeter Zijlstra 447391e43daSPeter Zijlstra extern int tg_nop(struct task_group *tg, void *data); 448391e43daSPeter Zijlstra 449391e43daSPeter Zijlstra extern void free_fair_sched_group(struct task_group *tg); 450391e43daSPeter Zijlstra extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent); 4518663e24dSPeter Zijlstra extern void online_fair_sched_group(struct task_group *tg); 4526fe1f348SPeter Zijlstra extern void unregister_fair_sched_group(struct task_group *tg); 453391e43daSPeter Zijlstra extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, 454391e43daSPeter Zijlstra struct sched_entity *se, int cpu, 455391e43daSPeter Zijlstra struct sched_entity *parent); 456391e43daSPeter Zijlstra extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 457391e43daSPeter Zijlstra 458391e43daSPeter Zijlstra extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); 45977a4d1a1SPeter Zijlstra extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 460391e43daSPeter Zijlstra extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); 461391e43daSPeter Zijlstra 462391e43daSPeter Zijlstra extern void free_rt_sched_group(struct task_group *tg); 463391e43daSPeter Zijlstra extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent); 464391e43daSPeter Zijlstra extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, 465391e43daSPeter Zijlstra struct sched_rt_entity *rt_se, int cpu, 466391e43daSPeter Zijlstra struct sched_rt_entity *parent); 4678887cd99SNicolas Pitre extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us); 4688887cd99SNicolas Pitre extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us); 4698887cd99SNicolas Pitre extern long sched_group_rt_runtime(struct task_group *tg); 4708887cd99SNicolas Pitre extern long sched_group_rt_period(struct task_group *tg); 4718887cd99SNicolas Pitre extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); 472391e43daSPeter Zijlstra 47325cc7da7SLi Zefan extern struct task_group *sched_create_group(struct task_group *parent); 47425cc7da7SLi Zefan extern void sched_online_group(struct task_group *tg, 47525cc7da7SLi Zefan struct task_group *parent); 47625cc7da7SLi Zefan extern void sched_destroy_group(struct task_group *tg); 47725cc7da7SLi Zefan extern void sched_offline_group(struct task_group *tg); 47825cc7da7SLi Zefan 47925cc7da7SLi Zefan extern void sched_move_task(struct task_struct *tsk); 48025cc7da7SLi Zefan 48125cc7da7SLi Zefan #ifdef CONFIG_FAIR_GROUP_SCHED 48225cc7da7SLi Zefan extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 483ad936d86SByungchul Park 484ad936d86SByungchul Park #ifdef CONFIG_SMP 485ad936d86SByungchul Park extern void set_task_rq_fair(struct sched_entity *se, 486ad936d86SByungchul Park struct cfs_rq *prev, struct cfs_rq *next); 487ad936d86SByungchul Park #else /* !CONFIG_SMP */ 488ad936d86SByungchul Park static inline void set_task_rq_fair(struct sched_entity *se, 489ad936d86SByungchul Park struct cfs_rq *prev, struct cfs_rq *next) { } 490ad936d86SByungchul Park #endif /* CONFIG_SMP */ 491ad936d86SByungchul Park #endif /* CONFIG_FAIR_GROUP_SCHED */ 49225cc7da7SLi Zefan 493391e43daSPeter Zijlstra #else /* CONFIG_CGROUP_SCHED */ 494391e43daSPeter Zijlstra 495391e43daSPeter Zijlstra struct cfs_bandwidth { }; 496391e43daSPeter Zijlstra 497391e43daSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */ 498391e43daSPeter Zijlstra 499391e43daSPeter Zijlstra /* CFS-related fields in a runqueue */ 500391e43daSPeter Zijlstra struct cfs_rq { 501391e43daSPeter Zijlstra struct load_weight load; 50297fb7a0aSIngo Molnar unsigned int nr_running; 50343e9f7f2SViresh Kumar unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */ 50443e9f7f2SViresh Kumar unsigned int idle_h_nr_running; /* SCHED_IDLE */ 505391e43daSPeter Zijlstra 506391e43daSPeter Zijlstra u64 exec_clock; 507391e43daSPeter Zijlstra u64 min_vruntime; 508391e43daSPeter Zijlstra #ifndef CONFIG_64BIT 509391e43daSPeter Zijlstra u64 min_vruntime_copy; 510391e43daSPeter Zijlstra #endif 511391e43daSPeter Zijlstra 512bfb06889SDavidlohr Bueso struct rb_root_cached tasks_timeline; 513391e43daSPeter Zijlstra 514391e43daSPeter Zijlstra /* 515391e43daSPeter Zijlstra * 'curr' points to currently running entity on this cfs_rq. 516391e43daSPeter Zijlstra * It is set to NULL otherwise (i.e when none are currently running). 517391e43daSPeter Zijlstra */ 51897fb7a0aSIngo Molnar struct sched_entity *curr; 51997fb7a0aSIngo Molnar struct sched_entity *next; 52097fb7a0aSIngo Molnar struct sched_entity *last; 52197fb7a0aSIngo Molnar struct sched_entity *skip; 522391e43daSPeter Zijlstra 523391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 524391e43daSPeter Zijlstra unsigned int nr_spread_over; 525391e43daSPeter Zijlstra #endif 526391e43daSPeter Zijlstra 5272dac754eSPaul Turner #ifdef CONFIG_SMP 5282dac754eSPaul Turner /* 5299d89c257SYuyang Du * CFS load tracking 5302dac754eSPaul Turner */ 5319d89c257SYuyang Du struct sched_avg avg; 5322a2f5d4eSPeter Zijlstra #ifndef CONFIG_64BIT 5332a2f5d4eSPeter Zijlstra u64 load_last_update_time_copy; 5342a2f5d4eSPeter Zijlstra #endif 5352a2f5d4eSPeter Zijlstra struct { 5362a2f5d4eSPeter Zijlstra raw_spinlock_t lock ____cacheline_aligned; 5372a2f5d4eSPeter Zijlstra int nr; 5382a2f5d4eSPeter Zijlstra unsigned long load_avg; 5392a2f5d4eSPeter Zijlstra unsigned long util_avg; 5409f683953SVincent Guittot unsigned long runnable_avg; 5412a2f5d4eSPeter Zijlstra } removed; 542141965c7SAlex Shi 543c566e8e9SPaul Turner #ifdef CONFIG_FAIR_GROUP_SCHED 5440e2d2aaaSPeter Zijlstra unsigned long tg_load_avg_contrib; 5450e2d2aaaSPeter Zijlstra long propagate; 5460e2d2aaaSPeter Zijlstra long prop_runnable_sum; 5470e2d2aaaSPeter Zijlstra 54882958366SPaul Turner /* 54982958366SPaul Turner * h_load = weight * f(tg) 55082958366SPaul Turner * 55182958366SPaul Turner * Where f(tg) is the recursive weight fraction assigned to 55282958366SPaul Turner * this group. 55382958366SPaul Turner */ 55482958366SPaul Turner unsigned long h_load; 55568520796SVladimir Davydov u64 last_h_load_update; 55668520796SVladimir Davydov struct sched_entity *h_load_next; 55768520796SVladimir Davydov #endif /* CONFIG_FAIR_GROUP_SCHED */ 55882958366SPaul Turner #endif /* CONFIG_SMP */ 55982958366SPaul Turner 560391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED 56197fb7a0aSIngo Molnar struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ 562391e43daSPeter Zijlstra 563391e43daSPeter Zijlstra /* 564391e43daSPeter Zijlstra * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in 565391e43daSPeter Zijlstra * a hierarchy). Non-leaf lrqs hold other higher schedulable entities 566391e43daSPeter Zijlstra * (like users, containers etc.) 567391e43daSPeter Zijlstra * 56897fb7a0aSIngo Molnar * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU. 56997fb7a0aSIngo Molnar * This list is used during load balance. 570391e43daSPeter Zijlstra */ 571391e43daSPeter Zijlstra int on_list; 572391e43daSPeter Zijlstra struct list_head leaf_cfs_rq_list; 573391e43daSPeter Zijlstra struct task_group *tg; /* group that "owns" this runqueue */ 574391e43daSPeter Zijlstra 575391e43daSPeter Zijlstra #ifdef CONFIG_CFS_BANDWIDTH 576391e43daSPeter Zijlstra int runtime_enabled; 577391e43daSPeter Zijlstra s64 runtime_remaining; 578391e43daSPeter Zijlstra 57997fb7a0aSIngo Molnar u64 throttled_clock; 58097fb7a0aSIngo Molnar u64 throttled_clock_task; 581f1b17280SPaul Turner u64 throttled_clock_task_time; 58297fb7a0aSIngo Molnar int throttled; 58397fb7a0aSIngo Molnar int throttle_count; 584391e43daSPeter Zijlstra struct list_head throttled_list; 585391e43daSPeter Zijlstra #endif /* CONFIG_CFS_BANDWIDTH */ 586391e43daSPeter Zijlstra #endif /* CONFIG_FAIR_GROUP_SCHED */ 587391e43daSPeter Zijlstra }; 588391e43daSPeter Zijlstra 589391e43daSPeter Zijlstra static inline int rt_bandwidth_enabled(void) 590391e43daSPeter Zijlstra { 591391e43daSPeter Zijlstra return sysctl_sched_rt_runtime >= 0; 592391e43daSPeter Zijlstra } 593391e43daSPeter Zijlstra 594b6366f04SSteven Rostedt /* RT IPI pull logic requires IRQ_WORK */ 5954bdced5cSSteven Rostedt (Red Hat) #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) 596b6366f04SSteven Rostedt # define HAVE_RT_PUSH_IPI 597b6366f04SSteven Rostedt #endif 598b6366f04SSteven Rostedt 599391e43daSPeter Zijlstra /* Real-Time classes' related field in a runqueue: */ 600391e43daSPeter Zijlstra struct rt_rq { 601391e43daSPeter Zijlstra struct rt_prio_array active; 602c82513e5SPeter Zijlstra unsigned int rt_nr_running; 60301d36d0aSFrederic Weisbecker unsigned int rr_nr_running; 604391e43daSPeter Zijlstra #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 605391e43daSPeter Zijlstra struct { 606391e43daSPeter Zijlstra int curr; /* highest queued rt task prio */ 607391e43daSPeter Zijlstra #ifdef CONFIG_SMP 608391e43daSPeter Zijlstra int next; /* next highest */ 609391e43daSPeter Zijlstra #endif 610391e43daSPeter Zijlstra } highest_prio; 611391e43daSPeter Zijlstra #endif 612391e43daSPeter Zijlstra #ifdef CONFIG_SMP 613391e43daSPeter Zijlstra unsigned long rt_nr_migratory; 614391e43daSPeter Zijlstra unsigned long rt_nr_total; 615391e43daSPeter Zijlstra int overloaded; 616391e43daSPeter Zijlstra struct plist_head pushable_tasks; 617371bf427SVincent Guittot 618b6366f04SSteven Rostedt #endif /* CONFIG_SMP */ 619f4ebcbc0SKirill Tkhai int rt_queued; 620f4ebcbc0SKirill Tkhai 621391e43daSPeter Zijlstra int rt_throttled; 622391e43daSPeter Zijlstra u64 rt_time; 623391e43daSPeter Zijlstra u64 rt_runtime; 624391e43daSPeter Zijlstra /* Nests inside the rq lock: */ 625391e43daSPeter Zijlstra raw_spinlock_t rt_runtime_lock; 626391e43daSPeter Zijlstra 627391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 628391e43daSPeter Zijlstra unsigned long rt_nr_boosted; 629391e43daSPeter Zijlstra 630391e43daSPeter Zijlstra struct rq *rq; 631391e43daSPeter Zijlstra struct task_group *tg; 632391e43daSPeter Zijlstra #endif 633391e43daSPeter Zijlstra }; 634391e43daSPeter Zijlstra 635296b2ffeSVincent Guittot static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) 636296b2ffeSVincent Guittot { 637296b2ffeSVincent Guittot return rt_rq->rt_queued && rt_rq->rt_nr_running; 638296b2ffeSVincent Guittot } 639296b2ffeSVincent Guittot 640aab03e05SDario Faggioli /* Deadline class' related fields in a runqueue */ 641aab03e05SDario Faggioli struct dl_rq { 642aab03e05SDario Faggioli /* runqueue is an rbtree, ordered by deadline */ 6432161573eSDavidlohr Bueso struct rb_root_cached root; 644aab03e05SDario Faggioli 645aab03e05SDario Faggioli unsigned long dl_nr_running; 6461baca4ceSJuri Lelli 6471baca4ceSJuri Lelli #ifdef CONFIG_SMP 6481baca4ceSJuri Lelli /* 6491baca4ceSJuri Lelli * Deadline values of the currently executing and the 6501baca4ceSJuri Lelli * earliest ready task on this rq. Caching these facilitates 651dfcb245eSIngo Molnar * the decision whether or not a ready but not running task 6521baca4ceSJuri Lelli * should migrate somewhere else. 6531baca4ceSJuri Lelli */ 6541baca4ceSJuri Lelli struct { 6551baca4ceSJuri Lelli u64 curr; 6561baca4ceSJuri Lelli u64 next; 6571baca4ceSJuri Lelli } earliest_dl; 6581baca4ceSJuri Lelli 6591baca4ceSJuri Lelli unsigned long dl_nr_migratory; 6601baca4ceSJuri Lelli int overloaded; 6611baca4ceSJuri Lelli 6621baca4ceSJuri Lelli /* 6631baca4ceSJuri Lelli * Tasks on this rq that can be pushed away. They are kept in 6641baca4ceSJuri Lelli * an rb-tree, ordered by tasks' deadlines, with caching 6651baca4ceSJuri Lelli * of the leftmost (earliest deadline) element. 6661baca4ceSJuri Lelli */ 6672161573eSDavidlohr Bueso struct rb_root_cached pushable_dl_tasks_root; 668332ac17eSDario Faggioli #else 669332ac17eSDario Faggioli struct dl_bw dl_bw; 6701baca4ceSJuri Lelli #endif 671e36d8677SLuca Abeni /* 672e36d8677SLuca Abeni * "Active utilization" for this runqueue: increased when a 673e36d8677SLuca Abeni * task wakes up (becomes TASK_RUNNING) and decreased when a 674e36d8677SLuca Abeni * task blocks 675e36d8677SLuca Abeni */ 676e36d8677SLuca Abeni u64 running_bw; 6774da3abceSLuca Abeni 6784da3abceSLuca Abeni /* 6798fd27231SLuca Abeni * Utilization of the tasks "assigned" to this runqueue (including 6808fd27231SLuca Abeni * the tasks that are in runqueue and the tasks that executed on this 6818fd27231SLuca Abeni * CPU and blocked). Increased when a task moves to this runqueue, and 6828fd27231SLuca Abeni * decreased when the task moves away (migrates, changes scheduling 6838fd27231SLuca Abeni * policy, or terminates). 6848fd27231SLuca Abeni * This is needed to compute the "inactive utilization" for the 6858fd27231SLuca Abeni * runqueue (inactive utilization = this_bw - running_bw). 6868fd27231SLuca Abeni */ 6878fd27231SLuca Abeni u64 this_bw; 688daec5798SLuca Abeni u64 extra_bw; 6898fd27231SLuca Abeni 6908fd27231SLuca Abeni /* 6914da3abceSLuca Abeni * Inverse of the fraction of CPU utilization that can be reclaimed 6924da3abceSLuca Abeni * by the GRUB algorithm. 6934da3abceSLuca Abeni */ 6944da3abceSLuca Abeni u64 bw_ratio; 695aab03e05SDario Faggioli }; 696aab03e05SDario Faggioli 697c0796298SVincent Guittot #ifdef CONFIG_FAIR_GROUP_SCHED 698c0796298SVincent Guittot /* An entity is a task if it doesn't "own" a runqueue */ 699c0796298SVincent Guittot #define entity_is_task(se) (!se->my_q) 7000dacee1bSVincent Guittot 7019f683953SVincent Guittot static inline void se_update_runnable(struct sched_entity *se) 7029f683953SVincent Guittot { 7039f683953SVincent Guittot if (!entity_is_task(se)) 7049f683953SVincent Guittot se->runnable_weight = se->my_q->h_nr_running; 7059f683953SVincent Guittot } 7069f683953SVincent Guittot 7079f683953SVincent Guittot static inline long se_runnable(struct sched_entity *se) 7089f683953SVincent Guittot { 7099f683953SVincent Guittot if (entity_is_task(se)) 7109f683953SVincent Guittot return !!se->on_rq; 7119f683953SVincent Guittot else 7129f683953SVincent Guittot return se->runnable_weight; 7139f683953SVincent Guittot } 7149f683953SVincent Guittot 715c0796298SVincent Guittot #else 716c0796298SVincent Guittot #define entity_is_task(se) 1 7170dacee1bSVincent Guittot 7189f683953SVincent Guittot static inline void se_update_runnable(struct sched_entity *se) {} 7199f683953SVincent Guittot 7209f683953SVincent Guittot static inline long se_runnable(struct sched_entity *se) 7219f683953SVincent Guittot { 7229f683953SVincent Guittot return !!se->on_rq; 7239f683953SVincent Guittot } 724c0796298SVincent Guittot #endif 725c0796298SVincent Guittot 726391e43daSPeter Zijlstra #ifdef CONFIG_SMP 727c0796298SVincent Guittot /* 728c0796298SVincent Guittot * XXX we want to get rid of these helpers and use the full load resolution. 729c0796298SVincent Guittot */ 730c0796298SVincent Guittot static inline long se_weight(struct sched_entity *se) 731c0796298SVincent Guittot { 732c0796298SVincent Guittot return scale_load_down(se->load.weight); 733c0796298SVincent Guittot } 734c0796298SVincent Guittot 735391e43daSPeter Zijlstra 736afe06efdSTim Chen static inline bool sched_asym_prefer(int a, int b) 737afe06efdSTim Chen { 738afe06efdSTim Chen return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); 739afe06efdSTim Chen } 740afe06efdSTim Chen 7416aa140faSQuentin Perret struct perf_domain { 7426aa140faSQuentin Perret struct em_perf_domain *em_pd; 7436aa140faSQuentin Perret struct perf_domain *next; 7446aa140faSQuentin Perret struct rcu_head rcu; 7456aa140faSQuentin Perret }; 7466aa140faSQuentin Perret 747630246a0SQuentin Perret /* Scheduling group status flags */ 748630246a0SQuentin Perret #define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */ 7492802bf3cSMorten Rasmussen #define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */ 750630246a0SQuentin Perret 751391e43daSPeter Zijlstra /* 752391e43daSPeter Zijlstra * We add the notion of a root-domain which will be used to define per-domain 753391e43daSPeter Zijlstra * variables. Each exclusive cpuset essentially defines an island domain by 75497fb7a0aSIngo Molnar * fully partitioning the member CPUs from any other cpuset. Whenever a new 755391e43daSPeter Zijlstra * exclusive cpuset is created, we also create and attach a new root-domain 756391e43daSPeter Zijlstra * object. 757391e43daSPeter Zijlstra * 758391e43daSPeter Zijlstra */ 759391e43daSPeter Zijlstra struct root_domain { 760391e43daSPeter Zijlstra atomic_t refcount; 761391e43daSPeter Zijlstra atomic_t rto_count; 762391e43daSPeter Zijlstra struct rcu_head rcu; 763391e43daSPeter Zijlstra cpumask_var_t span; 764391e43daSPeter Zijlstra cpumask_var_t online; 765391e43daSPeter Zijlstra 766757ffdd7SValentin Schneider /* 767757ffdd7SValentin Schneider * Indicate pullable load on at least one CPU, e.g: 768757ffdd7SValentin Schneider * - More than one runnable task 769757ffdd7SValentin Schneider * - Running task is misfit 770757ffdd7SValentin Schneider */ 771575638d1SValentin Schneider int overload; 7724486edd1STim Chen 7732802bf3cSMorten Rasmussen /* Indicate one or more cpus over-utilized (tipping point) */ 7742802bf3cSMorten Rasmussen int overutilized; 7752802bf3cSMorten Rasmussen 776391e43daSPeter Zijlstra /* 7771baca4ceSJuri Lelli * The bit corresponding to a CPU gets set here if such CPU has more 7781baca4ceSJuri Lelli * than one runnable -deadline task (as it is below for RT tasks). 7791baca4ceSJuri Lelli */ 7801baca4ceSJuri Lelli cpumask_var_t dlo_mask; 7811baca4ceSJuri Lelli atomic_t dlo_count; 782332ac17eSDario Faggioli struct dl_bw dl_bw; 7836bfd6d72SJuri Lelli struct cpudl cpudl; 7841baca4ceSJuri Lelli 7854bdced5cSSteven Rostedt (Red Hat) #ifdef HAVE_RT_PUSH_IPI 7864bdced5cSSteven Rostedt (Red Hat) /* 7874bdced5cSSteven Rostedt (Red Hat) * For IPI pull requests, loop across the rto_mask. 7884bdced5cSSteven Rostedt (Red Hat) */ 7894bdced5cSSteven Rostedt (Red Hat) struct irq_work rto_push_work; 7904bdced5cSSteven Rostedt (Red Hat) raw_spinlock_t rto_lock; 7914bdced5cSSteven Rostedt (Red Hat) /* These are only updated and read within rto_lock */ 7924bdced5cSSteven Rostedt (Red Hat) int rto_loop; 7934bdced5cSSteven Rostedt (Red Hat) int rto_cpu; 7944bdced5cSSteven Rostedt (Red Hat) /* These atomics are updated outside of a lock */ 7954bdced5cSSteven Rostedt (Red Hat) atomic_t rto_loop_next; 7964bdced5cSSteven Rostedt (Red Hat) atomic_t rto_loop_start; 7974bdced5cSSteven Rostedt (Red Hat) #endif 7981baca4ceSJuri Lelli /* 799391e43daSPeter Zijlstra * The "RT overload" flag: it gets set if a CPU has more than 800391e43daSPeter Zijlstra * one runnable RT task. 801391e43daSPeter Zijlstra */ 802391e43daSPeter Zijlstra cpumask_var_t rto_mask; 803391e43daSPeter Zijlstra struct cpupri cpupri; 804cd92bfd3SDietmar Eggemann 805cd92bfd3SDietmar Eggemann unsigned long max_cpu_capacity; 8066aa140faSQuentin Perret 8076aa140faSQuentin Perret /* 8086aa140faSQuentin Perret * NULL-terminated list of performance domains intersecting with the 8096aa140faSQuentin Perret * CPUs of the rd. Protected by RCU. 8106aa140faSQuentin Perret */ 8117ba7319fSJoel Fernandes (Google) struct perf_domain __rcu *pd; 812391e43daSPeter Zijlstra }; 813391e43daSPeter Zijlstra 814f2cb1360SIngo Molnar extern void init_defrootdomain(void); 8158d5dc512SPeter Zijlstra extern int sched_init_domains(const struct cpumask *cpu_map); 816f2cb1360SIngo Molnar extern void rq_attach_root(struct rq *rq, struct root_domain *rd); 817364f5665SSteven Rostedt (VMware) extern void sched_get_rd(struct root_domain *rd); 818364f5665SSteven Rostedt (VMware) extern void sched_put_rd(struct root_domain *rd); 819391e43daSPeter Zijlstra 8204bdced5cSSteven Rostedt (Red Hat) #ifdef HAVE_RT_PUSH_IPI 8214bdced5cSSteven Rostedt (Red Hat) extern void rto_push_irq_work_func(struct irq_work *work); 8224bdced5cSSteven Rostedt (Red Hat) #endif 823391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 824391e43daSPeter Zijlstra 82569842cbaSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK 82669842cbaSPatrick Bellasi /* 82769842cbaSPatrick Bellasi * struct uclamp_bucket - Utilization clamp bucket 82869842cbaSPatrick Bellasi * @value: utilization clamp value for tasks on this clamp bucket 82969842cbaSPatrick Bellasi * @tasks: number of RUNNABLE tasks on this clamp bucket 83069842cbaSPatrick Bellasi * 83169842cbaSPatrick Bellasi * Keep track of how many tasks are RUNNABLE for a given utilization 83269842cbaSPatrick Bellasi * clamp value. 83369842cbaSPatrick Bellasi */ 83469842cbaSPatrick Bellasi struct uclamp_bucket { 83569842cbaSPatrick Bellasi unsigned long value : bits_per(SCHED_CAPACITY_SCALE); 83669842cbaSPatrick Bellasi unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE); 83769842cbaSPatrick Bellasi }; 83869842cbaSPatrick Bellasi 83969842cbaSPatrick Bellasi /* 84069842cbaSPatrick Bellasi * struct uclamp_rq - rq's utilization clamp 84169842cbaSPatrick Bellasi * @value: currently active clamp values for a rq 84269842cbaSPatrick Bellasi * @bucket: utilization clamp buckets affecting a rq 84369842cbaSPatrick Bellasi * 84469842cbaSPatrick Bellasi * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values. 84569842cbaSPatrick Bellasi * A clamp value is affecting a rq when there is at least one task RUNNABLE 84669842cbaSPatrick Bellasi * (or actually running) with that value. 84769842cbaSPatrick Bellasi * 84869842cbaSPatrick Bellasi * There are up to UCLAMP_CNT possible different clamp values, currently there 84969842cbaSPatrick Bellasi * are only two: minimum utilization and maximum utilization. 85069842cbaSPatrick Bellasi * 85169842cbaSPatrick Bellasi * All utilization clamping values are MAX aggregated, since: 85269842cbaSPatrick Bellasi * - for util_min: we want to run the CPU at least at the max of the minimum 85369842cbaSPatrick Bellasi * utilization required by its currently RUNNABLE tasks. 85469842cbaSPatrick Bellasi * - for util_max: we want to allow the CPU to run up to the max of the 85569842cbaSPatrick Bellasi * maximum utilization allowed by its currently RUNNABLE tasks. 85669842cbaSPatrick Bellasi * 85769842cbaSPatrick Bellasi * Since on each system we expect only a limited number of different 85869842cbaSPatrick Bellasi * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track 85969842cbaSPatrick Bellasi * the metrics required to compute all the per-rq utilization clamp values. 86069842cbaSPatrick Bellasi */ 86169842cbaSPatrick Bellasi struct uclamp_rq { 86269842cbaSPatrick Bellasi unsigned int value; 86369842cbaSPatrick Bellasi struct uclamp_bucket bucket[UCLAMP_BUCKETS]; 86469842cbaSPatrick Bellasi }; 86569842cbaSPatrick Bellasi #endif /* CONFIG_UCLAMP_TASK */ 86669842cbaSPatrick Bellasi 867391e43daSPeter Zijlstra /* 868391e43daSPeter Zijlstra * This is the main, per-CPU runqueue data structure. 869391e43daSPeter Zijlstra * 870391e43daSPeter Zijlstra * Locking rule: those places that want to lock multiple runqueues 871391e43daSPeter Zijlstra * (such as the load balancing or the thread migration code), lock 872391e43daSPeter Zijlstra * acquire operations must be ordered by ascending &runqueue. 873391e43daSPeter Zijlstra */ 874391e43daSPeter Zijlstra struct rq { 875391e43daSPeter Zijlstra /* runqueue lock: */ 876391e43daSPeter Zijlstra raw_spinlock_t lock; 877391e43daSPeter Zijlstra 878391e43daSPeter Zijlstra /* 879391e43daSPeter Zijlstra * nr_running and cpu_load should be in the same cacheline because 880391e43daSPeter Zijlstra * remote CPUs use both these fields when doing load calculation. 881391e43daSPeter Zijlstra */ 882c82513e5SPeter Zijlstra unsigned int nr_running; 8830ec8aa00SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING 8840ec8aa00SPeter Zijlstra unsigned int nr_numa_running; 8850ec8aa00SPeter Zijlstra unsigned int nr_preferred_running; 886a4739ecaSSrikar Dronamraju unsigned int numa_migrate_on; 8870ec8aa00SPeter Zijlstra #endif 8883451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON 8899fd81dd5SFrederic Weisbecker #ifdef CONFIG_SMP 890e022e0d3SPeter Zijlstra unsigned long last_blocked_load_update_tick; 891f643ea22SVincent Guittot unsigned int has_blocked_load; 89290b5363aSPeter Zijlstra (Intel) call_single_data_t nohz_csd; 8939fd81dd5SFrederic Weisbecker #endif /* CONFIG_SMP */ 89400357f5eSPeter Zijlstra unsigned int nohz_tick_stopped; 895a22e47a4SPeter Zijlstra atomic_t nohz_flags; 8969fd81dd5SFrederic Weisbecker #endif /* CONFIG_NO_HZ_COMMON */ 897dcdedb24SFrederic Weisbecker 898391e43daSPeter Zijlstra unsigned long nr_load_updates; 899391e43daSPeter Zijlstra u64 nr_switches; 900391e43daSPeter Zijlstra 90169842cbaSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK 90269842cbaSPatrick Bellasi /* Utilization clamp values based on CPU's RUNNABLE tasks */ 90369842cbaSPatrick Bellasi struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned; 904e496187dSPatrick Bellasi unsigned int uclamp_flags; 905e496187dSPatrick Bellasi #define UCLAMP_FLAG_IDLE 0x01 90669842cbaSPatrick Bellasi #endif 90769842cbaSPatrick Bellasi 908391e43daSPeter Zijlstra struct cfs_rq cfs; 909391e43daSPeter Zijlstra struct rt_rq rt; 910aab03e05SDario Faggioli struct dl_rq dl; 911391e43daSPeter Zijlstra 912391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED 91397fb7a0aSIngo Molnar /* list of leaf cfs_rq on this CPU: */ 914391e43daSPeter Zijlstra struct list_head leaf_cfs_rq_list; 9159c2791f9SVincent Guittot struct list_head *tmp_alone_branch; 916a35b6466SPeter Zijlstra #endif /* CONFIG_FAIR_GROUP_SCHED */ 917a35b6466SPeter Zijlstra 918391e43daSPeter Zijlstra /* 919391e43daSPeter Zijlstra * This is part of a global counter where only the total sum 920391e43daSPeter Zijlstra * over all CPUs matters. A task can increase this counter on 921391e43daSPeter Zijlstra * one CPU and if it got migrated afterwards it may decrease 922391e43daSPeter Zijlstra * it on another CPU. Always updated under the runqueue lock: 923391e43daSPeter Zijlstra */ 924391e43daSPeter Zijlstra unsigned long nr_uninterruptible; 925391e43daSPeter Zijlstra 9264104a562SMadhuparna Bhowmik struct task_struct __rcu *curr; 92797fb7a0aSIngo Molnar struct task_struct *idle; 92897fb7a0aSIngo Molnar struct task_struct *stop; 929391e43daSPeter Zijlstra unsigned long next_balance; 930391e43daSPeter Zijlstra struct mm_struct *prev_mm; 931391e43daSPeter Zijlstra 932cb42c9a3SMatt Fleming unsigned int clock_update_flags; 933391e43daSPeter Zijlstra u64 clock; 93423127296SVincent Guittot /* Ensure that all clocks are in the same cache line */ 93523127296SVincent Guittot u64 clock_task ____cacheline_aligned; 93623127296SVincent Guittot u64 clock_pelt; 93723127296SVincent Guittot unsigned long lost_idle_time; 938391e43daSPeter Zijlstra 939391e43daSPeter Zijlstra atomic_t nr_iowait; 940391e43daSPeter Zijlstra 941227a4aadSMathieu Desnoyers #ifdef CONFIG_MEMBARRIER 942227a4aadSMathieu Desnoyers int membarrier_state; 943227a4aadSMathieu Desnoyers #endif 944227a4aadSMathieu Desnoyers 945391e43daSPeter Zijlstra #ifdef CONFIG_SMP 946391e43daSPeter Zijlstra struct root_domain *rd; 947994aeb7aSJoel Fernandes (Google) struct sched_domain __rcu *sd; 948391e43daSPeter Zijlstra 949ced549faSNicolas Pitre unsigned long cpu_capacity; 950ca6d75e6SVincent Guittot unsigned long cpu_capacity_orig; 951391e43daSPeter Zijlstra 952e3fca9e7SPeter Zijlstra struct callback_head *balance_callback; 953e3fca9e7SPeter Zijlstra 954391e43daSPeter Zijlstra unsigned char idle_balance; 95597fb7a0aSIngo Molnar 9563b1baa64SMorten Rasmussen unsigned long misfit_task_load; 9573b1baa64SMorten Rasmussen 958391e43daSPeter Zijlstra /* For active balancing */ 959391e43daSPeter Zijlstra int active_balance; 960391e43daSPeter Zijlstra int push_cpu; 961391e43daSPeter Zijlstra struct cpu_stop_work active_balance_work; 96297fb7a0aSIngo Molnar 96397fb7a0aSIngo Molnar /* CPU of this runqueue: */ 964391e43daSPeter Zijlstra int cpu; 965391e43daSPeter Zijlstra int online; 966391e43daSPeter Zijlstra 967367456c7SPeter Zijlstra struct list_head cfs_tasks; 968367456c7SPeter Zijlstra 969371bf427SVincent Guittot struct sched_avg avg_rt; 9703727e0e1SVincent Guittot struct sched_avg avg_dl; 97111d4afd4SVincent Guittot #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 97291c27493SVincent Guittot struct sched_avg avg_irq; 97391c27493SVincent Guittot #endif 97476504793SThara Gopinath #ifdef CONFIG_SCHED_THERMAL_PRESSURE 97576504793SThara Gopinath struct sched_avg avg_thermal; 97676504793SThara Gopinath #endif 977391e43daSPeter Zijlstra u64 idle_stamp; 978391e43daSPeter Zijlstra u64 avg_idle; 9799bd721c5SJason Low 9809bd721c5SJason Low /* This is used to determine avg_idle's max value */ 9819bd721c5SJason Low u64 max_idle_balance_cost; 98290b5363aSPeter Zijlstra (Intel) #endif /* CONFIG_SMP */ 983391e43daSPeter Zijlstra 984391e43daSPeter Zijlstra #ifdef CONFIG_IRQ_TIME_ACCOUNTING 985391e43daSPeter Zijlstra u64 prev_irq_time; 986391e43daSPeter Zijlstra #endif 987391e43daSPeter Zijlstra #ifdef CONFIG_PARAVIRT 988391e43daSPeter Zijlstra u64 prev_steal_time; 989391e43daSPeter Zijlstra #endif 990391e43daSPeter Zijlstra #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 991391e43daSPeter Zijlstra u64 prev_steal_time_rq; 992391e43daSPeter Zijlstra #endif 993391e43daSPeter Zijlstra 994391e43daSPeter Zijlstra /* calc_load related fields */ 995391e43daSPeter Zijlstra unsigned long calc_load_update; 996391e43daSPeter Zijlstra long calc_load_active; 997391e43daSPeter Zijlstra 998391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_HRTICK 999391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1000966a9671SYing Huang call_single_data_t hrtick_csd; 1001391e43daSPeter Zijlstra #endif 1002391e43daSPeter Zijlstra struct hrtimer hrtick_timer; 1003391e43daSPeter Zijlstra #endif 1004391e43daSPeter Zijlstra 1005391e43daSPeter Zijlstra #ifdef CONFIG_SCHEDSTATS 1006391e43daSPeter Zijlstra /* latency stats */ 1007391e43daSPeter Zijlstra struct sched_info rq_sched_info; 1008391e43daSPeter Zijlstra unsigned long long rq_cpu_time; 1009391e43daSPeter Zijlstra /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ 1010391e43daSPeter Zijlstra 1011391e43daSPeter Zijlstra /* sys_sched_yield() stats */ 1012391e43daSPeter Zijlstra unsigned int yld_count; 1013391e43daSPeter Zijlstra 1014391e43daSPeter Zijlstra /* schedule() stats */ 1015391e43daSPeter Zijlstra unsigned int sched_count; 1016391e43daSPeter Zijlstra unsigned int sched_goidle; 1017391e43daSPeter Zijlstra 1018391e43daSPeter Zijlstra /* try_to_wake_up() stats */ 1019391e43daSPeter Zijlstra unsigned int ttwu_count; 1020391e43daSPeter Zijlstra unsigned int ttwu_local; 1021391e43daSPeter Zijlstra #endif 1022391e43daSPeter Zijlstra 1023391e43daSPeter Zijlstra #ifdef CONFIG_SMP 102490b5363aSPeter Zijlstra (Intel) call_single_data_t wake_csd; 1025391e43daSPeter Zijlstra struct llist_head wake_list; 1026391e43daSPeter Zijlstra #endif 1027442bf3aaSDaniel Lezcano 1028442bf3aaSDaniel Lezcano #ifdef CONFIG_CPU_IDLE 1029442bf3aaSDaniel Lezcano /* Must be inspected within a rcu lock section */ 1030442bf3aaSDaniel Lezcano struct cpuidle_state *idle_state; 1031442bf3aaSDaniel Lezcano #endif 1032391e43daSPeter Zijlstra }; 1033391e43daSPeter Zijlstra 103462478d99SVincent Guittot #ifdef CONFIG_FAIR_GROUP_SCHED 103562478d99SVincent Guittot 103662478d99SVincent Guittot /* CPU runqueue to which this cfs_rq is attached */ 103762478d99SVincent Guittot static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 103862478d99SVincent Guittot { 103962478d99SVincent Guittot return cfs_rq->rq; 104062478d99SVincent Guittot } 104162478d99SVincent Guittot 104262478d99SVincent Guittot #else 104362478d99SVincent Guittot 104462478d99SVincent Guittot static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 104562478d99SVincent Guittot { 104662478d99SVincent Guittot return container_of(cfs_rq, struct rq, cfs); 104762478d99SVincent Guittot } 104862478d99SVincent Guittot #endif 104962478d99SVincent Guittot 1050391e43daSPeter Zijlstra static inline int cpu_of(struct rq *rq) 1051391e43daSPeter Zijlstra { 1052391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1053391e43daSPeter Zijlstra return rq->cpu; 1054391e43daSPeter Zijlstra #else 1055391e43daSPeter Zijlstra return 0; 1056391e43daSPeter Zijlstra #endif 1057391e43daSPeter Zijlstra } 1058391e43daSPeter Zijlstra 10591b568f0aSPeter Zijlstra 10601b568f0aSPeter Zijlstra #ifdef CONFIG_SCHED_SMT 10611b568f0aSPeter Zijlstra extern void __update_idle_core(struct rq *rq); 10621b568f0aSPeter Zijlstra 10631b568f0aSPeter Zijlstra static inline void update_idle_core(struct rq *rq) 10641b568f0aSPeter Zijlstra { 10651b568f0aSPeter Zijlstra if (static_branch_unlikely(&sched_smt_present)) 10661b568f0aSPeter Zijlstra __update_idle_core(rq); 10671b568f0aSPeter Zijlstra } 10681b568f0aSPeter Zijlstra 10691b568f0aSPeter Zijlstra #else 10701b568f0aSPeter Zijlstra static inline void update_idle_core(struct rq *rq) { } 10711b568f0aSPeter Zijlstra #endif 10721b568f0aSPeter Zijlstra 10738b06c55bSPranith Kumar DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 1074391e43daSPeter Zijlstra 1075518cd623SPeter Zijlstra #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 10764a32fea9SChristoph Lameter #define this_rq() this_cpu_ptr(&runqueues) 1077518cd623SPeter Zijlstra #define task_rq(p) cpu_rq(task_cpu(p)) 1078518cd623SPeter Zijlstra #define cpu_curr(cpu) (cpu_rq(cpu)->curr) 10794a32fea9SChristoph Lameter #define raw_rq() raw_cpu_ptr(&runqueues) 1080518cd623SPeter Zijlstra 10811f351d7fSJohannes Weiner extern void update_rq_clock(struct rq *rq); 10821f351d7fSJohannes Weiner 1083cebde6d6SPeter Zijlstra static inline u64 __rq_clock_broken(struct rq *rq) 1084cebde6d6SPeter Zijlstra { 1085316c1608SJason Low return READ_ONCE(rq->clock); 1086cebde6d6SPeter Zijlstra } 1087cebde6d6SPeter Zijlstra 1088cb42c9a3SMatt Fleming /* 1089cb42c9a3SMatt Fleming * rq::clock_update_flags bits 1090cb42c9a3SMatt Fleming * 1091cb42c9a3SMatt Fleming * %RQCF_REQ_SKIP - will request skipping of clock update on the next 1092cb42c9a3SMatt Fleming * call to __schedule(). This is an optimisation to avoid 1093cb42c9a3SMatt Fleming * neighbouring rq clock updates. 1094cb42c9a3SMatt Fleming * 1095cb42c9a3SMatt Fleming * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is 1096cb42c9a3SMatt Fleming * in effect and calls to update_rq_clock() are being ignored. 1097cb42c9a3SMatt Fleming * 1098cb42c9a3SMatt Fleming * %RQCF_UPDATED - is a debug flag that indicates whether a call has been 1099cb42c9a3SMatt Fleming * made to update_rq_clock() since the last time rq::lock was pinned. 1100cb42c9a3SMatt Fleming * 1101cb42c9a3SMatt Fleming * If inside of __schedule(), clock_update_flags will have been 1102cb42c9a3SMatt Fleming * shifted left (a left shift is a cheap operation for the fast path 1103cb42c9a3SMatt Fleming * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use, 1104cb42c9a3SMatt Fleming * 1105cb42c9a3SMatt Fleming * if (rq-clock_update_flags >= RQCF_UPDATED) 1106cb42c9a3SMatt Fleming * 1107cb42c9a3SMatt Fleming * to check if %RQCF_UPADTED is set. It'll never be shifted more than 1108cb42c9a3SMatt Fleming * one position though, because the next rq_unpin_lock() will shift it 1109cb42c9a3SMatt Fleming * back. 1110cb42c9a3SMatt Fleming */ 1111cb42c9a3SMatt Fleming #define RQCF_REQ_SKIP 0x01 1112cb42c9a3SMatt Fleming #define RQCF_ACT_SKIP 0x02 1113cb42c9a3SMatt Fleming #define RQCF_UPDATED 0x04 1114cb42c9a3SMatt Fleming 1115cb42c9a3SMatt Fleming static inline void assert_clock_updated(struct rq *rq) 1116cb42c9a3SMatt Fleming { 1117cb42c9a3SMatt Fleming /* 1118cb42c9a3SMatt Fleming * The only reason for not seeing a clock update since the 1119cb42c9a3SMatt Fleming * last rq_pin_lock() is if we're currently skipping updates. 1120cb42c9a3SMatt Fleming */ 1121cb42c9a3SMatt Fleming SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); 1122cb42c9a3SMatt Fleming } 1123cb42c9a3SMatt Fleming 112478becc27SFrederic Weisbecker static inline u64 rq_clock(struct rq *rq) 112578becc27SFrederic Weisbecker { 1126cebde6d6SPeter Zijlstra lockdep_assert_held(&rq->lock); 1127cb42c9a3SMatt Fleming assert_clock_updated(rq); 1128cb42c9a3SMatt Fleming 112978becc27SFrederic Weisbecker return rq->clock; 113078becc27SFrederic Weisbecker } 113178becc27SFrederic Weisbecker 113278becc27SFrederic Weisbecker static inline u64 rq_clock_task(struct rq *rq) 113378becc27SFrederic Weisbecker { 1134cebde6d6SPeter Zijlstra lockdep_assert_held(&rq->lock); 1135cb42c9a3SMatt Fleming assert_clock_updated(rq); 1136cb42c9a3SMatt Fleming 113778becc27SFrederic Weisbecker return rq->clock_task; 113878becc27SFrederic Weisbecker } 113978becc27SFrederic Weisbecker 114005289b90SThara Gopinath /** 114105289b90SThara Gopinath * By default the decay is the default pelt decay period. 114205289b90SThara Gopinath * The decay shift can change the decay period in 114305289b90SThara Gopinath * multiples of 32. 114405289b90SThara Gopinath * Decay shift Decay period(ms) 114505289b90SThara Gopinath * 0 32 114605289b90SThara Gopinath * 1 64 114705289b90SThara Gopinath * 2 128 114805289b90SThara Gopinath * 3 256 114905289b90SThara Gopinath * 4 512 115005289b90SThara Gopinath */ 115105289b90SThara Gopinath extern int sched_thermal_decay_shift; 115205289b90SThara Gopinath 115305289b90SThara Gopinath static inline u64 rq_clock_thermal(struct rq *rq) 115405289b90SThara Gopinath { 115505289b90SThara Gopinath return rq_clock_task(rq) >> sched_thermal_decay_shift; 115605289b90SThara Gopinath } 115705289b90SThara Gopinath 1158adcc8da8SDavidlohr Bueso static inline void rq_clock_skip_update(struct rq *rq) 11599edfbfedSPeter Zijlstra { 11609edfbfedSPeter Zijlstra lockdep_assert_held(&rq->lock); 1161cb42c9a3SMatt Fleming rq->clock_update_flags |= RQCF_REQ_SKIP; 1162adcc8da8SDavidlohr Bueso } 1163adcc8da8SDavidlohr Bueso 1164adcc8da8SDavidlohr Bueso /* 1165595058b6SDavidlohr Bueso * See rt task throttling, which is the only time a skip 1166adcc8da8SDavidlohr Bueso * request is cancelled. 1167adcc8da8SDavidlohr Bueso */ 1168adcc8da8SDavidlohr Bueso static inline void rq_clock_cancel_skipupdate(struct rq *rq) 1169adcc8da8SDavidlohr Bueso { 1170adcc8da8SDavidlohr Bueso lockdep_assert_held(&rq->lock); 1171cb42c9a3SMatt Fleming rq->clock_update_flags &= ~RQCF_REQ_SKIP; 11729edfbfedSPeter Zijlstra } 11739edfbfedSPeter Zijlstra 1174d8ac8971SMatt Fleming struct rq_flags { 1175d8ac8971SMatt Fleming unsigned long flags; 1176d8ac8971SMatt Fleming struct pin_cookie cookie; 1177cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG 1178cb42c9a3SMatt Fleming /* 1179cb42c9a3SMatt Fleming * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the 1180cb42c9a3SMatt Fleming * current pin context is stashed here in case it needs to be 1181cb42c9a3SMatt Fleming * restored in rq_repin_lock(). 1182cb42c9a3SMatt Fleming */ 1183cb42c9a3SMatt Fleming unsigned int clock_update_flags; 1184cb42c9a3SMatt Fleming #endif 1185d8ac8971SMatt Fleming }; 1186d8ac8971SMatt Fleming 1187d8ac8971SMatt Fleming static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) 1188d8ac8971SMatt Fleming { 1189d8ac8971SMatt Fleming rf->cookie = lockdep_pin_lock(&rq->lock); 1190cb42c9a3SMatt Fleming 1191cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG 1192cb42c9a3SMatt Fleming rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 1193cb42c9a3SMatt Fleming rf->clock_update_flags = 0; 1194cb42c9a3SMatt Fleming #endif 1195d8ac8971SMatt Fleming } 1196d8ac8971SMatt Fleming 1197d8ac8971SMatt Fleming static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) 1198d8ac8971SMatt Fleming { 1199cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG 1200cb42c9a3SMatt Fleming if (rq->clock_update_flags > RQCF_ACT_SKIP) 1201cb42c9a3SMatt Fleming rf->clock_update_flags = RQCF_UPDATED; 1202cb42c9a3SMatt Fleming #endif 1203cb42c9a3SMatt Fleming 1204d8ac8971SMatt Fleming lockdep_unpin_lock(&rq->lock, rf->cookie); 1205d8ac8971SMatt Fleming } 1206d8ac8971SMatt Fleming 1207d8ac8971SMatt Fleming static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) 1208d8ac8971SMatt Fleming { 1209d8ac8971SMatt Fleming lockdep_repin_lock(&rq->lock, rf->cookie); 1210cb42c9a3SMatt Fleming 1211cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG 1212cb42c9a3SMatt Fleming /* 1213cb42c9a3SMatt Fleming * Restore the value we stashed in @rf for this pin context. 1214cb42c9a3SMatt Fleming */ 1215cb42c9a3SMatt Fleming rq->clock_update_flags |= rf->clock_update_flags; 1216cb42c9a3SMatt Fleming #endif 1217d8ac8971SMatt Fleming } 1218d8ac8971SMatt Fleming 12191f351d7fSJohannes Weiner struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 12201f351d7fSJohannes Weiner __acquires(rq->lock); 12211f351d7fSJohannes Weiner 12221f351d7fSJohannes Weiner struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 12231f351d7fSJohannes Weiner __acquires(p->pi_lock) 12241f351d7fSJohannes Weiner __acquires(rq->lock); 12251f351d7fSJohannes Weiner 12261f351d7fSJohannes Weiner static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) 12271f351d7fSJohannes Weiner __releases(rq->lock) 12281f351d7fSJohannes Weiner { 12291f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 12301f351d7fSJohannes Weiner raw_spin_unlock(&rq->lock); 12311f351d7fSJohannes Weiner } 12321f351d7fSJohannes Weiner 12331f351d7fSJohannes Weiner static inline void 12341f351d7fSJohannes Weiner task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 12351f351d7fSJohannes Weiner __releases(rq->lock) 12361f351d7fSJohannes Weiner __releases(p->pi_lock) 12371f351d7fSJohannes Weiner { 12381f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 12391f351d7fSJohannes Weiner raw_spin_unlock(&rq->lock); 12401f351d7fSJohannes Weiner raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 12411f351d7fSJohannes Weiner } 12421f351d7fSJohannes Weiner 12431f351d7fSJohannes Weiner static inline void 12441f351d7fSJohannes Weiner rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) 12451f351d7fSJohannes Weiner __acquires(rq->lock) 12461f351d7fSJohannes Weiner { 12471f351d7fSJohannes Weiner raw_spin_lock_irqsave(&rq->lock, rf->flags); 12481f351d7fSJohannes Weiner rq_pin_lock(rq, rf); 12491f351d7fSJohannes Weiner } 12501f351d7fSJohannes Weiner 12511f351d7fSJohannes Weiner static inline void 12521f351d7fSJohannes Weiner rq_lock_irq(struct rq *rq, struct rq_flags *rf) 12531f351d7fSJohannes Weiner __acquires(rq->lock) 12541f351d7fSJohannes Weiner { 12551f351d7fSJohannes Weiner raw_spin_lock_irq(&rq->lock); 12561f351d7fSJohannes Weiner rq_pin_lock(rq, rf); 12571f351d7fSJohannes Weiner } 12581f351d7fSJohannes Weiner 12591f351d7fSJohannes Weiner static inline void 12601f351d7fSJohannes Weiner rq_lock(struct rq *rq, struct rq_flags *rf) 12611f351d7fSJohannes Weiner __acquires(rq->lock) 12621f351d7fSJohannes Weiner { 12631f351d7fSJohannes Weiner raw_spin_lock(&rq->lock); 12641f351d7fSJohannes Weiner rq_pin_lock(rq, rf); 12651f351d7fSJohannes Weiner } 12661f351d7fSJohannes Weiner 12671f351d7fSJohannes Weiner static inline void 12681f351d7fSJohannes Weiner rq_relock(struct rq *rq, struct rq_flags *rf) 12691f351d7fSJohannes Weiner __acquires(rq->lock) 12701f351d7fSJohannes Weiner { 12711f351d7fSJohannes Weiner raw_spin_lock(&rq->lock); 12721f351d7fSJohannes Weiner rq_repin_lock(rq, rf); 12731f351d7fSJohannes Weiner } 12741f351d7fSJohannes Weiner 12751f351d7fSJohannes Weiner static inline void 12761f351d7fSJohannes Weiner rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) 12771f351d7fSJohannes Weiner __releases(rq->lock) 12781f351d7fSJohannes Weiner { 12791f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 12801f351d7fSJohannes Weiner raw_spin_unlock_irqrestore(&rq->lock, rf->flags); 12811f351d7fSJohannes Weiner } 12821f351d7fSJohannes Weiner 12831f351d7fSJohannes Weiner static inline void 12841f351d7fSJohannes Weiner rq_unlock_irq(struct rq *rq, struct rq_flags *rf) 12851f351d7fSJohannes Weiner __releases(rq->lock) 12861f351d7fSJohannes Weiner { 12871f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 12881f351d7fSJohannes Weiner raw_spin_unlock_irq(&rq->lock); 12891f351d7fSJohannes Weiner } 12901f351d7fSJohannes Weiner 12911f351d7fSJohannes Weiner static inline void 12921f351d7fSJohannes Weiner rq_unlock(struct rq *rq, struct rq_flags *rf) 12931f351d7fSJohannes Weiner __releases(rq->lock) 12941f351d7fSJohannes Weiner { 12951f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 12961f351d7fSJohannes Weiner raw_spin_unlock(&rq->lock); 12971f351d7fSJohannes Weiner } 12981f351d7fSJohannes Weiner 1299246b3b33SJohannes Weiner static inline struct rq * 1300246b3b33SJohannes Weiner this_rq_lock_irq(struct rq_flags *rf) 1301246b3b33SJohannes Weiner __acquires(rq->lock) 1302246b3b33SJohannes Weiner { 1303246b3b33SJohannes Weiner struct rq *rq; 1304246b3b33SJohannes Weiner 1305246b3b33SJohannes Weiner local_irq_disable(); 1306246b3b33SJohannes Weiner rq = this_rq(); 1307246b3b33SJohannes Weiner rq_lock(rq, rf); 1308246b3b33SJohannes Weiner return rq; 1309246b3b33SJohannes Weiner } 1310246b3b33SJohannes Weiner 13119942f79bSRik van Riel #ifdef CONFIG_NUMA 1312e3fe70b1SRik van Riel enum numa_topology_type { 1313e3fe70b1SRik van Riel NUMA_DIRECT, 1314e3fe70b1SRik van Riel NUMA_GLUELESS_MESH, 1315e3fe70b1SRik van Riel NUMA_BACKPLANE, 1316e3fe70b1SRik van Riel }; 1317e3fe70b1SRik van Riel extern enum numa_topology_type sched_numa_topology_type; 13189942f79bSRik van Riel extern int sched_max_numa_distance; 13199942f79bSRik van Riel extern bool find_numa_distance(int distance); 1320f2cb1360SIngo Molnar extern void sched_init_numa(void); 1321f2cb1360SIngo Molnar extern void sched_domains_numa_masks_set(unsigned int cpu); 1322f2cb1360SIngo Molnar extern void sched_domains_numa_masks_clear(unsigned int cpu); 1323e0e8d491SWanpeng Li extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); 1324f2cb1360SIngo Molnar #else 1325f2cb1360SIngo Molnar static inline void sched_init_numa(void) { } 1326f2cb1360SIngo Molnar static inline void sched_domains_numa_masks_set(unsigned int cpu) { } 1327f2cb1360SIngo Molnar static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } 1328e0e8d491SWanpeng Li static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 1329e0e8d491SWanpeng Li { 1330e0e8d491SWanpeng Li return nr_cpu_ids; 1331e0e8d491SWanpeng Li } 1332f2cb1360SIngo Molnar #endif 1333f2cb1360SIngo Molnar 1334f809ca9aSMel Gorman #ifdef CONFIG_NUMA_BALANCING 133544dba3d5SIulia Manda /* The regions in numa_faults array from task_struct */ 133644dba3d5SIulia Manda enum numa_faults_stats { 133744dba3d5SIulia Manda NUMA_MEM = 0, 133844dba3d5SIulia Manda NUMA_CPU, 133944dba3d5SIulia Manda NUMA_MEMBUF, 134044dba3d5SIulia Manda NUMA_CPUBUF 134144dba3d5SIulia Manda }; 13420ec8aa00SPeter Zijlstra extern void sched_setnuma(struct task_struct *p, int node); 1343e6628d5bSMel Gorman extern int migrate_task_to(struct task_struct *p, int cpu); 13440ad4e3dfSSrikar Dronamraju extern int migrate_swap(struct task_struct *p, struct task_struct *t, 13450ad4e3dfSSrikar Dronamraju int cpu, int scpu); 134613784475SMel Gorman extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); 134713784475SMel Gorman #else 134813784475SMel Gorman static inline void 134913784475SMel Gorman init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 135013784475SMel Gorman { 135113784475SMel Gorman } 1352f809ca9aSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 1353f809ca9aSMel Gorman 1354518cd623SPeter Zijlstra #ifdef CONFIG_SMP 1355518cd623SPeter Zijlstra 1356e3fca9e7SPeter Zijlstra static inline void 1357e3fca9e7SPeter Zijlstra queue_balance_callback(struct rq *rq, 1358e3fca9e7SPeter Zijlstra struct callback_head *head, 1359e3fca9e7SPeter Zijlstra void (*func)(struct rq *rq)) 1360e3fca9e7SPeter Zijlstra { 1361e3fca9e7SPeter Zijlstra lockdep_assert_held(&rq->lock); 1362e3fca9e7SPeter Zijlstra 1363e3fca9e7SPeter Zijlstra if (unlikely(head->next)) 1364e3fca9e7SPeter Zijlstra return; 1365e3fca9e7SPeter Zijlstra 1366e3fca9e7SPeter Zijlstra head->func = (void (*)(struct callback_head *))func; 1367e3fca9e7SPeter Zijlstra head->next = rq->balance_callback; 1368e3fca9e7SPeter Zijlstra rq->balance_callback = head; 1369e3fca9e7SPeter Zijlstra } 1370e3fca9e7SPeter Zijlstra 1371e3baac47SPeter Zijlstra extern void sched_ttwu_pending(void); 1372e3baac47SPeter Zijlstra 1373391e43daSPeter Zijlstra #define rcu_dereference_check_sched_domain(p) \ 1374391e43daSPeter Zijlstra rcu_dereference_check((p), \ 1375391e43daSPeter Zijlstra lockdep_is_held(&sched_domains_mutex)) 1376391e43daSPeter Zijlstra 1377391e43daSPeter Zijlstra /* 1378391e43daSPeter Zijlstra * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 1379337e9b07SPaul E. McKenney * See destroy_sched_domains: call_rcu for details. 1380391e43daSPeter Zijlstra * 1381391e43daSPeter Zijlstra * The domain tree of any CPU may only be accessed from within 1382391e43daSPeter Zijlstra * preempt-disabled sections. 1383391e43daSPeter Zijlstra */ 1384391e43daSPeter Zijlstra #define for_each_domain(cpu, __sd) \ 1385518cd623SPeter Zijlstra for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ 1386518cd623SPeter Zijlstra __sd; __sd = __sd->parent) 1387391e43daSPeter Zijlstra 1388518cd623SPeter Zijlstra /** 1389518cd623SPeter Zijlstra * highest_flag_domain - Return highest sched_domain containing flag. 139097fb7a0aSIngo Molnar * @cpu: The CPU whose highest level of sched domain is to 1391518cd623SPeter Zijlstra * be returned. 1392518cd623SPeter Zijlstra * @flag: The flag to check for the highest sched_domain 139397fb7a0aSIngo Molnar * for the given CPU. 1394518cd623SPeter Zijlstra * 139597fb7a0aSIngo Molnar * Returns the highest sched_domain of a CPU which contains the given flag. 1396518cd623SPeter Zijlstra */ 1397518cd623SPeter Zijlstra static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 1398518cd623SPeter Zijlstra { 1399518cd623SPeter Zijlstra struct sched_domain *sd, *hsd = NULL; 1400518cd623SPeter Zijlstra 1401518cd623SPeter Zijlstra for_each_domain(cpu, sd) { 1402518cd623SPeter Zijlstra if (!(sd->flags & flag)) 1403518cd623SPeter Zijlstra break; 1404518cd623SPeter Zijlstra hsd = sd; 1405518cd623SPeter Zijlstra } 1406518cd623SPeter Zijlstra 1407518cd623SPeter Zijlstra return hsd; 1408518cd623SPeter Zijlstra } 1409518cd623SPeter Zijlstra 1410fb13c7eeSMel Gorman static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) 1411fb13c7eeSMel Gorman { 1412fb13c7eeSMel Gorman struct sched_domain *sd; 1413fb13c7eeSMel Gorman 1414fb13c7eeSMel Gorman for_each_domain(cpu, sd) { 1415fb13c7eeSMel Gorman if (sd->flags & flag) 1416fb13c7eeSMel Gorman break; 1417fb13c7eeSMel Gorman } 1418fb13c7eeSMel Gorman 1419fb13c7eeSMel Gorman return sd; 1420fb13c7eeSMel Gorman } 1421fb13c7eeSMel Gorman 1422994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); 14237d9ffa89SPeter Zijlstra DECLARE_PER_CPU(int, sd_llc_size); 1424518cd623SPeter Zijlstra DECLARE_PER_CPU(int, sd_llc_id); 1425994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 1426994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); 1427994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 1428994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 1429df054e84SMorten Rasmussen extern struct static_key_false sched_asym_cpucapacity; 1430518cd623SPeter Zijlstra 143163b2ca30SNicolas Pitre struct sched_group_capacity { 14325e6521eaSLi Zefan atomic_t ref; 14335e6521eaSLi Zefan /* 1434172895e6SYuyang Du * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity 143563b2ca30SNicolas Pitre * for a single CPU. 14365e6521eaSLi Zefan */ 1437bf475ce0SMorten Rasmussen unsigned long capacity; 1438bf475ce0SMorten Rasmussen unsigned long min_capacity; /* Min per-CPU capacity in group */ 1439e3d6d0cbSMorten Rasmussen unsigned long max_capacity; /* Max per-CPU capacity in group */ 14405e6521eaSLi Zefan unsigned long next_update; 144163b2ca30SNicolas Pitre int imbalance; /* XXX unrelated to capacity but shared group state */ 14425e6521eaSLi Zefan 1443005f874dSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 1444005f874dSPeter Zijlstra int id; 1445005f874dSPeter Zijlstra #endif 1446005f874dSPeter Zijlstra 144797fb7a0aSIngo Molnar unsigned long cpumask[0]; /* Balance mask */ 14485e6521eaSLi Zefan }; 14495e6521eaSLi Zefan 14505e6521eaSLi Zefan struct sched_group { 14515e6521eaSLi Zefan struct sched_group *next; /* Must be a circular list */ 14525e6521eaSLi Zefan atomic_t ref; 14535e6521eaSLi Zefan 14545e6521eaSLi Zefan unsigned int group_weight; 145563b2ca30SNicolas Pitre struct sched_group_capacity *sgc; 145697fb7a0aSIngo Molnar int asym_prefer_cpu; /* CPU of highest priority in group */ 14575e6521eaSLi Zefan 14585e6521eaSLi Zefan /* 14595e6521eaSLi Zefan * The CPUs this group covers. 14605e6521eaSLi Zefan * 14615e6521eaSLi Zefan * NOTE: this field is variable length. (Allocated dynamically 14625e6521eaSLi Zefan * by attaching extra space to the end of the structure, 14635e6521eaSLi Zefan * depending on how many CPUs the kernel has booted up with) 14645e6521eaSLi Zefan */ 146504f5c362SGustavo A. R. Silva unsigned long cpumask[]; 14665e6521eaSLi Zefan }; 14675e6521eaSLi Zefan 1468ae4df9d6SPeter Zijlstra static inline struct cpumask *sched_group_span(struct sched_group *sg) 14695e6521eaSLi Zefan { 14705e6521eaSLi Zefan return to_cpumask(sg->cpumask); 14715e6521eaSLi Zefan } 14725e6521eaSLi Zefan 14735e6521eaSLi Zefan /* 1474e5c14b1fSPeter Zijlstra * See build_balance_mask(). 14755e6521eaSLi Zefan */ 1476e5c14b1fSPeter Zijlstra static inline struct cpumask *group_balance_mask(struct sched_group *sg) 14775e6521eaSLi Zefan { 147863b2ca30SNicolas Pitre return to_cpumask(sg->sgc->cpumask); 14795e6521eaSLi Zefan } 14805e6521eaSLi Zefan 14815e6521eaSLi Zefan /** 148297fb7a0aSIngo Molnar * group_first_cpu - Returns the first CPU in the cpumask of a sched_group. 148397fb7a0aSIngo Molnar * @group: The group whose first CPU is to be returned. 14845e6521eaSLi Zefan */ 14855e6521eaSLi Zefan static inline unsigned int group_first_cpu(struct sched_group *group) 14865e6521eaSLi Zefan { 1487ae4df9d6SPeter Zijlstra return cpumask_first(sched_group_span(group)); 14885e6521eaSLi Zefan } 14895e6521eaSLi Zefan 1490c1174876SPeter Zijlstra extern int group_balance_cpu(struct sched_group *sg); 1491c1174876SPeter Zijlstra 14923866e845SSteven Rostedt (Red Hat) #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 14933866e845SSteven Rostedt (Red Hat) void register_sched_domain_sysctl(void); 1494bbdacdfeSPeter Zijlstra void dirty_sched_domain_sysctl(int cpu); 14953866e845SSteven Rostedt (Red Hat) void unregister_sched_domain_sysctl(void); 14963866e845SSteven Rostedt (Red Hat) #else 14973866e845SSteven Rostedt (Red Hat) static inline void register_sched_domain_sysctl(void) 14983866e845SSteven Rostedt (Red Hat) { 14993866e845SSteven Rostedt (Red Hat) } 1500bbdacdfeSPeter Zijlstra static inline void dirty_sched_domain_sysctl(int cpu) 1501bbdacdfeSPeter Zijlstra { 1502bbdacdfeSPeter Zijlstra } 15033866e845SSteven Rostedt (Red Hat) static inline void unregister_sched_domain_sysctl(void) 15043866e845SSteven Rostedt (Red Hat) { 15053866e845SSteven Rostedt (Red Hat) } 15063866e845SSteven Rostedt (Red Hat) #endif 15073866e845SSteven Rostedt (Red Hat) 1508e3baac47SPeter Zijlstra #else 1509e3baac47SPeter Zijlstra 1510e3baac47SPeter Zijlstra static inline void sched_ttwu_pending(void) { } 1511e3baac47SPeter Zijlstra 1512518cd623SPeter Zijlstra #endif /* CONFIG_SMP */ 1513391e43daSPeter Zijlstra 1514391e43daSPeter Zijlstra #include "stats.h" 15151051408fSIngo Molnar #include "autogroup.h" 1516391e43daSPeter Zijlstra 1517391e43daSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED 1518391e43daSPeter Zijlstra 1519391e43daSPeter Zijlstra /* 1520391e43daSPeter Zijlstra * Return the group to which this tasks belongs. 1521391e43daSPeter Zijlstra * 15228af01f56STejun Heo * We cannot use task_css() and friends because the cgroup subsystem 15238af01f56STejun Heo * changes that value before the cgroup_subsys::attach() method is called, 15248af01f56STejun Heo * therefore we cannot pin it and might observe the wrong value. 15258323f26cSPeter Zijlstra * 15268323f26cSPeter Zijlstra * The same is true for autogroup's p->signal->autogroup->tg, the autogroup 15278323f26cSPeter Zijlstra * core changes this before calling sched_move_task(). 15288323f26cSPeter Zijlstra * 15298323f26cSPeter Zijlstra * Instead we use a 'copy' which is updated from sched_move_task() while 15308323f26cSPeter Zijlstra * holding both task_struct::pi_lock and rq::lock. 1531391e43daSPeter Zijlstra */ 1532391e43daSPeter Zijlstra static inline struct task_group *task_group(struct task_struct *p) 1533391e43daSPeter Zijlstra { 15348323f26cSPeter Zijlstra return p->sched_task_group; 1535391e43daSPeter Zijlstra } 1536391e43daSPeter Zijlstra 1537391e43daSPeter Zijlstra /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 1538391e43daSPeter Zijlstra static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 1539391e43daSPeter Zijlstra { 1540391e43daSPeter Zijlstra #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 1541391e43daSPeter Zijlstra struct task_group *tg = task_group(p); 1542391e43daSPeter Zijlstra #endif 1543391e43daSPeter Zijlstra 1544391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED 1545ad936d86SByungchul Park set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 1546391e43daSPeter Zijlstra p->se.cfs_rq = tg->cfs_rq[cpu]; 1547391e43daSPeter Zijlstra p->se.parent = tg->se[cpu]; 1548391e43daSPeter Zijlstra #endif 1549391e43daSPeter Zijlstra 1550391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 1551391e43daSPeter Zijlstra p->rt.rt_rq = tg->rt_rq[cpu]; 1552391e43daSPeter Zijlstra p->rt.parent = tg->rt_se[cpu]; 1553391e43daSPeter Zijlstra #endif 1554391e43daSPeter Zijlstra } 1555391e43daSPeter Zijlstra 1556391e43daSPeter Zijlstra #else /* CONFIG_CGROUP_SCHED */ 1557391e43daSPeter Zijlstra 1558391e43daSPeter Zijlstra static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 1559391e43daSPeter Zijlstra static inline struct task_group *task_group(struct task_struct *p) 1560391e43daSPeter Zijlstra { 1561391e43daSPeter Zijlstra return NULL; 1562391e43daSPeter Zijlstra } 1563391e43daSPeter Zijlstra 1564391e43daSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */ 1565391e43daSPeter Zijlstra 1566391e43daSPeter Zijlstra static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 1567391e43daSPeter Zijlstra { 1568391e43daSPeter Zijlstra set_task_rq(p, cpu); 1569391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1570391e43daSPeter Zijlstra /* 1571391e43daSPeter Zijlstra * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 1572dfcb245eSIngo Molnar * successfully executed on another CPU. We must ensure that updates of 1573391e43daSPeter Zijlstra * per-task data have been completed by this moment. 1574391e43daSPeter Zijlstra */ 1575391e43daSPeter Zijlstra smp_wmb(); 1576c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK 1577c546951dSAndrea Parri WRITE_ONCE(p->cpu, cpu); 1578c65eacbeSAndy Lutomirski #else 1579c546951dSAndrea Parri WRITE_ONCE(task_thread_info(p)->cpu, cpu); 1580c65eacbeSAndy Lutomirski #endif 1581ac66f547SPeter Zijlstra p->wake_cpu = cpu; 1582391e43daSPeter Zijlstra #endif 1583391e43daSPeter Zijlstra } 1584391e43daSPeter Zijlstra 1585391e43daSPeter Zijlstra /* 1586391e43daSPeter Zijlstra * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 1587391e43daSPeter Zijlstra */ 1588391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 1589c5905afbSIngo Molnar # include <linux/static_key.h> 1590391e43daSPeter Zijlstra # define const_debug __read_mostly 1591391e43daSPeter Zijlstra #else 1592391e43daSPeter Zijlstra # define const_debug const 1593391e43daSPeter Zijlstra #endif 1594391e43daSPeter Zijlstra 1595391e43daSPeter Zijlstra #define SCHED_FEAT(name, enabled) \ 1596391e43daSPeter Zijlstra __SCHED_FEAT_##name , 1597391e43daSPeter Zijlstra 1598391e43daSPeter Zijlstra enum { 1599391e43daSPeter Zijlstra #include "features.h" 1600f8b6d1ccSPeter Zijlstra __SCHED_FEAT_NR, 1601391e43daSPeter Zijlstra }; 1602391e43daSPeter Zijlstra 1603391e43daSPeter Zijlstra #undef SCHED_FEAT 1604391e43daSPeter Zijlstra 1605e9666d10SMasahiro Yamada #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) 1606765cc3a4SPatrick Bellasi 1607765cc3a4SPatrick Bellasi /* 1608765cc3a4SPatrick Bellasi * To support run-time toggling of sched features, all the translation units 1609765cc3a4SPatrick Bellasi * (but core.c) reference the sysctl_sched_features defined in core.c. 1610765cc3a4SPatrick Bellasi */ 1611765cc3a4SPatrick Bellasi extern const_debug unsigned int sysctl_sched_features; 1612765cc3a4SPatrick Bellasi 1613f8b6d1ccSPeter Zijlstra #define SCHED_FEAT(name, enabled) \ 1614c5905afbSIngo Molnar static __always_inline bool static_branch_##name(struct static_key *key) \ 1615f8b6d1ccSPeter Zijlstra { \ 16166e76ea8aSJason Baron return static_key_##enabled(key); \ 1617f8b6d1ccSPeter Zijlstra } 1618f8b6d1ccSPeter Zijlstra 1619f8b6d1ccSPeter Zijlstra #include "features.h" 1620f8b6d1ccSPeter Zijlstra #undef SCHED_FEAT 1621f8b6d1ccSPeter Zijlstra 1622c5905afbSIngo Molnar extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 1623f8b6d1ccSPeter Zijlstra #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 1624765cc3a4SPatrick Bellasi 1625e9666d10SMasahiro Yamada #else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */ 1626765cc3a4SPatrick Bellasi 1627765cc3a4SPatrick Bellasi /* 1628765cc3a4SPatrick Bellasi * Each translation unit has its own copy of sysctl_sched_features to allow 1629765cc3a4SPatrick Bellasi * constants propagation at compile time and compiler optimization based on 1630765cc3a4SPatrick Bellasi * features default. 1631765cc3a4SPatrick Bellasi */ 1632765cc3a4SPatrick Bellasi #define SCHED_FEAT(name, enabled) \ 1633765cc3a4SPatrick Bellasi (1UL << __SCHED_FEAT_##name) * enabled | 1634765cc3a4SPatrick Bellasi static const_debug __maybe_unused unsigned int sysctl_sched_features = 1635765cc3a4SPatrick Bellasi #include "features.h" 1636765cc3a4SPatrick Bellasi 0; 1637765cc3a4SPatrick Bellasi #undef SCHED_FEAT 1638765cc3a4SPatrick Bellasi 16397e6f4c5dSPeter Zijlstra #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1640765cc3a4SPatrick Bellasi 1641e9666d10SMasahiro Yamada #endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */ 1642391e43daSPeter Zijlstra 16432a595721SSrikar Dronamraju extern struct static_key_false sched_numa_balancing; 1644cb251765SMel Gorman extern struct static_key_false sched_schedstats; 1645cbee9f88SPeter Zijlstra 1646391e43daSPeter Zijlstra static inline u64 global_rt_period(void) 1647391e43daSPeter Zijlstra { 1648391e43daSPeter Zijlstra return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 1649391e43daSPeter Zijlstra } 1650391e43daSPeter Zijlstra 1651391e43daSPeter Zijlstra static inline u64 global_rt_runtime(void) 1652391e43daSPeter Zijlstra { 1653391e43daSPeter Zijlstra if (sysctl_sched_rt_runtime < 0) 1654391e43daSPeter Zijlstra return RUNTIME_INF; 1655391e43daSPeter Zijlstra 1656391e43daSPeter Zijlstra return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 1657391e43daSPeter Zijlstra } 1658391e43daSPeter Zijlstra 1659391e43daSPeter Zijlstra static inline int task_current(struct rq *rq, struct task_struct *p) 1660391e43daSPeter Zijlstra { 1661391e43daSPeter Zijlstra return rq->curr == p; 1662391e43daSPeter Zijlstra } 1663391e43daSPeter Zijlstra 1664391e43daSPeter Zijlstra static inline int task_running(struct rq *rq, struct task_struct *p) 1665391e43daSPeter Zijlstra { 1666391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1667391e43daSPeter Zijlstra return p->on_cpu; 1668391e43daSPeter Zijlstra #else 1669391e43daSPeter Zijlstra return task_current(rq, p); 1670391e43daSPeter Zijlstra #endif 1671391e43daSPeter Zijlstra } 1672391e43daSPeter Zijlstra 1673da0c1e65SKirill Tkhai static inline int task_on_rq_queued(struct task_struct *p) 1674da0c1e65SKirill Tkhai { 1675da0c1e65SKirill Tkhai return p->on_rq == TASK_ON_RQ_QUEUED; 1676da0c1e65SKirill Tkhai } 1677391e43daSPeter Zijlstra 1678cca26e80SKirill Tkhai static inline int task_on_rq_migrating(struct task_struct *p) 1679cca26e80SKirill Tkhai { 1680c546951dSAndrea Parri return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; 1681cca26e80SKirill Tkhai } 1682cca26e80SKirill Tkhai 1683b13095f0SLi Zefan /* 1684b13095f0SLi Zefan * wake flags 1685b13095f0SLi Zefan */ 168697fb7a0aSIngo Molnar #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ 168797fb7a0aSIngo Molnar #define WF_FORK 0x02 /* Child wakeup after fork */ 168897fb7a0aSIngo Molnar #define WF_MIGRATED 0x4 /* Internal use, task got migrated */ 1689b13095f0SLi Zefan 1690391e43daSPeter Zijlstra /* 1691391e43daSPeter Zijlstra * To aid in avoiding the subversion of "niceness" due to uneven distribution 1692391e43daSPeter Zijlstra * of tasks with abnormal "nice" values across CPUs the contribution that 1693391e43daSPeter Zijlstra * each task makes to its run queue's load is weighted according to its 1694391e43daSPeter Zijlstra * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 1695391e43daSPeter Zijlstra * scaled version of the new time slice allocation that they receive on time 1696391e43daSPeter Zijlstra * slice expiry etc. 1697391e43daSPeter Zijlstra */ 1698391e43daSPeter Zijlstra 1699391e43daSPeter Zijlstra #define WEIGHT_IDLEPRIO 3 1700391e43daSPeter Zijlstra #define WMULT_IDLEPRIO 1431655765 1701391e43daSPeter Zijlstra 1702ed82b8a1SAndi Kleen extern const int sched_prio_to_weight[40]; 1703ed82b8a1SAndi Kleen extern const u32 sched_prio_to_wmult[40]; 1704391e43daSPeter Zijlstra 1705ff77e468SPeter Zijlstra /* 1706ff77e468SPeter Zijlstra * {de,en}queue flags: 1707ff77e468SPeter Zijlstra * 1708ff77e468SPeter Zijlstra * DEQUEUE_SLEEP - task is no longer runnable 1709ff77e468SPeter Zijlstra * ENQUEUE_WAKEUP - task just became runnable 1710ff77e468SPeter Zijlstra * 1711ff77e468SPeter Zijlstra * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 1712ff77e468SPeter Zijlstra * are in a known state which allows modification. Such pairs 1713ff77e468SPeter Zijlstra * should preserve as much state as possible. 1714ff77e468SPeter Zijlstra * 1715ff77e468SPeter Zijlstra * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 1716ff77e468SPeter Zijlstra * in the runqueue. 1717ff77e468SPeter Zijlstra * 1718ff77e468SPeter Zijlstra * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 1719ff77e468SPeter Zijlstra * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 172059efa0baSPeter Zijlstra * ENQUEUE_MIGRATED - the task was migrated during wakeup 1721ff77e468SPeter Zijlstra * 1722ff77e468SPeter Zijlstra */ 1723ff77e468SPeter Zijlstra 1724ff77e468SPeter Zijlstra #define DEQUEUE_SLEEP 0x01 172597fb7a0aSIngo Molnar #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 172697fb7a0aSIngo Molnar #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 172797fb7a0aSIngo Molnar #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 1728ff77e468SPeter Zijlstra 17291de64443SPeter Zijlstra #define ENQUEUE_WAKEUP 0x01 1730ff77e468SPeter Zijlstra #define ENQUEUE_RESTORE 0x02 1731ff77e468SPeter Zijlstra #define ENQUEUE_MOVE 0x04 17320a67d1eeSPeter Zijlstra #define ENQUEUE_NOCLOCK 0x08 1733ff77e468SPeter Zijlstra 17340a67d1eeSPeter Zijlstra #define ENQUEUE_HEAD 0x10 17350a67d1eeSPeter Zijlstra #define ENQUEUE_REPLENISH 0x20 1736c82ba9faSLi Zefan #ifdef CONFIG_SMP 17370a67d1eeSPeter Zijlstra #define ENQUEUE_MIGRATED 0x40 1738c82ba9faSLi Zefan #else 173959efa0baSPeter Zijlstra #define ENQUEUE_MIGRATED 0x00 1740c82ba9faSLi Zefan #endif 1741c82ba9faSLi Zefan 174237e117c0SPeter Zijlstra #define RETRY_TASK ((void *)-1UL) 174337e117c0SPeter Zijlstra 1744c82ba9faSLi Zefan struct sched_class { 1745c82ba9faSLi Zefan const struct sched_class *next; 1746c82ba9faSLi Zefan 174769842cbaSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK 174869842cbaSPatrick Bellasi int uclamp_enabled; 174969842cbaSPatrick Bellasi #endif 175069842cbaSPatrick Bellasi 1751c82ba9faSLi Zefan void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 1752c82ba9faSLi Zefan void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 1753c82ba9faSLi Zefan void (*yield_task) (struct rq *rq); 1754c82ba9faSLi Zefan bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt); 1755c82ba9faSLi Zefan 1756c82ba9faSLi Zefan void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); 1757c82ba9faSLi Zefan 175898c2f700SPeter Zijlstra struct task_struct *(*pick_next_task)(struct rq *rq); 175998c2f700SPeter Zijlstra 17606e2df058SPeter Zijlstra void (*put_prev_task)(struct rq *rq, struct task_struct *p); 1761a0e813f2SPeter Zijlstra void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); 1762c82ba9faSLi Zefan 1763c82ba9faSLi Zefan #ifdef CONFIG_SMP 17646e2df058SPeter Zijlstra int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 1765ac66f547SPeter Zijlstra int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); 17661327237aSSrikar Dronamraju void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 1767c82ba9faSLi Zefan 1768c82ba9faSLi Zefan void (*task_woken)(struct rq *this_rq, struct task_struct *task); 1769c82ba9faSLi Zefan 1770c82ba9faSLi Zefan void (*set_cpus_allowed)(struct task_struct *p, 1771c82ba9faSLi Zefan const struct cpumask *newmask); 1772c82ba9faSLi Zefan 1773c82ba9faSLi Zefan void (*rq_online)(struct rq *rq); 1774c82ba9faSLi Zefan void (*rq_offline)(struct rq *rq); 1775c82ba9faSLi Zefan #endif 1776c82ba9faSLi Zefan 1777c82ba9faSLi Zefan void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 1778c82ba9faSLi Zefan void (*task_fork)(struct task_struct *p); 1779e6c390f2SDario Faggioli void (*task_dead)(struct task_struct *p); 1780c82ba9faSLi Zefan 178167dfa1b7SKirill Tkhai /* 178267dfa1b7SKirill Tkhai * The switched_from() call is allowed to drop rq->lock, therefore we 178367dfa1b7SKirill Tkhai * cannot assume the switched_from/switched_to pair is serliazed by 178467dfa1b7SKirill Tkhai * rq->lock. They are however serialized by p->pi_lock. 178567dfa1b7SKirill Tkhai */ 1786c82ba9faSLi Zefan void (*switched_from)(struct rq *this_rq, struct task_struct *task); 1787c82ba9faSLi Zefan void (*switched_to) (struct rq *this_rq, struct task_struct *task); 1788c82ba9faSLi Zefan void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 1789c82ba9faSLi Zefan int oldprio); 1790c82ba9faSLi Zefan 1791c82ba9faSLi Zefan unsigned int (*get_rr_interval)(struct rq *rq, 1792c82ba9faSLi Zefan struct task_struct *task); 1793c82ba9faSLi Zefan 17946e998916SStanislaw Gruszka void (*update_curr)(struct rq *rq); 17956e998916SStanislaw Gruszka 1796ea86cb4bSVincent Guittot #define TASK_SET_GROUP 0 1797ea86cb4bSVincent Guittot #define TASK_MOVE_GROUP 1 1798ea86cb4bSVincent Guittot 1799c82ba9faSLi Zefan #ifdef CONFIG_FAIR_GROUP_SCHED 1800ea86cb4bSVincent Guittot void (*task_change_group)(struct task_struct *p, int type); 1801c82ba9faSLi Zefan #endif 1802c82ba9faSLi Zefan }; 1803391e43daSPeter Zijlstra 18043f1d2a31SPeter Zijlstra static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 18053f1d2a31SPeter Zijlstra { 180610e7071bSPeter Zijlstra WARN_ON_ONCE(rq->curr != prev); 18076e2df058SPeter Zijlstra prev->sched_class->put_prev_task(rq, prev); 18083f1d2a31SPeter Zijlstra } 18093f1d2a31SPeter Zijlstra 181003b7fad1SPeter Zijlstra static inline void set_next_task(struct rq *rq, struct task_struct *next) 1811b2bf6c31SPeter Zijlstra { 181203b7fad1SPeter Zijlstra WARN_ON_ONCE(rq->curr != next); 1813a0e813f2SPeter Zijlstra next->sched_class->set_next_task(rq, next, false); 1814b2bf6c31SPeter Zijlstra } 1815b2bf6c31SPeter Zijlstra 1816f5832c19SNicolas Pitre #ifdef CONFIG_SMP 1817391e43daSPeter Zijlstra #define sched_class_highest (&stop_sched_class) 1818f5832c19SNicolas Pitre #else 1819f5832c19SNicolas Pitre #define sched_class_highest (&dl_sched_class) 1820f5832c19SNicolas Pitre #endif 18216e2df058SPeter Zijlstra 18226e2df058SPeter Zijlstra #define for_class_range(class, _from, _to) \ 18236e2df058SPeter Zijlstra for (class = (_from); class != (_to); class = class->next) 18246e2df058SPeter Zijlstra 1825391e43daSPeter Zijlstra #define for_each_class(class) \ 18266e2df058SPeter Zijlstra for_class_range(class, sched_class_highest, NULL) 1827391e43daSPeter Zijlstra 1828391e43daSPeter Zijlstra extern const struct sched_class stop_sched_class; 1829aab03e05SDario Faggioli extern const struct sched_class dl_sched_class; 1830391e43daSPeter Zijlstra extern const struct sched_class rt_sched_class; 1831391e43daSPeter Zijlstra extern const struct sched_class fair_sched_class; 1832391e43daSPeter Zijlstra extern const struct sched_class idle_sched_class; 1833391e43daSPeter Zijlstra 18346e2df058SPeter Zijlstra static inline bool sched_stop_runnable(struct rq *rq) 18356e2df058SPeter Zijlstra { 18366e2df058SPeter Zijlstra return rq->stop && task_on_rq_queued(rq->stop); 18376e2df058SPeter Zijlstra } 18386e2df058SPeter Zijlstra 18396e2df058SPeter Zijlstra static inline bool sched_dl_runnable(struct rq *rq) 18406e2df058SPeter Zijlstra { 18416e2df058SPeter Zijlstra return rq->dl.dl_nr_running > 0; 18426e2df058SPeter Zijlstra } 18436e2df058SPeter Zijlstra 18446e2df058SPeter Zijlstra static inline bool sched_rt_runnable(struct rq *rq) 18456e2df058SPeter Zijlstra { 18466e2df058SPeter Zijlstra return rq->rt.rt_queued > 0; 18476e2df058SPeter Zijlstra } 18486e2df058SPeter Zijlstra 18496e2df058SPeter Zijlstra static inline bool sched_fair_runnable(struct rq *rq) 18506e2df058SPeter Zijlstra { 18516e2df058SPeter Zijlstra return rq->cfs.nr_running > 0; 18526e2df058SPeter Zijlstra } 1853391e43daSPeter Zijlstra 18545d7d6056SPeter Zijlstra extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 185598c2f700SPeter Zijlstra extern struct task_struct *pick_next_task_idle(struct rq *rq); 18565d7d6056SPeter Zijlstra 1857391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1858391e43daSPeter Zijlstra 185963b2ca30SNicolas Pitre extern void update_group_capacity(struct sched_domain *sd, int cpu); 1860b719203bSLi Zefan 18617caff66fSDaniel Lezcano extern void trigger_load_balance(struct rq *rq); 1862391e43daSPeter Zijlstra 1863c5b28038SPeter Zijlstra extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); 1864c5b28038SPeter Zijlstra 1865391e43daSPeter Zijlstra #endif 1866391e43daSPeter Zijlstra 1867442bf3aaSDaniel Lezcano #ifdef CONFIG_CPU_IDLE 1868442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq, 1869442bf3aaSDaniel Lezcano struct cpuidle_state *idle_state) 1870442bf3aaSDaniel Lezcano { 1871442bf3aaSDaniel Lezcano rq->idle_state = idle_state; 1872442bf3aaSDaniel Lezcano } 1873442bf3aaSDaniel Lezcano 1874442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1875442bf3aaSDaniel Lezcano { 18769148a3a1SPeter Zijlstra SCHED_WARN_ON(!rcu_read_lock_held()); 187797fb7a0aSIngo Molnar 1878442bf3aaSDaniel Lezcano return rq->idle_state; 1879442bf3aaSDaniel Lezcano } 1880442bf3aaSDaniel Lezcano #else 1881442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq, 1882442bf3aaSDaniel Lezcano struct cpuidle_state *idle_state) 1883442bf3aaSDaniel Lezcano { 1884442bf3aaSDaniel Lezcano } 1885442bf3aaSDaniel Lezcano 1886442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1887442bf3aaSDaniel Lezcano { 1888442bf3aaSDaniel Lezcano return NULL; 1889442bf3aaSDaniel Lezcano } 1890442bf3aaSDaniel Lezcano #endif 1891442bf3aaSDaniel Lezcano 18928663effbSSteven Rostedt (VMware) extern void schedule_idle(void); 18938663effbSSteven Rostedt (VMware) 1894391e43daSPeter Zijlstra extern void sysrq_sched_debug_show(void); 1895391e43daSPeter Zijlstra extern void sched_init_granularity(void); 1896391e43daSPeter Zijlstra extern void update_max_interval(void); 18971baca4ceSJuri Lelli 18981baca4ceSJuri Lelli extern void init_sched_dl_class(void); 1899391e43daSPeter Zijlstra extern void init_sched_rt_class(void); 1900391e43daSPeter Zijlstra extern void init_sched_fair_class(void); 1901391e43daSPeter Zijlstra 19029059393eSVincent Guittot extern void reweight_task(struct task_struct *p, int prio); 19039059393eSVincent Guittot 19048875125eSKirill Tkhai extern void resched_curr(struct rq *rq); 1905391e43daSPeter Zijlstra extern void resched_cpu(int cpu); 1906391e43daSPeter Zijlstra 1907391e43daSPeter Zijlstra extern struct rt_bandwidth def_rt_bandwidth; 1908391e43daSPeter Zijlstra extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); 1909391e43daSPeter Zijlstra 1910332ac17eSDario Faggioli extern struct dl_bandwidth def_dl_bandwidth; 1911332ac17eSDario Faggioli extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); 1912aab03e05SDario Faggioli extern void init_dl_task_timer(struct sched_dl_entity *dl_se); 1913209a0cbdSLuca Abeni extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); 1914aab03e05SDario Faggioli 1915c52f14d3SLuca Abeni #define BW_SHIFT 20 1916c52f14d3SLuca Abeni #define BW_UNIT (1 << BW_SHIFT) 19174da3abceSLuca Abeni #define RATIO_SHIFT 8 1918d505b8afSHuaixin Chang #define MAX_BW_BITS (64 - BW_SHIFT) 1919d505b8afSHuaixin Chang #define MAX_BW ((1ULL << MAX_BW_BITS) - 1) 1920332ac17eSDario Faggioli unsigned long to_ratio(u64 period, u64 runtime); 1921332ac17eSDario Faggioli 1922540247fbSYuyang Du extern void init_entity_runnable_average(struct sched_entity *se); 1923d0fe0b9cSDietmar Eggemann extern void post_init_entity_util_avg(struct task_struct *p); 1924a75cdaa9SAlex Shi 192576d92ac3SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL 192676d92ac3SFrederic Weisbecker extern bool sched_can_stop_tick(struct rq *rq); 1927d84b3131SFrederic Weisbecker extern int __init sched_tick_offload_init(void); 192876d92ac3SFrederic Weisbecker 192976d92ac3SFrederic Weisbecker /* 193076d92ac3SFrederic Weisbecker * Tick may be needed by tasks in the runqueue depending on their policy and 193176d92ac3SFrederic Weisbecker * requirements. If tick is needed, lets send the target an IPI to kick it out of 193276d92ac3SFrederic Weisbecker * nohz mode if necessary. 193376d92ac3SFrederic Weisbecker */ 193476d92ac3SFrederic Weisbecker static inline void sched_update_tick_dependency(struct rq *rq) 193576d92ac3SFrederic Weisbecker { 193676d92ac3SFrederic Weisbecker int cpu; 193776d92ac3SFrederic Weisbecker 193876d92ac3SFrederic Weisbecker if (!tick_nohz_full_enabled()) 193976d92ac3SFrederic Weisbecker return; 194076d92ac3SFrederic Weisbecker 194176d92ac3SFrederic Weisbecker cpu = cpu_of(rq); 194276d92ac3SFrederic Weisbecker 194376d92ac3SFrederic Weisbecker if (!tick_nohz_full_cpu(cpu)) 194476d92ac3SFrederic Weisbecker return; 194576d92ac3SFrederic Weisbecker 194676d92ac3SFrederic Weisbecker if (sched_can_stop_tick(rq)) 194776d92ac3SFrederic Weisbecker tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 194876d92ac3SFrederic Weisbecker else 194976d92ac3SFrederic Weisbecker tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 195076d92ac3SFrederic Weisbecker } 195176d92ac3SFrederic Weisbecker #else 1952d84b3131SFrederic Weisbecker static inline int sched_tick_offload_init(void) { return 0; } 195376d92ac3SFrederic Weisbecker static inline void sched_update_tick_dependency(struct rq *rq) { } 195476d92ac3SFrederic Weisbecker #endif 195576d92ac3SFrederic Weisbecker 195672465447SKirill Tkhai static inline void add_nr_running(struct rq *rq, unsigned count) 1957391e43daSPeter Zijlstra { 195872465447SKirill Tkhai unsigned prev_nr = rq->nr_running; 195972465447SKirill Tkhai 196072465447SKirill Tkhai rq->nr_running = prev_nr + count; 19619f3660c2SFrederic Weisbecker 19624486edd1STim Chen #ifdef CONFIG_SMP 19633e184501SViresh Kumar if (prev_nr < 2 && rq->nr_running >= 2) { 1964e90c8fe1SValentin Schneider if (!READ_ONCE(rq->rd->overload)) 1965e90c8fe1SValentin Schneider WRITE_ONCE(rq->rd->overload, 1); 196676d92ac3SFrederic Weisbecker } 19673e184501SViresh Kumar #endif 19684486edd1STim Chen 196976d92ac3SFrederic Weisbecker sched_update_tick_dependency(rq); 19704486edd1STim Chen } 1971391e43daSPeter Zijlstra 197272465447SKirill Tkhai static inline void sub_nr_running(struct rq *rq, unsigned count) 1973391e43daSPeter Zijlstra { 197472465447SKirill Tkhai rq->nr_running -= count; 197576d92ac3SFrederic Weisbecker /* Check if we still need preemption */ 197676d92ac3SFrederic Weisbecker sched_update_tick_dependency(rq); 1977391e43daSPeter Zijlstra } 1978391e43daSPeter Zijlstra 1979391e43daSPeter Zijlstra extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 1980391e43daSPeter Zijlstra extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 1981391e43daSPeter Zijlstra 1982391e43daSPeter Zijlstra extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); 1983391e43daSPeter Zijlstra 1984391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_nr_migrate; 1985391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_migration_cost; 1986391e43daSPeter Zijlstra 1987391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_HRTICK 1988391e43daSPeter Zijlstra 1989391e43daSPeter Zijlstra /* 1990391e43daSPeter Zijlstra * Use hrtick when: 1991391e43daSPeter Zijlstra * - enabled by features 1992391e43daSPeter Zijlstra * - hrtimer is actually high res 1993391e43daSPeter Zijlstra */ 1994391e43daSPeter Zijlstra static inline int hrtick_enabled(struct rq *rq) 1995391e43daSPeter Zijlstra { 1996391e43daSPeter Zijlstra if (!sched_feat(HRTICK)) 1997391e43daSPeter Zijlstra return 0; 1998391e43daSPeter Zijlstra if (!cpu_active(cpu_of(rq))) 1999391e43daSPeter Zijlstra return 0; 2000391e43daSPeter Zijlstra return hrtimer_is_hres_active(&rq->hrtick_timer); 2001391e43daSPeter Zijlstra } 2002391e43daSPeter Zijlstra 2003391e43daSPeter Zijlstra void hrtick_start(struct rq *rq, u64 delay); 2004391e43daSPeter Zijlstra 2005b39e66eaSMike Galbraith #else 2006b39e66eaSMike Galbraith 2007b39e66eaSMike Galbraith static inline int hrtick_enabled(struct rq *rq) 2008b39e66eaSMike Galbraith { 2009b39e66eaSMike Galbraith return 0; 2010b39e66eaSMike Galbraith } 2011b39e66eaSMike Galbraith 2012391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_HRTICK */ 2013391e43daSPeter Zijlstra 20141567c3e3SGiovanni Gherdovich #ifndef arch_scale_freq_tick 20151567c3e3SGiovanni Gherdovich static __always_inline 20161567c3e3SGiovanni Gherdovich void arch_scale_freq_tick(void) 20171567c3e3SGiovanni Gherdovich { 20181567c3e3SGiovanni Gherdovich } 20191567c3e3SGiovanni Gherdovich #endif 20201567c3e3SGiovanni Gherdovich 2021dfbca41fSPeter Zijlstra #ifndef arch_scale_freq_capacity 2022dfbca41fSPeter Zijlstra static __always_inline 20237673c8a4SJuri Lelli unsigned long arch_scale_freq_capacity(int cpu) 2024dfbca41fSPeter Zijlstra { 2025dfbca41fSPeter Zijlstra return SCHED_CAPACITY_SCALE; 2026dfbca41fSPeter Zijlstra } 2027dfbca41fSPeter Zijlstra #endif 2028b5b4860dSVincent Guittot 20297e1a9208SJuri Lelli #ifdef CONFIG_SMP 2030c1a280b6SThomas Gleixner #ifdef CONFIG_PREEMPTION 2031391e43daSPeter Zijlstra 2032391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); 2033391e43daSPeter Zijlstra 2034391e43daSPeter Zijlstra /* 2035391e43daSPeter Zijlstra * fair double_lock_balance: Safely acquires both rq->locks in a fair 2036391e43daSPeter Zijlstra * way at the expense of forcing extra atomic operations in all 2037391e43daSPeter Zijlstra * invocations. This assures that the double_lock is acquired using the 2038391e43daSPeter Zijlstra * same underlying policy as the spinlock_t on this architecture, which 2039391e43daSPeter Zijlstra * reduces latency compared to the unfair variant below. However, it 2040391e43daSPeter Zijlstra * also adds more overhead and therefore may reduce throughput. 2041391e43daSPeter Zijlstra */ 2042391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2043391e43daSPeter Zijlstra __releases(this_rq->lock) 2044391e43daSPeter Zijlstra __acquires(busiest->lock) 2045391e43daSPeter Zijlstra __acquires(this_rq->lock) 2046391e43daSPeter Zijlstra { 2047391e43daSPeter Zijlstra raw_spin_unlock(&this_rq->lock); 2048391e43daSPeter Zijlstra double_rq_lock(this_rq, busiest); 2049391e43daSPeter Zijlstra 2050391e43daSPeter Zijlstra return 1; 2051391e43daSPeter Zijlstra } 2052391e43daSPeter Zijlstra 2053391e43daSPeter Zijlstra #else 2054391e43daSPeter Zijlstra /* 2055391e43daSPeter Zijlstra * Unfair double_lock_balance: Optimizes throughput at the expense of 2056391e43daSPeter Zijlstra * latency by eliminating extra atomic operations when the locks are 205797fb7a0aSIngo Molnar * already in proper order on entry. This favors lower CPU-ids and will 205897fb7a0aSIngo Molnar * grant the double lock to lower CPUs over higher ids under contention, 2059391e43daSPeter Zijlstra * regardless of entry order into the function. 2060391e43daSPeter Zijlstra */ 2061391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2062391e43daSPeter Zijlstra __releases(this_rq->lock) 2063391e43daSPeter Zijlstra __acquires(busiest->lock) 2064391e43daSPeter Zijlstra __acquires(this_rq->lock) 2065391e43daSPeter Zijlstra { 2066391e43daSPeter Zijlstra int ret = 0; 2067391e43daSPeter Zijlstra 2068391e43daSPeter Zijlstra if (unlikely(!raw_spin_trylock(&busiest->lock))) { 2069391e43daSPeter Zijlstra if (busiest < this_rq) { 2070391e43daSPeter Zijlstra raw_spin_unlock(&this_rq->lock); 2071391e43daSPeter Zijlstra raw_spin_lock(&busiest->lock); 2072391e43daSPeter Zijlstra raw_spin_lock_nested(&this_rq->lock, 2073391e43daSPeter Zijlstra SINGLE_DEPTH_NESTING); 2074391e43daSPeter Zijlstra ret = 1; 2075391e43daSPeter Zijlstra } else 2076391e43daSPeter Zijlstra raw_spin_lock_nested(&busiest->lock, 2077391e43daSPeter Zijlstra SINGLE_DEPTH_NESTING); 2078391e43daSPeter Zijlstra } 2079391e43daSPeter Zijlstra return ret; 2080391e43daSPeter Zijlstra } 2081391e43daSPeter Zijlstra 2082c1a280b6SThomas Gleixner #endif /* CONFIG_PREEMPTION */ 2083391e43daSPeter Zijlstra 2084391e43daSPeter Zijlstra /* 2085391e43daSPeter Zijlstra * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 2086391e43daSPeter Zijlstra */ 2087391e43daSPeter Zijlstra static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 2088391e43daSPeter Zijlstra { 2089391e43daSPeter Zijlstra if (unlikely(!irqs_disabled())) { 209097fb7a0aSIngo Molnar /* printk() doesn't work well under rq->lock */ 2091391e43daSPeter Zijlstra raw_spin_unlock(&this_rq->lock); 2092391e43daSPeter Zijlstra BUG_ON(1); 2093391e43daSPeter Zijlstra } 2094391e43daSPeter Zijlstra 2095391e43daSPeter Zijlstra return _double_lock_balance(this_rq, busiest); 2096391e43daSPeter Zijlstra } 2097391e43daSPeter Zijlstra 2098391e43daSPeter Zijlstra static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 2099391e43daSPeter Zijlstra __releases(busiest->lock) 2100391e43daSPeter Zijlstra { 2101391e43daSPeter Zijlstra raw_spin_unlock(&busiest->lock); 2102391e43daSPeter Zijlstra lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 2103391e43daSPeter Zijlstra } 2104391e43daSPeter Zijlstra 210574602315SPeter Zijlstra static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 210674602315SPeter Zijlstra { 210774602315SPeter Zijlstra if (l1 > l2) 210874602315SPeter Zijlstra swap(l1, l2); 210974602315SPeter Zijlstra 211074602315SPeter Zijlstra spin_lock(l1); 211174602315SPeter Zijlstra spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 211274602315SPeter Zijlstra } 211374602315SPeter Zijlstra 211460e69eedSMike Galbraith static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 211560e69eedSMike Galbraith { 211660e69eedSMike Galbraith if (l1 > l2) 211760e69eedSMike Galbraith swap(l1, l2); 211860e69eedSMike Galbraith 211960e69eedSMike Galbraith spin_lock_irq(l1); 212060e69eedSMike Galbraith spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 212160e69eedSMike Galbraith } 212260e69eedSMike Galbraith 212374602315SPeter Zijlstra static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 212474602315SPeter Zijlstra { 212574602315SPeter Zijlstra if (l1 > l2) 212674602315SPeter Zijlstra swap(l1, l2); 212774602315SPeter Zijlstra 212874602315SPeter Zijlstra raw_spin_lock(l1); 212974602315SPeter Zijlstra raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 213074602315SPeter Zijlstra } 213174602315SPeter Zijlstra 2132391e43daSPeter Zijlstra /* 2133391e43daSPeter Zijlstra * double_rq_lock - safely lock two runqueues 2134391e43daSPeter Zijlstra * 2135391e43daSPeter Zijlstra * Note this does not disable interrupts like task_rq_lock, 2136391e43daSPeter Zijlstra * you need to do so manually before calling. 2137391e43daSPeter Zijlstra */ 2138391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2139391e43daSPeter Zijlstra __acquires(rq1->lock) 2140391e43daSPeter Zijlstra __acquires(rq2->lock) 2141391e43daSPeter Zijlstra { 2142391e43daSPeter Zijlstra BUG_ON(!irqs_disabled()); 2143391e43daSPeter Zijlstra if (rq1 == rq2) { 2144391e43daSPeter Zijlstra raw_spin_lock(&rq1->lock); 2145391e43daSPeter Zijlstra __acquire(rq2->lock); /* Fake it out ;) */ 2146391e43daSPeter Zijlstra } else { 2147391e43daSPeter Zijlstra if (rq1 < rq2) { 2148391e43daSPeter Zijlstra raw_spin_lock(&rq1->lock); 2149391e43daSPeter Zijlstra raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 2150391e43daSPeter Zijlstra } else { 2151391e43daSPeter Zijlstra raw_spin_lock(&rq2->lock); 2152391e43daSPeter Zijlstra raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 2153391e43daSPeter Zijlstra } 2154391e43daSPeter Zijlstra } 2155391e43daSPeter Zijlstra } 2156391e43daSPeter Zijlstra 2157391e43daSPeter Zijlstra /* 2158391e43daSPeter Zijlstra * double_rq_unlock - safely unlock two runqueues 2159391e43daSPeter Zijlstra * 2160391e43daSPeter Zijlstra * Note this does not restore interrupts like task_rq_unlock, 2161391e43daSPeter Zijlstra * you need to do so manually after calling. 2162391e43daSPeter Zijlstra */ 2163391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2164391e43daSPeter Zijlstra __releases(rq1->lock) 2165391e43daSPeter Zijlstra __releases(rq2->lock) 2166391e43daSPeter Zijlstra { 2167391e43daSPeter Zijlstra raw_spin_unlock(&rq1->lock); 2168391e43daSPeter Zijlstra if (rq1 != rq2) 2169391e43daSPeter Zijlstra raw_spin_unlock(&rq2->lock); 2170391e43daSPeter Zijlstra else 2171391e43daSPeter Zijlstra __release(rq2->lock); 2172391e43daSPeter Zijlstra } 2173391e43daSPeter Zijlstra 2174f2cb1360SIngo Molnar extern void set_rq_online (struct rq *rq); 2175f2cb1360SIngo Molnar extern void set_rq_offline(struct rq *rq); 2176f2cb1360SIngo Molnar extern bool sched_smp_initialized; 2177f2cb1360SIngo Molnar 2178391e43daSPeter Zijlstra #else /* CONFIG_SMP */ 2179391e43daSPeter Zijlstra 2180391e43daSPeter Zijlstra /* 2181391e43daSPeter Zijlstra * double_rq_lock - safely lock two runqueues 2182391e43daSPeter Zijlstra * 2183391e43daSPeter Zijlstra * Note this does not disable interrupts like task_rq_lock, 2184391e43daSPeter Zijlstra * you need to do so manually before calling. 2185391e43daSPeter Zijlstra */ 2186391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2187391e43daSPeter Zijlstra __acquires(rq1->lock) 2188391e43daSPeter Zijlstra __acquires(rq2->lock) 2189391e43daSPeter Zijlstra { 2190391e43daSPeter Zijlstra BUG_ON(!irqs_disabled()); 2191391e43daSPeter Zijlstra BUG_ON(rq1 != rq2); 2192391e43daSPeter Zijlstra raw_spin_lock(&rq1->lock); 2193391e43daSPeter Zijlstra __acquire(rq2->lock); /* Fake it out ;) */ 2194391e43daSPeter Zijlstra } 2195391e43daSPeter Zijlstra 2196391e43daSPeter Zijlstra /* 2197391e43daSPeter Zijlstra * double_rq_unlock - safely unlock two runqueues 2198391e43daSPeter Zijlstra * 2199391e43daSPeter Zijlstra * Note this does not restore interrupts like task_rq_unlock, 2200391e43daSPeter Zijlstra * you need to do so manually after calling. 2201391e43daSPeter Zijlstra */ 2202391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2203391e43daSPeter Zijlstra __releases(rq1->lock) 2204391e43daSPeter Zijlstra __releases(rq2->lock) 2205391e43daSPeter Zijlstra { 2206391e43daSPeter Zijlstra BUG_ON(rq1 != rq2); 2207391e43daSPeter Zijlstra raw_spin_unlock(&rq1->lock); 2208391e43daSPeter Zijlstra __release(rq2->lock); 2209391e43daSPeter Zijlstra } 2210391e43daSPeter Zijlstra 2211391e43daSPeter Zijlstra #endif 2212391e43daSPeter Zijlstra 2213391e43daSPeter Zijlstra extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2214391e43daSPeter Zijlstra extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 22156b55c965SSrikar Dronamraju 22166b55c965SSrikar Dronamraju #ifdef CONFIG_SCHED_DEBUG 22179469eb01SPeter Zijlstra extern bool sched_debug_enabled; 22189469eb01SPeter Zijlstra 2219391e43daSPeter Zijlstra extern void print_cfs_stats(struct seq_file *m, int cpu); 2220391e43daSPeter Zijlstra extern void print_rt_stats(struct seq_file *m, int cpu); 2221acb32132SWanpeng Li extern void print_dl_stats(struct seq_file *m, int cpu); 2222f6a34630SMathieu Malaterre extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2223f6a34630SMathieu Malaterre extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2224f6a34630SMathieu Malaterre extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2225397f2378SSrikar Dronamraju #ifdef CONFIG_NUMA_BALANCING 2226397f2378SSrikar Dronamraju extern void 2227397f2378SSrikar Dronamraju show_numa_stats(struct task_struct *p, struct seq_file *m); 2228397f2378SSrikar Dronamraju extern void 2229397f2378SSrikar Dronamraju print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 2230397f2378SSrikar Dronamraju unsigned long tpf, unsigned long gsf, unsigned long gpf); 2231397f2378SSrikar Dronamraju #endif /* CONFIG_NUMA_BALANCING */ 2232397f2378SSrikar Dronamraju #endif /* CONFIG_SCHED_DEBUG */ 2233391e43daSPeter Zijlstra 2234391e43daSPeter Zijlstra extern void init_cfs_rq(struct cfs_rq *cfs_rq); 223507c54f7aSAbel Vesa extern void init_rt_rq(struct rt_rq *rt_rq); 223607c54f7aSAbel Vesa extern void init_dl_rq(struct dl_rq *dl_rq); 2237391e43daSPeter Zijlstra 22381ee14e6cSBen Segall extern void cfs_bandwidth_usage_inc(void); 22391ee14e6cSBen Segall extern void cfs_bandwidth_usage_dec(void); 22401c792db7SSuresh Siddha 22413451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON 224200357f5eSPeter Zijlstra #define NOHZ_BALANCE_KICK_BIT 0 224300357f5eSPeter Zijlstra #define NOHZ_STATS_KICK_BIT 1 2244a22e47a4SPeter Zijlstra 2245a22e47a4SPeter Zijlstra #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 2246b7031a02SPeter Zijlstra #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 2247b7031a02SPeter Zijlstra 2248b7031a02SPeter Zijlstra #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK) 22491c792db7SSuresh Siddha 22501c792db7SSuresh Siddha #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 225120a5c8ccSThomas Gleixner 225200357f5eSPeter Zijlstra extern void nohz_balance_exit_idle(struct rq *rq); 225320a5c8ccSThomas Gleixner #else 225400357f5eSPeter Zijlstra static inline void nohz_balance_exit_idle(struct rq *rq) { } 22551c792db7SSuresh Siddha #endif 225673fbec60SFrederic Weisbecker 2257daec5798SLuca Abeni 2258daec5798SLuca Abeni #ifdef CONFIG_SMP 2259daec5798SLuca Abeni static inline 2260daec5798SLuca Abeni void __dl_update(struct dl_bw *dl_b, s64 bw) 2261daec5798SLuca Abeni { 2262daec5798SLuca Abeni struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); 2263daec5798SLuca Abeni int i; 2264daec5798SLuca Abeni 2265daec5798SLuca Abeni RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2266daec5798SLuca Abeni "sched RCU must be held"); 2267daec5798SLuca Abeni for_each_cpu_and(i, rd->span, cpu_active_mask) { 2268daec5798SLuca Abeni struct rq *rq = cpu_rq(i); 2269daec5798SLuca Abeni 2270daec5798SLuca Abeni rq->dl.extra_bw += bw; 2271daec5798SLuca Abeni } 2272daec5798SLuca Abeni } 2273daec5798SLuca Abeni #else 2274daec5798SLuca Abeni static inline 2275daec5798SLuca Abeni void __dl_update(struct dl_bw *dl_b, s64 bw) 2276daec5798SLuca Abeni { 2277daec5798SLuca Abeni struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); 2278daec5798SLuca Abeni 2279daec5798SLuca Abeni dl->extra_bw += bw; 2280daec5798SLuca Abeni } 2281daec5798SLuca Abeni #endif 2282daec5798SLuca Abeni 2283daec5798SLuca Abeni 228473fbec60SFrederic Weisbecker #ifdef CONFIG_IRQ_TIME_ACCOUNTING 228519d23dbfSFrederic Weisbecker struct irqtime { 228625e2d8c1SFrederic Weisbecker u64 total; 2287a499a5a1SFrederic Weisbecker u64 tick_delta; 228819d23dbfSFrederic Weisbecker u64 irq_start_time; 228919d23dbfSFrederic Weisbecker struct u64_stats_sync sync; 229019d23dbfSFrederic Weisbecker }; 229173fbec60SFrederic Weisbecker 229219d23dbfSFrederic Weisbecker DECLARE_PER_CPU(struct irqtime, cpu_irqtime); 229373fbec60SFrederic Weisbecker 229425e2d8c1SFrederic Weisbecker /* 229525e2d8c1SFrederic Weisbecker * Returns the irqtime minus the softirq time computed by ksoftirqd. 229625e2d8c1SFrederic Weisbecker * Otherwise ksoftirqd's sum_exec_runtime is substracted its own runtime 229725e2d8c1SFrederic Weisbecker * and never move forward. 229825e2d8c1SFrederic Weisbecker */ 229973fbec60SFrederic Weisbecker static inline u64 irq_time_read(int cpu) 230073fbec60SFrederic Weisbecker { 230119d23dbfSFrederic Weisbecker struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 230219d23dbfSFrederic Weisbecker unsigned int seq; 230319d23dbfSFrederic Weisbecker u64 total; 230473fbec60SFrederic Weisbecker 230573fbec60SFrederic Weisbecker do { 230619d23dbfSFrederic Weisbecker seq = __u64_stats_fetch_begin(&irqtime->sync); 230725e2d8c1SFrederic Weisbecker total = irqtime->total; 230819d23dbfSFrederic Weisbecker } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 230973fbec60SFrederic Weisbecker 231019d23dbfSFrederic Weisbecker return total; 231173fbec60SFrederic Weisbecker } 231273fbec60SFrederic Weisbecker #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2313adaf9fcdSRafael J. Wysocki 2314adaf9fcdSRafael J. Wysocki #ifdef CONFIG_CPU_FREQ 2315b10abd0aSJoel Fernandes (Google) DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); 2316adaf9fcdSRafael J. Wysocki 2317adaf9fcdSRafael J. Wysocki /** 2318adaf9fcdSRafael J. Wysocki * cpufreq_update_util - Take a note about CPU utilization changes. 231912bde33dSRafael J. Wysocki * @rq: Runqueue to carry out the update for. 232058919e83SRafael J. Wysocki * @flags: Update reason flags. 2321adaf9fcdSRafael J. Wysocki * 232258919e83SRafael J. Wysocki * This function is called by the scheduler on the CPU whose utilization is 232358919e83SRafael J. Wysocki * being updated. 2324adaf9fcdSRafael J. Wysocki * 2325adaf9fcdSRafael J. Wysocki * It can only be called from RCU-sched read-side critical sections. 2326adaf9fcdSRafael J. Wysocki * 2327adaf9fcdSRafael J. Wysocki * The way cpufreq is currently arranged requires it to evaluate the CPU 2328adaf9fcdSRafael J. Wysocki * performance state (frequency/voltage) on a regular basis to prevent it from 2329adaf9fcdSRafael J. Wysocki * being stuck in a completely inadequate performance level for too long. 2330e0367b12SJuri Lelli * That is not guaranteed to happen if the updates are only triggered from CFS 2331e0367b12SJuri Lelli * and DL, though, because they may not be coming in if only RT tasks are 2332e0367b12SJuri Lelli * active all the time (or there are RT tasks only). 2333adaf9fcdSRafael J. Wysocki * 2334e0367b12SJuri Lelli * As a workaround for that issue, this function is called periodically by the 2335e0367b12SJuri Lelli * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 2336adaf9fcdSRafael J. Wysocki * but that really is a band-aid. Going forward it should be replaced with 2337e0367b12SJuri Lelli * solutions targeted more specifically at RT tasks. 2338adaf9fcdSRafael J. Wysocki */ 233912bde33dSRafael J. Wysocki static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 2340adaf9fcdSRafael J. Wysocki { 234158919e83SRafael J. Wysocki struct update_util_data *data; 234258919e83SRafael J. Wysocki 2343674e7541SViresh Kumar data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, 2344674e7541SViresh Kumar cpu_of(rq))); 234558919e83SRafael J. Wysocki if (data) 234612bde33dSRafael J. Wysocki data->func(data, rq_clock(rq), flags); 234712bde33dSRafael J. Wysocki } 2348adaf9fcdSRafael J. Wysocki #else 234912bde33dSRafael J. Wysocki static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} 2350adaf9fcdSRafael J. Wysocki #endif /* CONFIG_CPU_FREQ */ 2351be53f58fSLinus Torvalds 2352982d9cdcSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK 2353686516b5SValentin Schneider unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 23549d20ad7dSPatrick Bellasi 23559d20ad7dSPatrick Bellasi static __always_inline 2356d2b58a28SValentin Schneider unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 23579d20ad7dSPatrick Bellasi struct task_struct *p) 2358982d9cdcSPatrick Bellasi { 2359686516b5SValentin Schneider unsigned long min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); 2360686516b5SValentin Schneider unsigned long max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); 2361982d9cdcSPatrick Bellasi 23629d20ad7dSPatrick Bellasi if (p) { 23639d20ad7dSPatrick Bellasi min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN)); 23649d20ad7dSPatrick Bellasi max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX)); 23659d20ad7dSPatrick Bellasi } 23669d20ad7dSPatrick Bellasi 2367982d9cdcSPatrick Bellasi /* 2368982d9cdcSPatrick Bellasi * Since CPU's {min,max}_util clamps are MAX aggregated considering 2369982d9cdcSPatrick Bellasi * RUNNABLE tasks with _different_ clamps, we can end up with an 2370982d9cdcSPatrick Bellasi * inversion. Fix it now when the clamps are applied. 2371982d9cdcSPatrick Bellasi */ 2372982d9cdcSPatrick Bellasi if (unlikely(min_util >= max_util)) 2373982d9cdcSPatrick Bellasi return min_util; 2374982d9cdcSPatrick Bellasi 2375982d9cdcSPatrick Bellasi return clamp(util, min_util, max_util); 2376982d9cdcSPatrick Bellasi } 2377982d9cdcSPatrick Bellasi #else /* CONFIG_UCLAMP_TASK */ 2378d2b58a28SValentin Schneider static inline 2379d2b58a28SValentin Schneider unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 23809d20ad7dSPatrick Bellasi struct task_struct *p) 23819d20ad7dSPatrick Bellasi { 23829d20ad7dSPatrick Bellasi return util; 23839d20ad7dSPatrick Bellasi } 2384982d9cdcSPatrick Bellasi #endif /* CONFIG_UCLAMP_TASK */ 2385982d9cdcSPatrick Bellasi 23869bdcb44eSRafael J. Wysocki #ifdef arch_scale_freq_capacity 23879bdcb44eSRafael J. Wysocki # ifndef arch_scale_freq_invariant 238897fb7a0aSIngo Molnar # define arch_scale_freq_invariant() true 23899bdcb44eSRafael J. Wysocki # endif 239097fb7a0aSIngo Molnar #else 239197fb7a0aSIngo Molnar # define arch_scale_freq_invariant() false 23929bdcb44eSRafael J. Wysocki #endif 2393d4edd662SJuri Lelli 239410a35e68SVincent Guittot #ifdef CONFIG_SMP 239510a35e68SVincent Guittot static inline unsigned long capacity_orig_of(int cpu) 239610a35e68SVincent Guittot { 239710a35e68SVincent Guittot return cpu_rq(cpu)->cpu_capacity_orig; 239810a35e68SVincent Guittot } 239910a35e68SVincent Guittot #endif 240010a35e68SVincent Guittot 2401938e5e4bSQuentin Perret /** 2402938e5e4bSQuentin Perret * enum schedutil_type - CPU utilization type 2403938e5e4bSQuentin Perret * @FREQUENCY_UTIL: Utilization used to select frequency 2404938e5e4bSQuentin Perret * @ENERGY_UTIL: Utilization used during energy calculation 2405938e5e4bSQuentin Perret * 2406938e5e4bSQuentin Perret * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time 2407938e5e4bSQuentin Perret * need to be aggregated differently depending on the usage made of them. This 2408938e5e4bSQuentin Perret * enum is used within schedutil_freq_util() to differentiate the types of 2409938e5e4bSQuentin Perret * utilization expected by the callers, and adjust the aggregation accordingly. 2410938e5e4bSQuentin Perret */ 2411938e5e4bSQuentin Perret enum schedutil_type { 2412938e5e4bSQuentin Perret FREQUENCY_UTIL, 2413938e5e4bSQuentin Perret ENERGY_UTIL, 2414938e5e4bSQuentin Perret }; 2415938e5e4bSQuentin Perret 2416af24bde8SPatrick Bellasi #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 2417938e5e4bSQuentin Perret 2418af24bde8SPatrick Bellasi unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, 2419af24bde8SPatrick Bellasi unsigned long max, enum schedutil_type type, 2420af24bde8SPatrick Bellasi struct task_struct *p); 2421938e5e4bSQuentin Perret 24228cc90515SVincent Guittot static inline unsigned long cpu_bw_dl(struct rq *rq) 2423d4edd662SJuri Lelli { 2424d4edd662SJuri Lelli return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 2425d4edd662SJuri Lelli } 2426d4edd662SJuri Lelli 24278cc90515SVincent Guittot static inline unsigned long cpu_util_dl(struct rq *rq) 24288cc90515SVincent Guittot { 24298cc90515SVincent Guittot return READ_ONCE(rq->avg_dl.util_avg); 24308cc90515SVincent Guittot } 24318cc90515SVincent Guittot 2432d4edd662SJuri Lelli static inline unsigned long cpu_util_cfs(struct rq *rq) 2433d4edd662SJuri Lelli { 2434a07630b8SPatrick Bellasi unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); 2435a07630b8SPatrick Bellasi 2436a07630b8SPatrick Bellasi if (sched_feat(UTIL_EST)) { 2437a07630b8SPatrick Bellasi util = max_t(unsigned long, util, 2438a07630b8SPatrick Bellasi READ_ONCE(rq->cfs.avg.util_est.enqueued)); 2439a07630b8SPatrick Bellasi } 2440a07630b8SPatrick Bellasi 2441a07630b8SPatrick Bellasi return util; 2442d4edd662SJuri Lelli } 2443371bf427SVincent Guittot 2444371bf427SVincent Guittot static inline unsigned long cpu_util_rt(struct rq *rq) 2445371bf427SVincent Guittot { 2446dfa444dcSVincent Guittot return READ_ONCE(rq->avg_rt.util_avg); 2447371bf427SVincent Guittot } 2448938e5e4bSQuentin Perret #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2449af24bde8SPatrick Bellasi static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, 2450af24bde8SPatrick Bellasi unsigned long max, enum schedutil_type type, 2451af24bde8SPatrick Bellasi struct task_struct *p) 2452938e5e4bSQuentin Perret { 2453af24bde8SPatrick Bellasi return 0; 2454938e5e4bSQuentin Perret } 2455af24bde8SPatrick Bellasi #endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 24569033ea11SVincent Guittot 245711d4afd4SVincent Guittot #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 24589033ea11SVincent Guittot static inline unsigned long cpu_util_irq(struct rq *rq) 24599033ea11SVincent Guittot { 24609033ea11SVincent Guittot return rq->avg_irq.util_avg; 24619033ea11SVincent Guittot } 24622e62c474SVincent Guittot 24632e62c474SVincent Guittot static inline 24642e62c474SVincent Guittot unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 24652e62c474SVincent Guittot { 24662e62c474SVincent Guittot util *= (max - irq); 24672e62c474SVincent Guittot util /= max; 24682e62c474SVincent Guittot 24692e62c474SVincent Guittot return util; 24702e62c474SVincent Guittot 24712e62c474SVincent Guittot } 24729033ea11SVincent Guittot #else 24739033ea11SVincent Guittot static inline unsigned long cpu_util_irq(struct rq *rq) 24749033ea11SVincent Guittot { 24759033ea11SVincent Guittot return 0; 24769033ea11SVincent Guittot } 24779033ea11SVincent Guittot 24782e62c474SVincent Guittot static inline 24792e62c474SVincent Guittot unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 24802e62c474SVincent Guittot { 24812e62c474SVincent Guittot return util; 24822e62c474SVincent Guittot } 2483794a56ebSJuri Lelli #endif 24846aa140faSQuentin Perret 2485531b5c9fSQuentin Perret #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 24861f74de87SQuentin Perret 2487f8a696f2SPeter Zijlstra #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 2488f8a696f2SPeter Zijlstra 2489f8a696f2SPeter Zijlstra DECLARE_STATIC_KEY_FALSE(sched_energy_present); 2490f8a696f2SPeter Zijlstra 2491f8a696f2SPeter Zijlstra static inline bool sched_energy_enabled(void) 2492f8a696f2SPeter Zijlstra { 2493f8a696f2SPeter Zijlstra return static_branch_unlikely(&sched_energy_present); 2494f8a696f2SPeter Zijlstra } 2495f8a696f2SPeter Zijlstra 2496f8a696f2SPeter Zijlstra #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 2497f8a696f2SPeter Zijlstra 2498f8a696f2SPeter Zijlstra #define perf_domain_span(pd) NULL 2499f8a696f2SPeter Zijlstra static inline bool sched_energy_enabled(void) { return false; } 2500f8a696f2SPeter Zijlstra 2501f8a696f2SPeter Zijlstra #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2502227a4aadSMathieu Desnoyers 2503227a4aadSMathieu Desnoyers #ifdef CONFIG_MEMBARRIER 2504227a4aadSMathieu Desnoyers /* 2505227a4aadSMathieu Desnoyers * The scheduler provides memory barriers required by membarrier between: 2506227a4aadSMathieu Desnoyers * - prior user-space memory accesses and store to rq->membarrier_state, 2507227a4aadSMathieu Desnoyers * - store to rq->membarrier_state and following user-space memory accesses. 2508227a4aadSMathieu Desnoyers * In the same way it provides those guarantees around store to rq->curr. 2509227a4aadSMathieu Desnoyers */ 2510227a4aadSMathieu Desnoyers static inline void membarrier_switch_mm(struct rq *rq, 2511227a4aadSMathieu Desnoyers struct mm_struct *prev_mm, 2512227a4aadSMathieu Desnoyers struct mm_struct *next_mm) 2513227a4aadSMathieu Desnoyers { 2514227a4aadSMathieu Desnoyers int membarrier_state; 2515227a4aadSMathieu Desnoyers 2516227a4aadSMathieu Desnoyers if (prev_mm == next_mm) 2517227a4aadSMathieu Desnoyers return; 2518227a4aadSMathieu Desnoyers 2519227a4aadSMathieu Desnoyers membarrier_state = atomic_read(&next_mm->membarrier_state); 2520227a4aadSMathieu Desnoyers if (READ_ONCE(rq->membarrier_state) == membarrier_state) 2521227a4aadSMathieu Desnoyers return; 2522227a4aadSMathieu Desnoyers 2523227a4aadSMathieu Desnoyers WRITE_ONCE(rq->membarrier_state, membarrier_state); 2524227a4aadSMathieu Desnoyers } 2525227a4aadSMathieu Desnoyers #else 2526227a4aadSMathieu Desnoyers static inline void membarrier_switch_mm(struct rq *rq, 2527227a4aadSMathieu Desnoyers struct mm_struct *prev_mm, 2528227a4aadSMathieu Desnoyers struct mm_struct *next_mm) 2529227a4aadSMathieu Desnoyers { 2530227a4aadSMathieu Desnoyers } 2531227a4aadSMathieu Desnoyers #endif 253252262ee5SMel Gorman 253352262ee5SMel Gorman #ifdef CONFIG_SMP 253452262ee5SMel Gorman static inline bool is_per_cpu_kthread(struct task_struct *p) 253552262ee5SMel Gorman { 253652262ee5SMel Gorman if (!(p->flags & PF_KTHREAD)) 253752262ee5SMel Gorman return false; 253852262ee5SMel Gorman 253952262ee5SMel Gorman if (p->nr_cpus_allowed != 1) 254052262ee5SMel Gorman return false; 254152262ee5SMel Gorman 254252262ee5SMel Gorman return true; 254352262ee5SMel Gorman } 254452262ee5SMel Gorman #endif 2545b3212fe2SThomas Gleixner 2546b3212fe2SThomas Gleixner void swake_up_all_locked(struct swait_queue_head *q); 2547b3212fe2SThomas Gleixner void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 2548