#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern long calc_load_fold_active(struct rq *this_rq);
extern void update_cpu_load_active(struct rq *this_rq);

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE	(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

static inline bool dl_time_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}
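/*
 * Editor's example (illustrative, not part of the original header):
 * dl_time_before() compares clock values via a signed difference so that
 * the result stays correct across u64 wraparound. With hypothetical
 * values a = (u64)-10 (just before the wrap) and b = 5 (just after it):
 *
 *	(s64)(a - b) == -15  =>  dl_time_before(a, b) is true
 *
 * whereas the plain unsigned test 'a < b' would be false, wrongly
 * ordering the two timestamps.
 */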
/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It, in turn, can be changed by writing on its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};
static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
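/*
 * Editor's example (illustrative, not part of the original header):
 * bandwidths here are fixed-point fractions with 1 << 20 meaning "100%
 * of one CPU" (see to_ratio() in core.c). Suppose a root domain with 2
 * CPUs, dl_b->bw set to 95% (0.95 * (1 << 20)) and 1.5 CPUs' worth of
 * bandwidth already admitted (total_bw == 1.5 * (1 << 20)). Admitting a
 * new task with runtime/period == 0.5 (new_bw == 0.5 * (1 << 20),
 * old_bw == 0) gives:
 *
 *	0.95 * 2 == 1.90  <  1.5 - 0 + 0.5 == 2.0
 *
 * so __dl_overflow() returns true and the task must be rejected.
 */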
extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, timer_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	atomic_long_t load_avg;
	atomic_t runnable_avg;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large;
 * the same goes for the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
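/*
 * Editor's example (illustrative, not part of the original header): a
 * visitor that counts task groups. Pass it as @down and use tg_nop for
 * @up; a non-zero return from a visitor aborts the walk and is
 * propagated back to the caller.
 *
 *	static int tg_count(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	rcu_read_lock();
 *	walk_tg_tree(tg_count, tg_nop, &count);
 *	rcu_read_unlock();
 */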
extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
#endif

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */
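/*
 * Editor's example (illustrative, not part of the original header):
 * cfs_bandwidth expresses a group's CPU limit as quota runtime per
 * period. With the cpu cgroup's cfs_period_us = 100000 and
 * cfs_quota_us = 50000, the group may consume at most 50ms of CPU time
 * every 100ms, i.e. half a CPU; a quota larger than the period
 * (e.g. 200000) allows more than one CPU's worth of time on SMP. Once
 * the quota is exhausted the group's cfs_rqs are throttled until
 * period_timer refills the runtime.
 */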
/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS Load tracking
	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
	 * This allows for the description of both thread and group usage (in
	 * the FAIR_GROUP_SCHED case).
	 * runnable_load_avg is the sum of the load_avg_contrib of the
	 * sched_entities on the rq.
	 * blocked_load_avg is similar to runnable_load_avg except that it is
	 * computed over the blocked sched_entities on the rq.
	 * utilization_load_avg is the sum of the average running time of the
	 * sched_entities on the rq.
	 */
	unsigned long runnable_load_avg, blocked_load_avg, utilization_load_avg;
	atomic64_t decay_counter;
	u64 last_decay;
	atomic_long_t removed_load;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* Required to track per-cpu representation of a task_group */
	u32 tg_runnable_contrib;
	unsigned long tg_load_contrib;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};
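/*
 * Editor's example (illustrative, not part of the original header):
 * rt_time accumulates the RT class's execution time within the current
 * period and is compared against rt_runtime. With the default sysctls,
 * sched_rt_period_us = 1000000 and sched_rt_runtime_us = 950000, RT
 * tasks may consume at most 950ms of every second; when rt_time exceeds
 * rt_runtime, rt_throttled is set and the RT class yields the CPU to
 * lower classes until the period timer replenishes the budget.
 */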
/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */
/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that locks multiple runqueues (such as the load
 * balancing or the thread migration code) must acquire the locks in
 * ascending runqueue-address order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ_COMMON
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;

	struct sched_avg avg;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU.
	 * Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ?
	 */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return ACCESS_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}
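/*
 * Editor's example (illustrative, not part of the original header): a
 * scheduling-clock update can be skipped when the caller knows the
 * clock was refreshed a moment ago. A path that has just updated the
 * clock and is about to reschedule might do:
 *
 *	lockdep_assert_held(&rq->lock);
 *	update_rq_clock(rq);
 *	rq_clock_skip_update(rq, true);	// request: skip the next update
 *
 * __schedule() then promotes the request (RQCF_REQ_SKIP) to
 * RQCF_ACT_SKIP so the redundant clock update is elided.
 */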
#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}
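/*
 * Editor's example (illustrative, not part of the original header): the
 * per-cpu pointers declared below cache such lookups. sd_llc, for
 * instance, is (re)computed on topology changes roughly as:
 *
 *	struct sched_domain *sd;
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 *
 * i.e. the widest domain whose CPUs still share a last-level cache, so
 * hot paths can consult it without walking the domain tree.
 */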
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned int capacity;
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
	p->wake_cpu = cpu;
#endif
}
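/*
 * Editor's example (illustrative, not part of the original header): the
 * smp_wmb() above orders the per-task updates before the publication of
 * the new CPU number:
 *
 *	p->field = ...;			// per-task state
 *	smp_wmb();
 *	task_thread_info(p)->cpu = cpu;	// publish the new CPU
 *
 * so a CPU that observes the new ->cpu value (e.g. via task_rq_lock())
 * also observes the per-task updates that preceded it.
 */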
/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

#ifdef CONFIG_NUMA_BALANCING
#define sched_feat_numa(x) sched_feat(x)
#ifdef CONFIG_SCHED_DEBUG
#define numabalancing_enabled sched_feat_numa(NUMA)
#else
extern bool numabalancing_enabled;
#endif /* CONFIG_SCHED_DEBUG */
#else
#define sched_feat_numa(x) (0)
#define numabalancing_enabled (0)
#endif /* CONFIG_NUMA_BALANCING */

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
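/*
 * Editor's example (illustrative, not part of the original header): with
 * the default sysctls sched_rt_period_us = 1000000 and
 * sched_rt_runtime_us = 950000, global_rt_period() returns 10^9 ns and
 * global_rt_runtime() returns 9.5 * 10^8 ns, i.e. a global RT budget of
 * 95%. Writing -1 to /proc/sys/kernel/sched_rt_runtime_us makes
 * global_rt_runtime() return RUNTIME_INF, disabling RT throttling
 * altogether.
 */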
static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}
/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO                3
#define WMULT_IDLEPRIO         1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
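/*
 * Editor's worked example (illustrative, not part of the original
 * header): two CPU-bound tasks share one CPU, one at nice 0 (weight
 * 1024) and one at nice 1 (weight 820). Each receives CPU time in
 * proportion to its weight:
 *
 *	nice 0:  1024 / (1024 + 820) ~= 55.5%
 *	nice 1:   820 / (1024 + 820) ~= 44.5%
 *
 * The ratio 55.5 / 44.5 ~= 1.25 is the per-level multiplier mentioned
 * above: each nice level costs about 10% of the total, and the gap
 * between adjacent levels is ~25%.
 */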
1119391e43daSPeter Zijlstra  */
1120391e43daSPeter Zijlstra static const int prio_to_weight[40] = {
1121391e43daSPeter Zijlstra  /* -20 */     88761,     71755,     56483,     46273,     36291,
1122391e43daSPeter Zijlstra  /* -15 */     29154,     23254,     18705,     14949,     11916,
1123391e43daSPeter Zijlstra  /* -10 */      9548,      7620,      6100,      4904,      3906,
1124391e43daSPeter Zijlstra  /*  -5 */      3121,      2501,      1991,      1586,      1277,
1125391e43daSPeter Zijlstra  /*   0 */      1024,       820,       655,       526,       423,
1126391e43daSPeter Zijlstra  /*   5 */       335,       272,       215,       172,       137,
1127391e43daSPeter Zijlstra  /*  10 */       110,        87,        70,        56,        45,
1128391e43daSPeter Zijlstra  /*  15 */        36,        29,        23,        18,        15,
1129391e43daSPeter Zijlstra };
1130391e43daSPeter Zijlstra 
1131391e43daSPeter Zijlstra /*
1132391e43daSPeter Zijlstra  * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1133391e43daSPeter Zijlstra  *
1134391e43daSPeter Zijlstra  * In cases where the weight does not change often, we can use the
1135391e43daSPeter Zijlstra  * precalculated inverse to speed up arithmetic by turning divisions
1136391e43daSPeter Zijlstra  * into multiplications: load/weight becomes (load * inv_weight) >> 32:
1137391e43daSPeter Zijlstra  */
1138391e43daSPeter Zijlstra static const u32 prio_to_wmult[40] = {
1139391e43daSPeter Zijlstra  /* -20 */     48388,     59856,     76040,     92818,    118348,
1140391e43daSPeter Zijlstra  /* -15 */    147320,    184698,    229616,    287308,    360437,
1141391e43daSPeter Zijlstra  /* -10 */    449829,    563644,    704093,    875809,   1099582,
1142391e43daSPeter Zijlstra  /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
1143391e43daSPeter Zijlstra  /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
1144391e43daSPeter Zijlstra  /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
1145391e43daSPeter Zijlstra  /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
1146391e43daSPeter Zijlstra  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
1147391e43daSPeter Zijlstra };
1148391e43daSPeter Zijlstra 
1149c82ba9faSLi Zefan #define ENQUEUE_WAKEUP		1
1150c82ba9faSLi Zefan #define ENQUEUE_HEAD		2
1151c82ba9faSLi Zefan #ifdef CONFIG_SMP
1152c82ba9faSLi Zefan #define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
1153c82ba9faSLi Zefan #else
1154c82ba9faSLi Zefan #define ENQUEUE_WAKING		0
1155c82ba9faSLi Zefan #endif
1156aab03e05SDario Faggioli #define ENQUEUE_REPLENISH	8
1157c82ba9faSLi Zefan 
1158c82ba9faSLi Zefan #define DEQUEUE_SLEEP		1
1159c82ba9faSLi Zefan 
116037e117c0SPeter Zijlstra #define RETRY_TASK		((void *)-1UL)
116137e117c0SPeter Zijlstra 
1162c82ba9faSLi Zefan struct sched_class {
1163c82ba9faSLi Zefan 	const struct sched_class *next;
1164c82ba9faSLi Zefan 
1165c82ba9faSLi Zefan 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1166c82ba9faSLi Zefan 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1167c82ba9faSLi Zefan 	void (*yield_task) (struct rq *rq);
1168c82ba9faSLi Zefan 	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1169c82ba9faSLi Zefan 
1170c82ba9faSLi Zefan 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1171c82ba9faSLi Zefan 
1172606dba2eSPeter Zijlstra 	/*
1173606dba2eSPeter Zijlstra 	 * The pick_next_task() method that returns the next task is
1174606dba2eSPeter Zijlstra 	 * responsible for calling put_prev_task() on the @prev task, or
1175606dba2eSPeter Zijlstra 	 * for doing something equivalent.
117637e117c0SPeter Zijlstra 	 *
117737e117c0SPeter Zijlstra 	 * May return RETRY_TASK when it finds a higher prio class has runnable
117837e117c0SPeter Zijlstra 	 * tasks.
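	 *
	 * For illustration, a hedged sketch of the caller's side, loosely
	 * modelled on the core scheduler's pick loop (not the verbatim
	 * core.c code):
	 *
	 *	again:
	 *		for_each_class(class) {
	 *			p = class->pick_next_task(rq, prev);
	 *			if (p) {
	 *				if (unlikely(p == RETRY_TASK))
	 *					goto again;
	 *				return p;
	 *			}
	 *		}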
1179606dba2eSPeter Zijlstra 	 */
1180606dba2eSPeter Zijlstra 	struct task_struct * (*pick_next_task) (struct rq *rq,
1181606dba2eSPeter Zijlstra 						struct task_struct *prev);
1182c82ba9faSLi Zefan 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1183c82ba9faSLi Zefan 
1184c82ba9faSLi Zefan #ifdef CONFIG_SMP
1185ac66f547SPeter Zijlstra 	int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1186c82ba9faSLi Zefan 	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
1187c82ba9faSLi Zefan 
1188c82ba9faSLi Zefan 	void (*post_schedule) (struct rq *this_rq);
1189c82ba9faSLi Zefan 	void (*task_waking) (struct task_struct *task);
1190c82ba9faSLi Zefan 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1191c82ba9faSLi Zefan 
1192c82ba9faSLi Zefan 	void (*set_cpus_allowed)(struct task_struct *p,
1193c82ba9faSLi Zefan 				 const struct cpumask *newmask);
1194c82ba9faSLi Zefan 
1195c82ba9faSLi Zefan 	void (*rq_online)(struct rq *rq);
1196c82ba9faSLi Zefan 	void (*rq_offline)(struct rq *rq);
1197c82ba9faSLi Zefan #endif
1198c82ba9faSLi Zefan 
1199c82ba9faSLi Zefan 	void (*set_curr_task) (struct rq *rq);
1200c82ba9faSLi Zefan 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1201c82ba9faSLi Zefan 	void (*task_fork) (struct task_struct *p);
1202e6c390f2SDario Faggioli 	void (*task_dead) (struct task_struct *p);
1203c82ba9faSLi Zefan 
120467dfa1b7SKirill Tkhai 	/*
120567dfa1b7SKirill Tkhai 	 * The switched_from() call is allowed to drop rq->lock, therefore we
120667dfa1b7SKirill Tkhai 	 * cannot assume the switched_from/switched_to pair is serialized by
120767dfa1b7SKirill Tkhai 	 * rq->lock. They are however serialized by p->pi_lock.
120867dfa1b7SKirill Tkhai 	 */
1209c82ba9faSLi Zefan 	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1210c82ba9faSLi Zefan 	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1211c82ba9faSLi Zefan 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1212c82ba9faSLi Zefan 			      int oldprio);
1213c82ba9faSLi Zefan 
1214c82ba9faSLi Zefan 	unsigned int (*get_rr_interval) (struct rq *rq,
1215c82ba9faSLi Zefan 					 struct task_struct *task);
1216c82ba9faSLi Zefan 
12176e998916SStanislaw Gruszka 	void (*update_curr) (struct rq *rq);
12186e998916SStanislaw Gruszka 
1219c82ba9faSLi Zefan #ifdef CONFIG_FAIR_GROUP_SCHED
1220c82ba9faSLi Zefan 	void (*task_move_group) (struct task_struct *p, int on_rq);
1221c82ba9faSLi Zefan #endif
1222c82ba9faSLi Zefan };
1223391e43daSPeter Zijlstra 
12243f1d2a31SPeter Zijlstra static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
12253f1d2a31SPeter Zijlstra {
12263f1d2a31SPeter Zijlstra 	prev->sched_class->put_prev_task(rq, prev);
12273f1d2a31SPeter Zijlstra }
12283f1d2a31SPeter Zijlstra 
1229391e43daSPeter Zijlstra #define sched_class_highest (&stop_sched_class)
1230391e43daSPeter Zijlstra #define for_each_class(class) \
1231391e43daSPeter Zijlstra    for (class = sched_class_highest; class; class = class->next)
1232391e43daSPeter Zijlstra 
1233391e43daSPeter Zijlstra extern const struct sched_class stop_sched_class;
1234aab03e05SDario Faggioli extern const struct sched_class dl_sched_class;
1235391e43daSPeter Zijlstra extern const struct sched_class rt_sched_class;
1236391e43daSPeter Zijlstra extern const struct sched_class fair_sched_class;
1237391e43daSPeter Zijlstra extern const struct sched_class idle_sched_class;
1238391e43daSPeter Zijlstra 
1239391e43daSPeter Zijlstra 
1240391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1241391e43daSPeter Zijlstra 
124263b2ca30SNicolas Pitre extern void update_group_capacity(struct sched_domain *sd, int cpu);
1243b719203bSLi Zefan 
12447caff66fSDaniel Lezcano extern void trigger_load_balance(struct rq *rq);
1245391e43daSPeter Zijlstra 
1246642dbc39SVincent Guittot extern void idle_enter_fair(struct rq *this_rq);
1247642dbc39SVincent Guittot extern void idle_exit_fair(struct rq *this_rq);
1248642dbc39SVincent Guittot 
1249dc877341SPeter Zijlstra #else
1250dc877341SPeter Zijlstra 
1251dc877341SPeter Zijlstra static inline void idle_enter_fair(struct rq *rq) { }
1252dc877341SPeter Zijlstra static inline void idle_exit_fair(struct rq *rq) { }
1253dc877341SPeter Zijlstra 
1254391e43daSPeter Zijlstra #endif
1255391e43daSPeter Zijlstra 
1256442bf3aaSDaniel Lezcano #ifdef CONFIG_CPU_IDLE
1257442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq,
1258442bf3aaSDaniel Lezcano 				  struct cpuidle_state *idle_state)
1259442bf3aaSDaniel Lezcano {
1260442bf3aaSDaniel Lezcano 	rq->idle_state = idle_state;
1261442bf3aaSDaniel Lezcano }
1262442bf3aaSDaniel Lezcano 
1263442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1264442bf3aaSDaniel Lezcano {
1265442bf3aaSDaniel Lezcano 	WARN_ON(!rcu_read_lock_held());
1266442bf3aaSDaniel Lezcano 	return rq->idle_state;
1267442bf3aaSDaniel Lezcano }
1268442bf3aaSDaniel Lezcano #else
1269442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq,
1270442bf3aaSDaniel Lezcano 				  struct cpuidle_state *idle_state)
1271442bf3aaSDaniel Lezcano {
1272442bf3aaSDaniel Lezcano }
1273442bf3aaSDaniel Lezcano 
1274442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1275442bf3aaSDaniel Lezcano {
1276442bf3aaSDaniel Lezcano 	return NULL;
1277442bf3aaSDaniel Lezcano }
1278442bf3aaSDaniel Lezcano #endif
1279442bf3aaSDaniel Lezcano 
1280391e43daSPeter Zijlstra extern void sysrq_sched_debug_show(void);
1281391e43daSPeter Zijlstra extern void sched_init_granularity(void);
1282391e43daSPeter Zijlstra extern void update_max_interval(void);
12831baca4ceSJuri Lelli 
12841baca4ceSJuri Lelli extern void init_sched_dl_class(void);
1285391e43daSPeter Zijlstra extern void init_sched_rt_class(void);
1286391e43daSPeter Zijlstra extern void init_sched_fair_class(void);
1288391e43daSPeter Zijlstra 
12898875125eSKirill Tkhai extern void resched_curr(struct rq *rq);
1290391e43daSPeter Zijlstra extern void resched_cpu(int cpu);
1291391e43daSPeter Zijlstra 
1292391e43daSPeter Zijlstra extern struct rt_bandwidth def_rt_bandwidth;
1293391e43daSPeter Zijlstra extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1294391e43daSPeter Zijlstra 
1295332ac17eSDario Faggioli extern struct dl_bandwidth def_dl_bandwidth;
1296332ac17eSDario Faggioli extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1297aab03e05SDario Faggioli extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1298aab03e05SDario Faggioli 
1299332ac17eSDario Faggioli unsigned long to_ratio(u64 period, u64 runtime);
1300332ac17eSDario Faggioli 
1301556061b0SPeter Zijlstra extern void update_idle_cpu_load(struct rq *this_rq);
1302391e43daSPeter Zijlstra 
1303a75cdaa9SAlex Shi extern void init_task_runnable_average(struct task_struct *p);
1304a75cdaa9SAlex Shi 
130572465447SKirill Tkhai static inline void add_nr_running(struct rq *rq, unsigned count)
1306391e43daSPeter Zijlstra {
130772465447SKirill Tkhai 	unsigned prev_nr = rq->nr_running;
130872465447SKirill Tkhai 
130972465447SKirill Tkhai 	rq->nr_running = prev_nr + count;
13109f3660c2SFrederic Weisbecker 
131172465447SKirill Tkhai 	if (prev_nr < 2 && rq->nr_running >= 2) {
13124486edd1STim Chen #ifdef CONFIG_SMP
13134486edd1STim Chen 		if (!rq->rd->overload)
13144486edd1STim Chen 			rq->rd->overload = true;
13154486edd1STim Chen #endif
13164486edd1STim Chen 
13174486edd1STim Chen #ifdef CONFIG_NO_HZ_FULL
13189f3660c2SFrederic Weisbecker 		if (tick_nohz_full_cpu(rq->cpu)) {
13193882ec64SFrederic Weisbecker 			/*
13203882ec64SFrederic Weisbecker 			 * Tick is needed if more than one task runs on a CPU.
13213882ec64SFrederic Weisbecker 			 * Send the target an IPI to kick it out of nohz mode.
13223882ec64SFrederic Weisbecker 			 *
13233882ec64SFrederic Weisbecker 			 * We assume that the IPI implies a full memory barrier
13243882ec64SFrederic Weisbecker 			 * and that the new value of rq->nr_running is visible
13253882ec64SFrederic Weisbecker 			 * on reception from the target.
13263882ec64SFrederic Weisbecker 			 */
1327fd2ac4f4SFrederic Weisbecker 			tick_nohz_full_kick_cpu(rq->cpu);
13289f3660c2SFrederic Weisbecker 		}
13299f3660c2SFrederic Weisbecker #endif
1330391e43daSPeter Zijlstra 	}
13314486edd1STim Chen }
1332391e43daSPeter Zijlstra 
133372465447SKirill Tkhai static inline void sub_nr_running(struct rq *rq, unsigned count)
1334391e43daSPeter Zijlstra {
133572465447SKirill Tkhai 	rq->nr_running -= count;
1336391e43daSPeter Zijlstra }
1337391e43daSPeter Zijlstra 
1338265f22a9SFrederic Weisbecker static inline void rq_last_tick_reset(struct rq *rq)
1339265f22a9SFrederic Weisbecker {
1340265f22a9SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
1341265f22a9SFrederic Weisbecker 	rq->last_sched_tick = jiffies;
1342265f22a9SFrederic Weisbecker #endif
1343265f22a9SFrederic Weisbecker }
1344265f22a9SFrederic Weisbecker 
1345391e43daSPeter Zijlstra extern void update_rq_clock(struct rq *rq);
1346391e43daSPeter Zijlstra 
1347391e43daSPeter Zijlstra extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1348391e43daSPeter Zijlstra extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1349391e43daSPeter Zijlstra 
1350391e43daSPeter Zijlstra extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1351391e43daSPeter Zijlstra 
1352391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_time_avg;
1353391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_nr_migrate;
1354391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_migration_cost;
1355391e43daSPeter Zijlstra 
1356391e43daSPeter Zijlstra static inline u64 sched_avg_period(void)
1357391e43daSPeter Zijlstra {
1358391e43daSPeter Zijlstra 	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1359391e43daSPeter Zijlstra }
1360391e43daSPeter Zijlstra 
1361391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_HRTICK
1362391e43daSPeter Zijlstra 
1363391e43daSPeter Zijlstra /*
1364391e43daSPeter Zijlstra  * Use hrtick when:
1365391e43daSPeter Zijlstra  *  - enabled by features
1366391e43daSPeter Zijlstra  *  - hrtimer is actually high res
1367391e43daSPeter Zijlstra  */
1368391e43daSPeter Zijlstra static inline int hrtick_enabled(struct rq *rq)
1369391e43daSPeter Zijlstra {
1370391e43daSPeter Zijlstra 	if (!sched_feat(HRTICK))
1371391e43daSPeter Zijlstra 		return 0;
1372391e43daSPeter Zijlstra 	if (!cpu_active(cpu_of(rq)))
1373391e43daSPeter Zijlstra 		return 0;
1374391e43daSPeter Zijlstra 	return hrtimer_is_hres_active(&rq->hrtick_timer);
1375391e43daSPeter Zijlstra }
1376391e43daSPeter Zijlstra 
1377391e43daSPeter Zijlstra void hrtick_start(struct rq *rq, u64 delay);
1378391e43daSPeter Zijlstra 
1379b39e66eaSMike Galbraith #else
1380b39e66eaSMike Galbraith 
1381b39e66eaSMike Galbraith static inline int hrtick_enabled(struct rq *rq)
1382b39e66eaSMike Galbraith {
1383b39e66eaSMike Galbraith 	return 0;
1384b39e66eaSMike Galbraith }
1385b39e66eaSMike Galbraith 
1386391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_HRTICK */
1387391e43daSPeter Zijlstra 
1388391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1389391e43daSPeter Zijlstra extern void sched_avg_update(struct rq *rq);
1390*dfbca41fSPeter Zijlstra 
1391*dfbca41fSPeter Zijlstra #ifndef arch_scale_freq_capacity
1392*dfbca41fSPeter Zijlstra static __always_inline
1393*dfbca41fSPeter Zijlstra unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
1394*dfbca41fSPeter Zijlstra {
1395*dfbca41fSPeter Zijlstra 	return SCHED_CAPACITY_SCALE;
1396*dfbca41fSPeter Zijlstra }
1397*dfbca41fSPeter Zijlstra #endif
1398b5b4860dSVincent Guittot 
1399391e43daSPeter Zijlstra static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1400391e43daSPeter Zijlstra {
1401b5b4860dSVincent Guittot 	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
1402391e43daSPeter Zijlstra 	sched_avg_update(rq);
1403391e43daSPeter Zijlstra }
1404391e43daSPeter Zijlstra #else
1405391e43daSPeter Zijlstra static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1406391e43daSPeter Zijlstra static inline void sched_avg_update(struct rq *rq) { }
1407391e43daSPeter Zijlstra #endif
1408391e43daSPeter Zijlstra 
1409391e43daSPeter Zijlstra extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
1410391e43daSPeter Zijlstra 
14113960c8c0SPeter Zijlstra /*
14123960c8c0SPeter Zijlstra  * __task_rq_lock - lock the rq @p resides on.
14133960c8c0SPeter Zijlstra  */
14143960c8c0SPeter Zijlstra static inline struct rq *__task_rq_lock(struct task_struct *p)
14153960c8c0SPeter Zijlstra 	__acquires(rq->lock)
14163960c8c0SPeter Zijlstra {
14173960c8c0SPeter Zijlstra 	struct rq *rq;
14183960c8c0SPeter Zijlstra 
14193960c8c0SPeter Zijlstra 	lockdep_assert_held(&p->pi_lock);
14203960c8c0SPeter Zijlstra 
14213960c8c0SPeter Zijlstra 	for (;;) {
14223960c8c0SPeter Zijlstra 		rq = task_rq(p);
14233960c8c0SPeter Zijlstra 		raw_spin_lock(&rq->lock);
14243960c8c0SPeter Zijlstra 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
14253960c8c0SPeter Zijlstra 			return rq;
14263960c8c0SPeter Zijlstra 		raw_spin_unlock(&rq->lock);
14273960c8c0SPeter Zijlstra 
14283960c8c0SPeter Zijlstra 		while (unlikely(task_on_rq_migrating(p)))
14293960c8c0SPeter Zijlstra 			cpu_relax();
14303960c8c0SPeter Zijlstra 	}
14313960c8c0SPeter Zijlstra }
14323960c8c0SPeter Zijlstra 
14333960c8c0SPeter Zijlstra /*
14343960c8c0SPeter Zijlstra  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
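 *
 * Typical usage (a hedged sketch; the caller provides the flags storage):
 *
 *	unsigned long flags;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &flags);
 *	... @p cannot change its runqueue here ...
 *	task_rq_unlock(rq, p, &flags);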
14353960c8c0SPeter Zijlstra  */
14363960c8c0SPeter Zijlstra static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
14373960c8c0SPeter Zijlstra 	__acquires(p->pi_lock)
14383960c8c0SPeter Zijlstra 	__acquires(rq->lock)
14393960c8c0SPeter Zijlstra {
14403960c8c0SPeter Zijlstra 	struct rq *rq;
14413960c8c0SPeter Zijlstra 
14423960c8c0SPeter Zijlstra 	for (;;) {
14433960c8c0SPeter Zijlstra 		raw_spin_lock_irqsave(&p->pi_lock, *flags);
14443960c8c0SPeter Zijlstra 		rq = task_rq(p);
14453960c8c0SPeter Zijlstra 		raw_spin_lock(&rq->lock);
14463960c8c0SPeter Zijlstra 		/*
14473960c8c0SPeter Zijlstra 		 *	move_queued_task()		task_rq_lock()
14483960c8c0SPeter Zijlstra 		 *
14493960c8c0SPeter Zijlstra 		 *	ACQUIRE (rq->lock)
14503960c8c0SPeter Zijlstra 		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
14513960c8c0SPeter Zijlstra 		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
14523960c8c0SPeter Zijlstra 		 *	[S] ->cpu = new_cpu		[L] task_rq()
14533960c8c0SPeter Zijlstra 		 *					[L] ->on_rq
14543960c8c0SPeter Zijlstra 		 *	RELEASE (rq->lock)
14553960c8c0SPeter Zijlstra 		 *
14563960c8c0SPeter Zijlstra 		 * If we observe the old cpu in task_rq_lock, the acquire of
14573960c8c0SPeter Zijlstra 		 * the old rq->lock will fully serialize against the stores.
14583960c8c0SPeter Zijlstra 		 *
14593960c8c0SPeter Zijlstra 		 * If we observe the new cpu in task_rq_lock, the acquire will
14603960c8c0SPeter Zijlstra 		 * pair with the WMB to ensure we must then also see migrating.
14613960c8c0SPeter Zijlstra 		 */
14623960c8c0SPeter Zijlstra 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
14633960c8c0SPeter Zijlstra 			return rq;
14643960c8c0SPeter Zijlstra 		raw_spin_unlock(&rq->lock);
14653960c8c0SPeter Zijlstra 		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
14663960c8c0SPeter Zijlstra 
14673960c8c0SPeter Zijlstra 		while (unlikely(task_on_rq_migrating(p)))
14683960c8c0SPeter Zijlstra 			cpu_relax();
14693960c8c0SPeter Zijlstra 	}
14703960c8c0SPeter Zijlstra }
14713960c8c0SPeter Zijlstra 
14723960c8c0SPeter Zijlstra static inline void __task_rq_unlock(struct rq *rq)
14733960c8c0SPeter Zijlstra 	__releases(rq->lock)
14743960c8c0SPeter Zijlstra {
14753960c8c0SPeter Zijlstra 	raw_spin_unlock(&rq->lock);
14763960c8c0SPeter Zijlstra }
14773960c8c0SPeter Zijlstra 
14783960c8c0SPeter Zijlstra static inline void
14793960c8c0SPeter Zijlstra task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
14803960c8c0SPeter Zijlstra 	__releases(rq->lock)
14813960c8c0SPeter Zijlstra 	__releases(p->pi_lock)
14823960c8c0SPeter Zijlstra {
14833960c8c0SPeter Zijlstra 	raw_spin_unlock(&rq->lock);
14843960c8c0SPeter Zijlstra 	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
14853960c8c0SPeter Zijlstra }
14863960c8c0SPeter Zijlstra 
1487391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1488391e43daSPeter Zijlstra #ifdef CONFIG_PREEMPT
1489391e43daSPeter Zijlstra 
1490391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1491391e43daSPeter Zijlstra 
1492391e43daSPeter Zijlstra /*
1493391e43daSPeter Zijlstra  * fair double_lock_balance: Safely acquires both rq->locks in a fair
1494391e43daSPeter Zijlstra  * way at the expense of forcing extra atomic operations in all
1495391e43daSPeter Zijlstra  * invocations.  This assures that the double_lock is acquired using the
1496391e43daSPeter Zijlstra  * same underlying policy as the spinlock_t on this architecture, which
1497391e43daSPeter Zijlstra  * reduces latency compared to the unfair variant below.  However, it
1498391e43daSPeter Zijlstra  * also adds more overhead and therefore may reduce throughput.
1499391e43daSPeter Zijlstra  */
1500391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1501391e43daSPeter Zijlstra 	__releases(this_rq->lock)
1502391e43daSPeter Zijlstra 	__acquires(busiest->lock)
1503391e43daSPeter Zijlstra 	__acquires(this_rq->lock)
1504391e43daSPeter Zijlstra {
1505391e43daSPeter Zijlstra 	raw_spin_unlock(&this_rq->lock);
1506391e43daSPeter Zijlstra 	double_rq_lock(this_rq, busiest);
1507391e43daSPeter Zijlstra 
1508391e43daSPeter Zijlstra 	return 1;
1509391e43daSPeter Zijlstra }
1510391e43daSPeter Zijlstra 
1511391e43daSPeter Zijlstra #else
1512391e43daSPeter Zijlstra /*
1513391e43daSPeter Zijlstra  * Unfair double_lock_balance: Optimizes throughput at the expense of
1514391e43daSPeter Zijlstra  * latency by eliminating extra atomic operations when the locks are
1515391e43daSPeter Zijlstra  * already in proper order on entry.  This favors lower cpu-ids and will
1516391e43daSPeter Zijlstra  * grant the double lock to lower cpus over higher ids under contention,
1517391e43daSPeter Zijlstra  * regardless of entry order into the function.
1518391e43daSPeter Zijlstra  */
1519391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1520391e43daSPeter Zijlstra 	__releases(this_rq->lock)
1521391e43daSPeter Zijlstra 	__acquires(busiest->lock)
1522391e43daSPeter Zijlstra 	__acquires(this_rq->lock)
1523391e43daSPeter Zijlstra {
1524391e43daSPeter Zijlstra 	int ret = 0;
1525391e43daSPeter Zijlstra 
1526391e43daSPeter Zijlstra 	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1527391e43daSPeter Zijlstra 		if (busiest < this_rq) {
1528391e43daSPeter Zijlstra 			raw_spin_unlock(&this_rq->lock);
1529391e43daSPeter Zijlstra 			raw_spin_lock(&busiest->lock);
1530391e43daSPeter Zijlstra 			raw_spin_lock_nested(&this_rq->lock,
1531391e43daSPeter Zijlstra 					     SINGLE_DEPTH_NESTING);
1532391e43daSPeter Zijlstra 			ret = 1;
1533391e43daSPeter Zijlstra 		} else
1534391e43daSPeter Zijlstra 			raw_spin_lock_nested(&busiest->lock,
1535391e43daSPeter Zijlstra 					     SINGLE_DEPTH_NESTING);
1536391e43daSPeter Zijlstra 	}
1537391e43daSPeter Zijlstra 	return ret;
1538391e43daSPeter Zijlstra }
1539391e43daSPeter Zijlstra 
1540391e43daSPeter Zijlstra #endif /* CONFIG_PREEMPT */
1541391e43daSPeter Zijlstra 
1542391e43daSPeter Zijlstra /*
1543391e43daSPeter Zijlstra  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
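 *
 * Returns 1 when this_rq->lock was dropped (and re-taken) to acquire the
 * locks in the proper order, so callers must re-validate anything they
 * read under this_rq->lock beforehand, e.g. (a hedged sketch):
 *
 *	if (double_lock_balance(this_rq, busiest))
 *		... this_rq->lock was released; re-check task and rq state ...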
1544391e43daSPeter Zijlstra  */
1545391e43daSPeter Zijlstra static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1546391e43daSPeter Zijlstra {
1547391e43daSPeter Zijlstra 	if (unlikely(!irqs_disabled())) {
1548391e43daSPeter Zijlstra 		/* printk() doesn't work well under rq->lock */
1549391e43daSPeter Zijlstra 		raw_spin_unlock(&this_rq->lock);
1550391e43daSPeter Zijlstra 		BUG_ON(1);
1551391e43daSPeter Zijlstra 	}
1552391e43daSPeter Zijlstra 
1553391e43daSPeter Zijlstra 	return _double_lock_balance(this_rq, busiest);
1554391e43daSPeter Zijlstra }
1555391e43daSPeter Zijlstra 
1556391e43daSPeter Zijlstra static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1557391e43daSPeter Zijlstra 	__releases(busiest->lock)
1558391e43daSPeter Zijlstra {
1559391e43daSPeter Zijlstra 	raw_spin_unlock(&busiest->lock);
1560391e43daSPeter Zijlstra 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1561391e43daSPeter Zijlstra }
1562391e43daSPeter Zijlstra 
156374602315SPeter Zijlstra static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
156474602315SPeter Zijlstra {
156574602315SPeter Zijlstra 	if (l1 > l2)
156674602315SPeter Zijlstra 		swap(l1, l2);
156774602315SPeter Zijlstra 
156874602315SPeter Zijlstra 	spin_lock(l1);
156974602315SPeter Zijlstra 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
157074602315SPeter Zijlstra }
157174602315SPeter Zijlstra 
157260e69eedSMike Galbraith static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
157360e69eedSMike Galbraith {
157460e69eedSMike Galbraith 	if (l1 > l2)
157560e69eedSMike Galbraith 		swap(l1, l2);
157660e69eedSMike Galbraith 
157760e69eedSMike Galbraith 	spin_lock_irq(l1);
157860e69eedSMike Galbraith 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
157960e69eedSMike Galbraith }
158060e69eedSMike Galbraith 
158174602315SPeter Zijlstra static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
158274602315SPeter Zijlstra {
158374602315SPeter Zijlstra 	if (l1 > l2)
158474602315SPeter Zijlstra 		swap(l1, l2);
158574602315SPeter Zijlstra 
158674602315SPeter Zijlstra 	raw_spin_lock(l1);
158774602315SPeter Zijlstra 	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
158874602315SPeter Zijlstra }
158974602315SPeter Zijlstra 
1590391e43daSPeter Zijlstra /*
1591391e43daSPeter Zijlstra  * double_rq_lock - safely lock two runqueues
1592391e43daSPeter Zijlstra  *
1593391e43daSPeter Zijlstra  * Note this does not disable interrupts like task_rq_lock,
1594391e43daSPeter Zijlstra  * you need to do so manually before calling.
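 *
 * E.g. (a hedged sketch; the caller owns the IRQ state):
 *
 *	local_irq_disable();
 *	double_rq_lock(rq1, rq2);
 *	...
 *	double_rq_unlock(rq1, rq2);
 *	local_irq_enable();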
1595391e43daSPeter Zijlstra  */
1596391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1597391e43daSPeter Zijlstra 	__acquires(rq1->lock)
1598391e43daSPeter Zijlstra 	__acquires(rq2->lock)
1599391e43daSPeter Zijlstra {
1600391e43daSPeter Zijlstra 	BUG_ON(!irqs_disabled());
1601391e43daSPeter Zijlstra 	if (rq1 == rq2) {
1602391e43daSPeter Zijlstra 		raw_spin_lock(&rq1->lock);
1603391e43daSPeter Zijlstra 		__acquire(rq2->lock);	/* Fake it out ;) */
1604391e43daSPeter Zijlstra 	} else {
1605391e43daSPeter Zijlstra 		if (rq1 < rq2) {
1606391e43daSPeter Zijlstra 			raw_spin_lock(&rq1->lock);
1607391e43daSPeter Zijlstra 			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1608391e43daSPeter Zijlstra 		} else {
1609391e43daSPeter Zijlstra 			raw_spin_lock(&rq2->lock);
1610391e43daSPeter Zijlstra 			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1611391e43daSPeter Zijlstra 		}
1612391e43daSPeter Zijlstra 	}
1613391e43daSPeter Zijlstra }
1614391e43daSPeter Zijlstra 
1615391e43daSPeter Zijlstra /*
1616391e43daSPeter Zijlstra  * double_rq_unlock - safely unlock two runqueues
1617391e43daSPeter Zijlstra  *
1618391e43daSPeter Zijlstra  * Note this does not restore interrupts like task_rq_unlock,
1619391e43daSPeter Zijlstra  * you need to do so manually after calling.
1620391e43daSPeter Zijlstra  */
1621391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1622391e43daSPeter Zijlstra 	__releases(rq1->lock)
1623391e43daSPeter Zijlstra 	__releases(rq2->lock)
1624391e43daSPeter Zijlstra {
1625391e43daSPeter Zijlstra 	raw_spin_unlock(&rq1->lock);
1626391e43daSPeter Zijlstra 	if (rq1 != rq2)
1627391e43daSPeter Zijlstra 		raw_spin_unlock(&rq2->lock);
1628391e43daSPeter Zijlstra 	else
1629391e43daSPeter Zijlstra 		__release(rq2->lock);
1630391e43daSPeter Zijlstra }
1631391e43daSPeter Zijlstra 
1632391e43daSPeter Zijlstra #else /* CONFIG_SMP */
1633391e43daSPeter Zijlstra 
1634391e43daSPeter Zijlstra /*
1635391e43daSPeter Zijlstra  * double_rq_lock - safely lock two runqueues
1636391e43daSPeter Zijlstra  *
1637391e43daSPeter Zijlstra  * Note this does not disable interrupts like task_rq_lock,
1638391e43daSPeter Zijlstra  * you need to do so manually before calling.
1639391e43daSPeter Zijlstra  */
1640391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1641391e43daSPeter Zijlstra 	__acquires(rq1->lock)
1642391e43daSPeter Zijlstra 	__acquires(rq2->lock)
1643391e43daSPeter Zijlstra {
1644391e43daSPeter Zijlstra 	BUG_ON(!irqs_disabled());
1645391e43daSPeter Zijlstra 	BUG_ON(rq1 != rq2);
1646391e43daSPeter Zijlstra 	raw_spin_lock(&rq1->lock);
1647391e43daSPeter Zijlstra 	__acquire(rq2->lock);	/* Fake it out ;) */
1648391e43daSPeter Zijlstra }
1649391e43daSPeter Zijlstra 
1650391e43daSPeter Zijlstra /*
1651391e43daSPeter Zijlstra  * double_rq_unlock - safely unlock two runqueues
1652391e43daSPeter Zijlstra  *
1653391e43daSPeter Zijlstra  * Note this does not restore interrupts like task_rq_unlock,
1654391e43daSPeter Zijlstra  * you need to do so manually after calling.
1655391e43daSPeter Zijlstra  */
1656391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1657391e43daSPeter Zijlstra 	__releases(rq1->lock)
1658391e43daSPeter Zijlstra 	__releases(rq2->lock)
1659391e43daSPeter Zijlstra {
1660391e43daSPeter Zijlstra 	BUG_ON(rq1 != rq2);
1661391e43daSPeter Zijlstra 	raw_spin_unlock(&rq1->lock);
1662391e43daSPeter Zijlstra 	__release(rq2->lock);
1663391e43daSPeter Zijlstra }
1664391e43daSPeter Zijlstra 
1665391e43daSPeter Zijlstra #endif
1666391e43daSPeter Zijlstra 
1667391e43daSPeter Zijlstra extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1668391e43daSPeter Zijlstra extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
1669391e43daSPeter Zijlstra extern void print_cfs_stats(struct seq_file *m, int cpu);
1670391e43daSPeter Zijlstra extern void print_rt_stats(struct seq_file *m, int cpu);
1671acb32132SWanpeng Li extern void print_dl_stats(struct seq_file *m, int cpu);
1672391e43daSPeter Zijlstra 
1673391e43daSPeter Zijlstra extern void init_cfs_rq(struct cfs_rq *cfs_rq);
1674391e43daSPeter Zijlstra extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
1675aab03e05SDario Faggioli extern void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq);
1676391e43daSPeter Zijlstra 
16771ee14e6cSBen Segall extern void cfs_bandwidth_usage_inc(void);
16781ee14e6cSBen Segall extern void cfs_bandwidth_usage_dec(void);
16791c792db7SSuresh Siddha 
16803451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON
16811c792db7SSuresh Siddha enum rq_nohz_flag_bits {
16821c792db7SSuresh Siddha 	NOHZ_TICK_STOPPED,
16831c792db7SSuresh Siddha 	NOHZ_BALANCE_KICK,
16841c792db7SSuresh Siddha };
16851c792db7SSuresh Siddha 
16861c792db7SSuresh Siddha #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
16871c792db7SSuresh Siddha #endif
168873fbec60SFrederic Weisbecker 
168973fbec60SFrederic Weisbecker #ifdef CONFIG_IRQ_TIME_ACCOUNTING
169073fbec60SFrederic Weisbecker 
169173fbec60SFrederic Weisbecker DECLARE_PER_CPU(u64, cpu_hardirq_time);
169273fbec60SFrederic Weisbecker DECLARE_PER_CPU(u64, cpu_softirq_time);
169373fbec60SFrederic Weisbecker 
169473fbec60SFrederic Weisbecker #ifndef CONFIG_64BIT
169573fbec60SFrederic Weisbecker DECLARE_PER_CPU(seqcount_t, irq_time_seq);
169673fbec60SFrederic Weisbecker 
169773fbec60SFrederic Weisbecker static inline void irq_time_write_begin(void)
169873fbec60SFrederic Weisbecker {
169973fbec60SFrederic Weisbecker 	__this_cpu_inc(irq_time_seq.sequence);
170073fbec60SFrederic Weisbecker 	smp_wmb();
170173fbec60SFrederic Weisbecker }
170273fbec60SFrederic Weisbecker 
170373fbec60SFrederic Weisbecker static inline void irq_time_write_end(void)
170473fbec60SFrederic Weisbecker {
170573fbec60SFrederic Weisbecker 	smp_wmb();
170673fbec60SFrederic Weisbecker 	__this_cpu_inc(irq_time_seq.sequence);
170773fbec60SFrederic Weisbecker }
170873fbec60SFrederic Weisbecker 
170973fbec60SFrederic Weisbecker static inline u64 irq_time_read(int cpu)
171073fbec60SFrederic Weisbecker {
171173fbec60SFrederic Weisbecker 	u64 irq_time;
171273fbec60SFrederic Weisbecker 	unsigned seq;
171373fbec60SFrederic Weisbecker 
171473fbec60SFrederic Weisbecker 	do {
171573fbec60SFrederic Weisbecker 		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
171673fbec60SFrederic Weisbecker 		irq_time = per_cpu(cpu_softirq_time, cpu) +
171773fbec60SFrederic Weisbecker 			   per_cpu(cpu_hardirq_time, cpu);
171873fbec60SFrederic Weisbecker 	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
171973fbec60SFrederic Weisbecker 
172073fbec60SFrederic Weisbecker 	return irq_time;
172173fbec60SFrederic Weisbecker }
172273fbec60SFrederic Weisbecker #else /* CONFIG_64BIT */
172373fbec60SFrederic Weisbecker static inline void irq_time_write_begin(void)
172473fbec60SFrederic Weisbecker {
172573fbec60SFrederic Weisbecker }
172673fbec60SFrederic Weisbecker 
172773fbec60SFrederic Weisbecker static inline void irq_time_write_end(void)
172873fbec60SFrederic Weisbecker {
172973fbec60SFrederic Weisbecker }
173073fbec60SFrederic Weisbecker 
173173fbec60SFrederic Weisbecker static inline u64 irq_time_read(int cpu)
173273fbec60SFrederic Weisbecker {
173373fbec60SFrederic Weisbecker 	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
173473fbec60SFrederic Weisbecker }
173573fbec60SFrederic Weisbecker #endif /* CONFIG_64BIT */
173673fbec60SFrederic Weisbecker #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
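/*
 * Writer-side usage sketch for the irq_time primitives above, loosely
 * modelled on the irqtime accounting path in cputime.c (assumes the
 * caller has computed @delta; details such as the ksoftirqd check are
 * elided):
 *
 *	irq_time_write_begin();
 *	if (hardirq_count())
 *		__this_cpu_add(cpu_hardirq_time, delta);
 *	else if (in_serving_softirq())
 *		__this_cpu_add(cpu_softirq_time, delta);
 *	irq_time_write_end();
 */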