
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>

#include "cpupri.h"

extern __read_mostly int scheduler_running;

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE		(100 * HZ / 1000)
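/*
 * A worked example of the conversions above, assuming the usual values
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140 (both defined elsewhere):
 *
 *	NICE_TO_PRIO(-20) == 100	NICE_TO_PRIO(0)   == 120
 *	NICE_TO_PRIO(19)  == 139	PRIO_TO_NICE(120) == 0
 *	USER_PRIO(120)    == 20		MAX_USER_PRIO     == 40
 *
 * NICE_0_LOAD is then the load weight a single nice-0 task contributes
 * (1024 when SCHED_LOAD_SHIFT is 10).
 */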
/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

static LIST_HEAD(task_groups);

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchal_quota;
	u64 runtime_expires;

	int idle, timer_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
extern struct task_group root_task_group;

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned long nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	struct list_head tasks;
	struct list_head *balance_iterator;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef	CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 * the part of load.weight contributed by tasks
	 */
	unsigned long task_weight;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * Maintaining per-cpu shares distribution for group scheduling
	 *
	 * load_stamp is the last time we updated the load average
	 * load_last is the last time we updated the load average and saw load
	 * load_unacc_exec_time is currently unaccounted execution time
	 */
	u64 load_avg;
	u64 load_period;
	u64 load_stamp, load_last, load_unacc_exec_time;

	unsigned long load_contribution;
#endif /* CONFIG_SMP */
#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_timestamp;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code) must
 * order their lock acquire operations by ascending &runqueue.
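 *
 * As an example of that rule, double_rq_lock() further down in this file
 * always takes the lower-addressed runqueue's lock first and acquires the
 * second one with raw_spin_lock_nested(), so every multi-runqueue locker
 * nests the locks in the same order.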
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned long nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU.
	 * Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ?
	 */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU(struct rq, runqueues);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification with
 * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
 * task it moves into the cgroup. Therefore by holding either of those locks,
 * we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	struct task_group *tg;
	struct cgroup_subsys_state *css;

	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
			lockdep_is_held(&p->pi_lock) ||
			lockdep_is_held(&task_rq(p)->lock));
	tg = container_of(css, struct task_group, css);

	return autogroup_task_group(p, tg);
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
};

#undef SCHED_FEAT

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	raw_spin_unlock_irq(&rq->lock);
#else
	raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
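 *
 * A worked example using the table below: a nice-0 task has weight 1024
 * and a nice-1 task has weight 820. Running side by side they receive
 * 1024/1844 ~= 55.5% and 820/1844 ~= 44.5% of the CPU respectively, i.e.
 * the nice-0 task gets ~25% more CPU time (1024/820 ~= 1.25).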
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
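/*
 * A worked example of the inverse weights above: nice 0 has weight 1024 and
 * prio_to_wmult[20] == 4194304 == 2^32 / 1024, so dividing by the weight can
 * be done with a multiply and a shift:
 *
 *	delta / 1024 == (delta * 4194304) >> 32
 *
 * (modulo rounding). This is how the scheduler's fixed-point weight math
 * avoids divisions in hot paths.
 */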
/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

	CPUACCT_STAT_NSTATS,
};

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

#ifdef CONFIG_SMP

extern void trigger_load_balance(struct rq *rq, int cpu);
extern void idle_balance(int this_cpu, struct rq *this_rq);

#else	/* CONFIG_SMP */

static inline void idle_balance(int cpu, struct rq *rq)
{
}

#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
extern void update_group_power(struct sched_domain *sd, int cpu);
extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern void update_cpu_load(struct rq *this_rq);

#ifdef CONFIG_CGROUP_CPUACCT
#include <linux/cgroup.h>
/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 __percpu *cpuusage;
	struct kernel_cpustat __percpu *cpustat;
};

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}

static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
	if (!ca || !ca->css.cgroup->parent)
		return NULL;
	return cgroup_ca(ca->css.cgroup->parent);
}

extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#endif

static inline void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;
}

static inline void dec_nr_running(struct rq *rq)
{
	rq->nr_running--;
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

void calc_load_account_idle(struct rq *this_rq);

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

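/*
 * Illustrative sketch (not part of this header): callers are expected to pair
 * the two hrtick helpers above roughly like this, where "slice" stands in for
 * whatever remaining-runtime value the sched class computes:
 *
 *	if (hrtick_enabled(rq))
 *		hrtick_start(rq, slice);
 *
 * so rq->hrtick_timer is only armed when the HRTICK feature bit is set and
 * hrtimers really run in high-resolution mode.
 */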
#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations.  This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below.  However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry.  This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
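/*
 * Illustrative sketch (not from this file) of how the two helpers above are
 * meant to be paired by a balancing path that already holds this_rq->lock;
 * "find_pullable_task" is a made-up name:
 *
 *	if (double_lock_balance(this_rq, busiest)) {
 *		// this_rq->lock was dropped and re-taken to respect the lock
 *		// order, so any state sampled earlier must be re-checked.
 *	}
 *	p = find_pullable_task(busiest, this_cpu);
 *	...
 *	double_unlock_balance(this_rq, busiest);
 *
 * On return, this_rq->lock is still held and busiest->lock has been released.
 */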
/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
extern void unthrottle_offline_cfs_rqs(struct rq *rq);

extern void account_cfs_bandwidth_used(int enabled, int was_enabled);

#ifdef CONFIG_NO_HZ
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
	NOHZ_IDLE,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif
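/*
 * Illustrative sketch: the flag bits above are meant to be manipulated with
 * the regular bitops on the per-cpu nohz_flags word, e.g.:
 *
 *	if (!test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
 *		set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 *
 * since nohz_flags(cpu) expands to &cpu_rq(cpu)->nohz_flags, an unsigned
 * long suitable for test_bit()/set_bit()/clear_bit().
 */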