/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
rt_rq->rt_nr_boosted = 0; 150391e43daSPeter Zijlstra rt_rq->rq = rq; 151391e43daSPeter Zijlstra rt_rq->tg = tg; 152391e43daSPeter Zijlstra 153391e43daSPeter Zijlstra tg->rt_rq[cpu] = rt_rq; 154391e43daSPeter Zijlstra tg->rt_se[cpu] = rt_se; 155391e43daSPeter Zijlstra 156391e43daSPeter Zijlstra if (!rt_se) 157391e43daSPeter Zijlstra return; 158391e43daSPeter Zijlstra 159391e43daSPeter Zijlstra if (!parent) 160391e43daSPeter Zijlstra rt_se->rt_rq = &rq->rt; 161391e43daSPeter Zijlstra else 162391e43daSPeter Zijlstra rt_se->rt_rq = parent->my_q; 163391e43daSPeter Zijlstra 164391e43daSPeter Zijlstra rt_se->my_q = rt_rq; 165391e43daSPeter Zijlstra rt_se->parent = parent; 166391e43daSPeter Zijlstra INIT_LIST_HEAD(&rt_se->run_list); 167391e43daSPeter Zijlstra } 168391e43daSPeter Zijlstra 169391e43daSPeter Zijlstra int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) 170391e43daSPeter Zijlstra { 171391e43daSPeter Zijlstra struct rt_rq *rt_rq; 172391e43daSPeter Zijlstra struct sched_rt_entity *rt_se; 173391e43daSPeter Zijlstra int i; 174391e43daSPeter Zijlstra 175391e43daSPeter Zijlstra tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); 176391e43daSPeter Zijlstra if (!tg->rt_rq) 177391e43daSPeter Zijlstra goto err; 178391e43daSPeter Zijlstra tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); 179391e43daSPeter Zijlstra if (!tg->rt_se) 180391e43daSPeter Zijlstra goto err; 181391e43daSPeter Zijlstra 182391e43daSPeter Zijlstra init_rt_bandwidth(&tg->rt_bandwidth, 183391e43daSPeter Zijlstra ktime_to_ns(def_rt_bandwidth.rt_period), 0); 184391e43daSPeter Zijlstra 185391e43daSPeter Zijlstra for_each_possible_cpu(i) { 186391e43daSPeter Zijlstra rt_rq = kzalloc_node(sizeof(struct rt_rq), 187391e43daSPeter Zijlstra GFP_KERNEL, cpu_to_node(i)); 188391e43daSPeter Zijlstra if (!rt_rq) 189391e43daSPeter Zijlstra goto err; 190391e43daSPeter Zijlstra 191391e43daSPeter Zijlstra rt_se = kzalloc_node(sizeof(struct sched_rt_entity), 192391e43daSPeter Zijlstra GFP_KERNEL, cpu_to_node(i)); 193391e43daSPeter Zijlstra if (!rt_se) 194391e43daSPeter Zijlstra goto err_free_rq; 195391e43daSPeter Zijlstra 196391e43daSPeter Zijlstra init_rt_rq(rt_rq, cpu_rq(i)); 197391e43daSPeter Zijlstra rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; 198391e43daSPeter Zijlstra init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); 199391e43daSPeter Zijlstra } 200391e43daSPeter Zijlstra 201391e43daSPeter Zijlstra return 1; 202391e43daSPeter Zijlstra 203391e43daSPeter Zijlstra err_free_rq: 204391e43daSPeter Zijlstra kfree(rt_rq); 205391e43daSPeter Zijlstra err: 206391e43daSPeter Zijlstra return 0; 207391e43daSPeter Zijlstra } 208391e43daSPeter Zijlstra 209391e43daSPeter Zijlstra #else /* CONFIG_RT_GROUP_SCHED */ 210391e43daSPeter Zijlstra 211391e43daSPeter Zijlstra #define rt_entity_is_task(rt_se) (1) 212391e43daSPeter Zijlstra 213391e43daSPeter Zijlstra static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) 214391e43daSPeter Zijlstra { 215391e43daSPeter Zijlstra return container_of(rt_se, struct task_struct, rt); 216391e43daSPeter Zijlstra } 217391e43daSPeter Zijlstra 218391e43daSPeter Zijlstra static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) 219391e43daSPeter Zijlstra { 220391e43daSPeter Zijlstra return container_of(rt_rq, struct rq, rt); 221391e43daSPeter Zijlstra } 222391e43daSPeter Zijlstra 223653d07a6SKirill Tkhai static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) 224391e43daSPeter Zijlstra { 225391e43daSPeter Zijlstra struct task_struct *p = 
rt_task_of(rt_se); 226653d07a6SKirill Tkhai 227653d07a6SKirill Tkhai return task_rq(p); 228653d07a6SKirill Tkhai } 229653d07a6SKirill Tkhai 230653d07a6SKirill Tkhai static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) 231653d07a6SKirill Tkhai { 232653d07a6SKirill Tkhai struct rq *rq = rq_of_rt_se(rt_se); 233391e43daSPeter Zijlstra 234391e43daSPeter Zijlstra return &rq->rt; 235391e43daSPeter Zijlstra } 236391e43daSPeter Zijlstra 237391e43daSPeter Zijlstra void free_rt_sched_group(struct task_group *tg) { } 238391e43daSPeter Zijlstra 239391e43daSPeter Zijlstra int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) 240391e43daSPeter Zijlstra { 241391e43daSPeter Zijlstra return 1; 242391e43daSPeter Zijlstra } 243391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */ 244391e43daSPeter Zijlstra 245391e43daSPeter Zijlstra #ifdef CONFIG_SMP 246391e43daSPeter Zijlstra 24738033c37SPeter Zijlstra static int pull_rt_task(struct rq *this_rq); 24838033c37SPeter Zijlstra 249dc877341SPeter Zijlstra static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) 250dc877341SPeter Zijlstra { 251dc877341SPeter Zijlstra /* Try to pull RT tasks here if we lower this rq's prio */ 252dc877341SPeter Zijlstra return rq->rt.highest_prio.curr > prev->prio; 253dc877341SPeter Zijlstra } 254dc877341SPeter Zijlstra 255391e43daSPeter Zijlstra static inline int rt_overloaded(struct rq *rq) 256391e43daSPeter Zijlstra { 257391e43daSPeter Zijlstra return atomic_read(&rq->rd->rto_count); 258391e43daSPeter Zijlstra } 259391e43daSPeter Zijlstra 260391e43daSPeter Zijlstra static inline void rt_set_overload(struct rq *rq) 261391e43daSPeter Zijlstra { 262391e43daSPeter Zijlstra if (!rq->online) 263391e43daSPeter Zijlstra return; 264391e43daSPeter Zijlstra 265391e43daSPeter Zijlstra cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); 266391e43daSPeter Zijlstra /* 267391e43daSPeter Zijlstra * Make sure the mask is visible before we set 268391e43daSPeter Zijlstra * the overload count. That is checked to determine 269391e43daSPeter Zijlstra * if we should look at the mask. It would be a shame 270391e43daSPeter Zijlstra * if we looked at the mask, but the mask was not 271391e43daSPeter Zijlstra * updated yet. 2727c3f2ab7SPeter Zijlstra * 2737c3f2ab7SPeter Zijlstra * Matched by the barrier in pull_rt_task(). 
274391e43daSPeter Zijlstra */ 2757c3f2ab7SPeter Zijlstra smp_wmb(); 276391e43daSPeter Zijlstra atomic_inc(&rq->rd->rto_count); 277391e43daSPeter Zijlstra } 278391e43daSPeter Zijlstra 279391e43daSPeter Zijlstra static inline void rt_clear_overload(struct rq *rq) 280391e43daSPeter Zijlstra { 281391e43daSPeter Zijlstra if (!rq->online) 282391e43daSPeter Zijlstra return; 283391e43daSPeter Zijlstra 284391e43daSPeter Zijlstra /* the order here really doesn't matter */ 285391e43daSPeter Zijlstra atomic_dec(&rq->rd->rto_count); 286391e43daSPeter Zijlstra cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); 287391e43daSPeter Zijlstra } 288391e43daSPeter Zijlstra 289391e43daSPeter Zijlstra static void update_rt_migration(struct rt_rq *rt_rq) 290391e43daSPeter Zijlstra { 291391e43daSPeter Zijlstra if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) { 292391e43daSPeter Zijlstra if (!rt_rq->overloaded) { 293391e43daSPeter Zijlstra rt_set_overload(rq_of_rt_rq(rt_rq)); 294391e43daSPeter Zijlstra rt_rq->overloaded = 1; 295391e43daSPeter Zijlstra } 296391e43daSPeter Zijlstra } else if (rt_rq->overloaded) { 297391e43daSPeter Zijlstra rt_clear_overload(rq_of_rt_rq(rt_rq)); 298391e43daSPeter Zijlstra rt_rq->overloaded = 0; 299391e43daSPeter Zijlstra } 300391e43daSPeter Zijlstra } 301391e43daSPeter Zijlstra 302391e43daSPeter Zijlstra static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 303391e43daSPeter Zijlstra { 30429baa747SPeter Zijlstra struct task_struct *p; 30529baa747SPeter Zijlstra 306391e43daSPeter Zijlstra if (!rt_entity_is_task(rt_se)) 307391e43daSPeter Zijlstra return; 308391e43daSPeter Zijlstra 30929baa747SPeter Zijlstra p = rt_task_of(rt_se); 310391e43daSPeter Zijlstra rt_rq = &rq_of_rt_rq(rt_rq)->rt; 311391e43daSPeter Zijlstra 312391e43daSPeter Zijlstra rt_rq->rt_nr_total++; 31329baa747SPeter Zijlstra if (p->nr_cpus_allowed > 1) 314391e43daSPeter Zijlstra rt_rq->rt_nr_migratory++; 315391e43daSPeter Zijlstra 316391e43daSPeter Zijlstra update_rt_migration(rt_rq); 317391e43daSPeter Zijlstra } 318391e43daSPeter Zijlstra 319391e43daSPeter Zijlstra static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 320391e43daSPeter Zijlstra { 32129baa747SPeter Zijlstra struct task_struct *p; 32229baa747SPeter Zijlstra 323391e43daSPeter Zijlstra if (!rt_entity_is_task(rt_se)) 324391e43daSPeter Zijlstra return; 325391e43daSPeter Zijlstra 32629baa747SPeter Zijlstra p = rt_task_of(rt_se); 327391e43daSPeter Zijlstra rt_rq = &rq_of_rt_rq(rt_rq)->rt; 328391e43daSPeter Zijlstra 329391e43daSPeter Zijlstra rt_rq->rt_nr_total--; 33029baa747SPeter Zijlstra if (p->nr_cpus_allowed > 1) 331391e43daSPeter Zijlstra rt_rq->rt_nr_migratory--; 332391e43daSPeter Zijlstra 333391e43daSPeter Zijlstra update_rt_migration(rt_rq); 334391e43daSPeter Zijlstra } 335391e43daSPeter Zijlstra 336391e43daSPeter Zijlstra static inline int has_pushable_tasks(struct rq *rq) 337391e43daSPeter Zijlstra { 338391e43daSPeter Zijlstra return !plist_head_empty(&rq->rt.pushable_tasks); 339391e43daSPeter Zijlstra } 340391e43daSPeter Zijlstra 341dc877341SPeter Zijlstra static inline void set_post_schedule(struct rq *rq) 342dc877341SPeter Zijlstra { 343dc877341SPeter Zijlstra /* 344dc877341SPeter Zijlstra * We detect this state here so that we can avoid taking the RQ 345dc877341SPeter Zijlstra * lock again later if there is no need to push 346dc877341SPeter Zijlstra */ 347dc877341SPeter Zijlstra rq->post_schedule = has_pushable_tasks(rq); 348dc877341SPeter Zijlstra } 349dc877341SPeter Zijlstra 350391e43daSPeter 
Zijlstra static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) 351391e43daSPeter Zijlstra { 352391e43daSPeter Zijlstra plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); 353391e43daSPeter Zijlstra plist_node_init(&p->pushable_tasks, p->prio); 354391e43daSPeter Zijlstra plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); 355391e43daSPeter Zijlstra 356391e43daSPeter Zijlstra /* Update the highest prio pushable task */ 357391e43daSPeter Zijlstra if (p->prio < rq->rt.highest_prio.next) 358391e43daSPeter Zijlstra rq->rt.highest_prio.next = p->prio; 359391e43daSPeter Zijlstra } 360391e43daSPeter Zijlstra 361391e43daSPeter Zijlstra static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) 362391e43daSPeter Zijlstra { 363391e43daSPeter Zijlstra plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); 364391e43daSPeter Zijlstra 365391e43daSPeter Zijlstra /* Update the new highest prio pushable task */ 366391e43daSPeter Zijlstra if (has_pushable_tasks(rq)) { 367391e43daSPeter Zijlstra p = plist_first_entry(&rq->rt.pushable_tasks, 368391e43daSPeter Zijlstra struct task_struct, pushable_tasks); 369391e43daSPeter Zijlstra rq->rt.highest_prio.next = p->prio; 370391e43daSPeter Zijlstra } else 371391e43daSPeter Zijlstra rq->rt.highest_prio.next = MAX_RT_PRIO; 372391e43daSPeter Zijlstra } 373391e43daSPeter Zijlstra 374391e43daSPeter Zijlstra #else 375391e43daSPeter Zijlstra 376391e43daSPeter Zijlstra static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) 377391e43daSPeter Zijlstra { 378391e43daSPeter Zijlstra } 379391e43daSPeter Zijlstra 380391e43daSPeter Zijlstra static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) 381391e43daSPeter Zijlstra { 382391e43daSPeter Zijlstra } 383391e43daSPeter Zijlstra 384391e43daSPeter Zijlstra static inline 385391e43daSPeter Zijlstra void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 386391e43daSPeter Zijlstra { 387391e43daSPeter Zijlstra } 388391e43daSPeter Zijlstra 389391e43daSPeter Zijlstra static inline 390391e43daSPeter Zijlstra void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 391391e43daSPeter Zijlstra { 392391e43daSPeter Zijlstra } 393391e43daSPeter Zijlstra 394dc877341SPeter Zijlstra static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) 395dc877341SPeter Zijlstra { 396dc877341SPeter Zijlstra return false; 397dc877341SPeter Zijlstra } 398dc877341SPeter Zijlstra 399dc877341SPeter Zijlstra static inline int pull_rt_task(struct rq *this_rq) 400dc877341SPeter Zijlstra { 401dc877341SPeter Zijlstra return 0; 402dc877341SPeter Zijlstra } 403dc877341SPeter Zijlstra 404dc877341SPeter Zijlstra static inline void set_post_schedule(struct rq *rq) 405dc877341SPeter Zijlstra { 406dc877341SPeter Zijlstra } 407391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 408391e43daSPeter Zijlstra 409f4ebcbc0SKirill Tkhai static void enqueue_top_rt_rq(struct rt_rq *rt_rq); 410f4ebcbc0SKirill Tkhai static void dequeue_top_rt_rq(struct rt_rq *rt_rq); 411f4ebcbc0SKirill Tkhai 412391e43daSPeter Zijlstra static inline int on_rt_rq(struct sched_rt_entity *rt_se) 413391e43daSPeter Zijlstra { 414391e43daSPeter Zijlstra return !list_empty(&rt_se->run_list); 415391e43daSPeter Zijlstra } 416391e43daSPeter Zijlstra 417391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 418391e43daSPeter Zijlstra 419391e43daSPeter Zijlstra static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) 420391e43daSPeter Zijlstra { 421391e43daSPeter Zijlstra if (!rt_rq->tg) 
422391e43daSPeter Zijlstra return RUNTIME_INF; 423391e43daSPeter Zijlstra 424391e43daSPeter Zijlstra return rt_rq->rt_runtime; 425391e43daSPeter Zijlstra } 426391e43daSPeter Zijlstra 427391e43daSPeter Zijlstra static inline u64 sched_rt_period(struct rt_rq *rt_rq) 428391e43daSPeter Zijlstra { 429391e43daSPeter Zijlstra return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); 430391e43daSPeter Zijlstra } 431391e43daSPeter Zijlstra 432391e43daSPeter Zijlstra typedef struct task_group *rt_rq_iter_t; 433391e43daSPeter Zijlstra 434391e43daSPeter Zijlstra static inline struct task_group *next_task_group(struct task_group *tg) 435391e43daSPeter Zijlstra { 436391e43daSPeter Zijlstra do { 437391e43daSPeter Zijlstra tg = list_entry_rcu(tg->list.next, 438391e43daSPeter Zijlstra typeof(struct task_group), list); 439391e43daSPeter Zijlstra } while (&tg->list != &task_groups && task_group_is_autogroup(tg)); 440391e43daSPeter Zijlstra 441391e43daSPeter Zijlstra if (&tg->list == &task_groups) 442391e43daSPeter Zijlstra tg = NULL; 443391e43daSPeter Zijlstra 444391e43daSPeter Zijlstra return tg; 445391e43daSPeter Zijlstra } 446391e43daSPeter Zijlstra 447391e43daSPeter Zijlstra #define for_each_rt_rq(rt_rq, iter, rq) \ 448391e43daSPeter Zijlstra for (iter = container_of(&task_groups, typeof(*iter), list); \ 449391e43daSPeter Zijlstra (iter = next_task_group(iter)) && \ 450391e43daSPeter Zijlstra (rt_rq = iter->rt_rq[cpu_of(rq)]);) 451391e43daSPeter Zijlstra 452391e43daSPeter Zijlstra #define for_each_sched_rt_entity(rt_se) \ 453391e43daSPeter Zijlstra for (; rt_se; rt_se = rt_se->parent) 454391e43daSPeter Zijlstra 455391e43daSPeter Zijlstra static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) 456391e43daSPeter Zijlstra { 457391e43daSPeter Zijlstra return rt_se->my_q; 458391e43daSPeter Zijlstra } 459391e43daSPeter Zijlstra 460391e43daSPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head); 461391e43daSPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se); 462391e43daSPeter Zijlstra 463391e43daSPeter Zijlstra static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) 464391e43daSPeter Zijlstra { 465391e43daSPeter Zijlstra struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; 4668875125eSKirill Tkhai struct rq *rq = rq_of_rt_rq(rt_rq); 467391e43daSPeter Zijlstra struct sched_rt_entity *rt_se; 468391e43daSPeter Zijlstra 4698875125eSKirill Tkhai int cpu = cpu_of(rq); 470391e43daSPeter Zijlstra 471391e43daSPeter Zijlstra rt_se = rt_rq->tg->rt_se[cpu]; 472391e43daSPeter Zijlstra 473391e43daSPeter Zijlstra if (rt_rq->rt_nr_running) { 474f4ebcbc0SKirill Tkhai if (!rt_se) 475f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(rt_rq); 476f4ebcbc0SKirill Tkhai else if (!on_rt_rq(rt_se)) 477391e43daSPeter Zijlstra enqueue_rt_entity(rt_se, false); 478f4ebcbc0SKirill Tkhai 479391e43daSPeter Zijlstra if (rt_rq->highest_prio.curr < curr->prio) 4808875125eSKirill Tkhai resched_curr(rq); 481391e43daSPeter Zijlstra } 482391e43daSPeter Zijlstra } 483391e43daSPeter Zijlstra 484391e43daSPeter Zijlstra static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) 485391e43daSPeter Zijlstra { 486391e43daSPeter Zijlstra struct sched_rt_entity *rt_se; 487391e43daSPeter Zijlstra int cpu = cpu_of(rq_of_rt_rq(rt_rq)); 488391e43daSPeter Zijlstra 489391e43daSPeter Zijlstra rt_se = rt_rq->tg->rt_se[cpu]; 490391e43daSPeter Zijlstra 491f4ebcbc0SKirill Tkhai if (!rt_se) 492f4ebcbc0SKirill Tkhai dequeue_top_rt_rq(rt_rq); 493f4ebcbc0SKirill Tkhai else if (on_rt_rq(rt_se)) 494391e43daSPeter Zijlstra 
dequeue_rt_entity(rt_se); 495391e43daSPeter Zijlstra } 496391e43daSPeter Zijlstra 49746383648SKirill Tkhai static inline int rt_rq_throttled(struct rt_rq *rt_rq) 49846383648SKirill Tkhai { 49946383648SKirill Tkhai return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; 50046383648SKirill Tkhai } 50146383648SKirill Tkhai 502391e43daSPeter Zijlstra static int rt_se_boosted(struct sched_rt_entity *rt_se) 503391e43daSPeter Zijlstra { 504391e43daSPeter Zijlstra struct rt_rq *rt_rq = group_rt_rq(rt_se); 505391e43daSPeter Zijlstra struct task_struct *p; 506391e43daSPeter Zijlstra 507391e43daSPeter Zijlstra if (rt_rq) 508391e43daSPeter Zijlstra return !!rt_rq->rt_nr_boosted; 509391e43daSPeter Zijlstra 510391e43daSPeter Zijlstra p = rt_task_of(rt_se); 511391e43daSPeter Zijlstra return p->prio != p->normal_prio; 512391e43daSPeter Zijlstra } 513391e43daSPeter Zijlstra 514391e43daSPeter Zijlstra #ifdef CONFIG_SMP 515391e43daSPeter Zijlstra static inline const struct cpumask *sched_rt_period_mask(void) 516391e43daSPeter Zijlstra { 517424c93feSNathan Zimmer return this_rq()->rd->span; 518391e43daSPeter Zijlstra } 519391e43daSPeter Zijlstra #else 520391e43daSPeter Zijlstra static inline const struct cpumask *sched_rt_period_mask(void) 521391e43daSPeter Zijlstra { 522391e43daSPeter Zijlstra return cpu_online_mask; 523391e43daSPeter Zijlstra } 524391e43daSPeter Zijlstra #endif 525391e43daSPeter Zijlstra 526391e43daSPeter Zijlstra static inline 527391e43daSPeter Zijlstra struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) 528391e43daSPeter Zijlstra { 529391e43daSPeter Zijlstra return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; 530391e43daSPeter Zijlstra } 531391e43daSPeter Zijlstra 532391e43daSPeter Zijlstra static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) 533391e43daSPeter Zijlstra { 534391e43daSPeter Zijlstra return &rt_rq->tg->rt_bandwidth; 535391e43daSPeter Zijlstra } 536391e43daSPeter Zijlstra 537391e43daSPeter Zijlstra #else /* !CONFIG_RT_GROUP_SCHED */ 538391e43daSPeter Zijlstra 539391e43daSPeter Zijlstra static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) 540391e43daSPeter Zijlstra { 541391e43daSPeter Zijlstra return rt_rq->rt_runtime; 542391e43daSPeter Zijlstra } 543391e43daSPeter Zijlstra 544391e43daSPeter Zijlstra static inline u64 sched_rt_period(struct rt_rq *rt_rq) 545391e43daSPeter Zijlstra { 546391e43daSPeter Zijlstra return ktime_to_ns(def_rt_bandwidth.rt_period); 547391e43daSPeter Zijlstra } 548391e43daSPeter Zijlstra 549391e43daSPeter Zijlstra typedef struct rt_rq *rt_rq_iter_t; 550391e43daSPeter Zijlstra 551391e43daSPeter Zijlstra #define for_each_rt_rq(rt_rq, iter, rq) \ 552391e43daSPeter Zijlstra for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) 553391e43daSPeter Zijlstra 554391e43daSPeter Zijlstra #define for_each_sched_rt_entity(rt_se) \ 555391e43daSPeter Zijlstra for (; rt_se; rt_se = NULL) 556391e43daSPeter Zijlstra 557391e43daSPeter Zijlstra static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) 558391e43daSPeter Zijlstra { 559391e43daSPeter Zijlstra return NULL; 560391e43daSPeter Zijlstra } 561391e43daSPeter Zijlstra 562391e43daSPeter Zijlstra static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) 563391e43daSPeter Zijlstra { 564f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_rq(rt_rq); 565f4ebcbc0SKirill Tkhai 566f4ebcbc0SKirill Tkhai if (!rt_rq->rt_nr_running) 567f4ebcbc0SKirill Tkhai return; 568f4ebcbc0SKirill Tkhai 569f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(rt_rq); 5708875125eSKirill 
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime; see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
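		 *
		 * For example (illustrative, assuming the usual defaults of
		 * a 1000ms period and 950ms runtime per rt_rq): on a 4-CPU
		 * root domain, a neighbour that has consumed 150ms of its
		 * 950ms still has 800ms spare, so we take 800ms / 4 = 200ms
		 * from it; the clamp below keeps our own rt_runtime from
		 * ever exceeding rt_period.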
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have; that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
710391e43daSPeter Zijlstra */ 711391e43daSPeter Zijlstra if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) 712391e43daSPeter Zijlstra continue; 713391e43daSPeter Zijlstra 714391e43daSPeter Zijlstra raw_spin_lock(&iter->rt_runtime_lock); 715391e43daSPeter Zijlstra if (want > 0) { 716391e43daSPeter Zijlstra diff = min_t(s64, iter->rt_runtime, want); 717391e43daSPeter Zijlstra iter->rt_runtime -= diff; 718391e43daSPeter Zijlstra want -= diff; 719391e43daSPeter Zijlstra } else { 720391e43daSPeter Zijlstra iter->rt_runtime -= want; 721391e43daSPeter Zijlstra want -= want; 722391e43daSPeter Zijlstra } 723391e43daSPeter Zijlstra raw_spin_unlock(&iter->rt_runtime_lock); 724391e43daSPeter Zijlstra 725391e43daSPeter Zijlstra if (!want) 726391e43daSPeter Zijlstra break; 727391e43daSPeter Zijlstra } 728391e43daSPeter Zijlstra 729391e43daSPeter Zijlstra raw_spin_lock(&rt_rq->rt_runtime_lock); 730391e43daSPeter Zijlstra /* 731391e43daSPeter Zijlstra * We cannot be left wanting - that would mean some runtime 732391e43daSPeter Zijlstra * leaked out of the system. 733391e43daSPeter Zijlstra */ 734391e43daSPeter Zijlstra BUG_ON(want); 735391e43daSPeter Zijlstra balanced: 736391e43daSPeter Zijlstra /* 737391e43daSPeter Zijlstra * Disable all the borrow logic by pretending we have inf 738391e43daSPeter Zijlstra * runtime - in which case borrowing doesn't make sense. 739391e43daSPeter Zijlstra */ 740391e43daSPeter Zijlstra rt_rq->rt_runtime = RUNTIME_INF; 741a4c96ae3SPeter Boonstoppel rt_rq->rt_throttled = 0; 742391e43daSPeter Zijlstra raw_spin_unlock(&rt_rq->rt_runtime_lock); 743391e43daSPeter Zijlstra raw_spin_unlock(&rt_b->rt_runtime_lock); 74499b62567SKirill Tkhai 74599b62567SKirill Tkhai /* Make rt_rq available for pick_next_task() */ 74699b62567SKirill Tkhai sched_rt_rq_enqueue(rt_rq); 747391e43daSPeter Zijlstra } 748391e43daSPeter Zijlstra } 749391e43daSPeter Zijlstra 750391e43daSPeter Zijlstra static void __enable_runtime(struct rq *rq) 751391e43daSPeter Zijlstra { 752391e43daSPeter Zijlstra rt_rq_iter_t iter; 753391e43daSPeter Zijlstra struct rt_rq *rt_rq; 754391e43daSPeter Zijlstra 755391e43daSPeter Zijlstra if (unlikely(!scheduler_running)) 756391e43daSPeter Zijlstra return; 757391e43daSPeter Zijlstra 758391e43daSPeter Zijlstra /* 759391e43daSPeter Zijlstra * Reset each runqueue's bandwidth settings 760391e43daSPeter Zijlstra */ 761391e43daSPeter Zijlstra for_each_rt_rq(rt_rq, iter, rq) { 762391e43daSPeter Zijlstra struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 763391e43daSPeter Zijlstra 764391e43daSPeter Zijlstra raw_spin_lock(&rt_b->rt_runtime_lock); 765391e43daSPeter Zijlstra raw_spin_lock(&rt_rq->rt_runtime_lock); 766391e43daSPeter Zijlstra rt_rq->rt_runtime = rt_b->rt_runtime; 767391e43daSPeter Zijlstra rt_rq->rt_time = 0; 768391e43daSPeter Zijlstra rt_rq->rt_throttled = 0; 769391e43daSPeter Zijlstra raw_spin_unlock(&rt_rq->rt_runtime_lock); 770391e43daSPeter Zijlstra raw_spin_unlock(&rt_b->rt_runtime_lock); 771391e43daSPeter Zijlstra } 772391e43daSPeter Zijlstra } 773391e43daSPeter Zijlstra 774391e43daSPeter Zijlstra static int balance_runtime(struct rt_rq *rt_rq) 775391e43daSPeter Zijlstra { 776391e43daSPeter Zijlstra int more = 0; 777391e43daSPeter Zijlstra 778391e43daSPeter Zijlstra if (!sched_feat(RT_RUNTIME_SHARE)) 779391e43daSPeter Zijlstra return more; 780391e43daSPeter Zijlstra 781391e43daSPeter Zijlstra if (rt_rq->rt_time > rt_rq->rt_runtime) { 782391e43daSPeter Zijlstra raw_spin_unlock(&rt_rq->rt_runtime_lock); 783391e43daSPeter Zijlstra more = do_balance_runtime(rt_rq); 
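		/*
		 * rt_runtime_lock was dropped above because
		 * do_balance_runtime() takes the bandwidth lock and then the
		 * neighbours' rt_runtime_lock, matching the rt_b -> rt_rq
		 * ordering used in __disable_runtime(); it is retaken here
		 * since callers such as sched_rt_runtime_exceeded() expect
		 * to still hold it when this returns.
		 */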
784391e43daSPeter Zijlstra raw_spin_lock(&rt_rq->rt_runtime_lock); 785391e43daSPeter Zijlstra } 786391e43daSPeter Zijlstra 787391e43daSPeter Zijlstra return more; 788391e43daSPeter Zijlstra } 789391e43daSPeter Zijlstra #else /* !CONFIG_SMP */ 790391e43daSPeter Zijlstra static inline int balance_runtime(struct rt_rq *rt_rq) 791391e43daSPeter Zijlstra { 792391e43daSPeter Zijlstra return 0; 793391e43daSPeter Zijlstra } 794391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 795391e43daSPeter Zijlstra 796391e43daSPeter Zijlstra static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) 797391e43daSPeter Zijlstra { 79842c62a58SPeter Zijlstra int i, idle = 1, throttled = 0; 799391e43daSPeter Zijlstra const struct cpumask *span; 800391e43daSPeter Zijlstra 801391e43daSPeter Zijlstra span = sched_rt_period_mask(); 802e221d028SMike Galbraith #ifdef CONFIG_RT_GROUP_SCHED 803e221d028SMike Galbraith /* 804e221d028SMike Galbraith * FIXME: isolated CPUs should really leave the root task group, 805e221d028SMike Galbraith * whether they are isolcpus or were isolated via cpusets, lest 806e221d028SMike Galbraith * the timer run on a CPU which does not service all runqueues, 807e221d028SMike Galbraith * potentially leaving other CPUs indefinitely throttled. If 808e221d028SMike Galbraith * isolation is really required, the user will turn the throttle 809e221d028SMike Galbraith * off to kill the perturbations it causes anyway. Meanwhile, 810e221d028SMike Galbraith * this maintains functionality for boot and/or troubleshooting. 811e221d028SMike Galbraith */ 812e221d028SMike Galbraith if (rt_b == &root_task_group.rt_bandwidth) 813e221d028SMike Galbraith span = cpu_online_mask; 814e221d028SMike Galbraith #endif 815391e43daSPeter Zijlstra for_each_cpu(i, span) { 816391e43daSPeter Zijlstra int enqueue = 0; 817391e43daSPeter Zijlstra struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); 818391e43daSPeter Zijlstra struct rq *rq = rq_of_rt_rq(rt_rq); 819391e43daSPeter Zijlstra 820391e43daSPeter Zijlstra raw_spin_lock(&rq->lock); 821391e43daSPeter Zijlstra if (rt_rq->rt_time) { 822391e43daSPeter Zijlstra u64 runtime; 823391e43daSPeter Zijlstra 824391e43daSPeter Zijlstra raw_spin_lock(&rt_rq->rt_runtime_lock); 825391e43daSPeter Zijlstra if (rt_rq->rt_throttled) 826391e43daSPeter Zijlstra balance_runtime(rt_rq); 827391e43daSPeter Zijlstra runtime = rt_rq->rt_runtime; 828391e43daSPeter Zijlstra rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); 829391e43daSPeter Zijlstra if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { 830391e43daSPeter Zijlstra rt_rq->rt_throttled = 0; 831391e43daSPeter Zijlstra enqueue = 1; 832391e43daSPeter Zijlstra 833391e43daSPeter Zijlstra /* 834*9edfbfedSPeter Zijlstra * When we're idle and a woken (rt) task is 835*9edfbfedSPeter Zijlstra * throttled check_preempt_curr() will set 836*9edfbfedSPeter Zijlstra * skip_update and the time between the wakeup 837*9edfbfedSPeter Zijlstra * and this unthrottle will get accounted as 838*9edfbfedSPeter Zijlstra * 'runtime'. 
839391e43daSPeter Zijlstra */ 840391e43daSPeter Zijlstra if (rt_rq->rt_nr_running && rq->curr == rq->idle) 841*9edfbfedSPeter Zijlstra rq_clock_skip_update(rq, false); 842391e43daSPeter Zijlstra } 843391e43daSPeter Zijlstra if (rt_rq->rt_time || rt_rq->rt_nr_running) 844391e43daSPeter Zijlstra idle = 0; 845391e43daSPeter Zijlstra raw_spin_unlock(&rt_rq->rt_runtime_lock); 846391e43daSPeter Zijlstra } else if (rt_rq->rt_nr_running) { 847391e43daSPeter Zijlstra idle = 0; 848391e43daSPeter Zijlstra if (!rt_rq_throttled(rt_rq)) 849391e43daSPeter Zijlstra enqueue = 1; 850391e43daSPeter Zijlstra } 85142c62a58SPeter Zijlstra if (rt_rq->rt_throttled) 85242c62a58SPeter Zijlstra throttled = 1; 853391e43daSPeter Zijlstra 854391e43daSPeter Zijlstra if (enqueue) 855391e43daSPeter Zijlstra sched_rt_rq_enqueue(rt_rq); 856391e43daSPeter Zijlstra raw_spin_unlock(&rq->lock); 857391e43daSPeter Zijlstra } 858391e43daSPeter Zijlstra 85942c62a58SPeter Zijlstra if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)) 86042c62a58SPeter Zijlstra return 1; 86142c62a58SPeter Zijlstra 862391e43daSPeter Zijlstra return idle; 863391e43daSPeter Zijlstra } 864391e43daSPeter Zijlstra 865391e43daSPeter Zijlstra static inline int rt_se_prio(struct sched_rt_entity *rt_se) 866391e43daSPeter Zijlstra { 867391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 868391e43daSPeter Zijlstra struct rt_rq *rt_rq = group_rt_rq(rt_se); 869391e43daSPeter Zijlstra 870391e43daSPeter Zijlstra if (rt_rq) 871391e43daSPeter Zijlstra return rt_rq->highest_prio.curr; 872391e43daSPeter Zijlstra #endif 873391e43daSPeter Zijlstra 874391e43daSPeter Zijlstra return rt_task_of(rt_se)->prio; 875391e43daSPeter Zijlstra } 876391e43daSPeter Zijlstra 877391e43daSPeter Zijlstra static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) 878391e43daSPeter Zijlstra { 879391e43daSPeter Zijlstra u64 runtime = sched_rt_runtime(rt_rq); 880391e43daSPeter Zijlstra 881391e43daSPeter Zijlstra if (rt_rq->rt_throttled) 882391e43daSPeter Zijlstra return rt_rq_throttled(rt_rq); 883391e43daSPeter Zijlstra 8845b680fd6SShan Hai if (runtime >= sched_rt_period(rt_rq)) 885391e43daSPeter Zijlstra return 0; 886391e43daSPeter Zijlstra 887391e43daSPeter Zijlstra balance_runtime(rt_rq); 888391e43daSPeter Zijlstra runtime = sched_rt_runtime(rt_rq); 889391e43daSPeter Zijlstra if (runtime == RUNTIME_INF) 890391e43daSPeter Zijlstra return 0; 891391e43daSPeter Zijlstra 892391e43daSPeter Zijlstra if (rt_rq->rt_time > runtime) { 8937abc63b1SPeter Zijlstra struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 8947abc63b1SPeter Zijlstra 8957abc63b1SPeter Zijlstra /* 8967abc63b1SPeter Zijlstra * Don't actually throttle groups that have no runtime assigned 8977abc63b1SPeter Zijlstra * but accrue some time due to boosting. 8987abc63b1SPeter Zijlstra */ 8997abc63b1SPeter Zijlstra if (likely(rt_b->rt_runtime)) { 900391e43daSPeter Zijlstra rt_rq->rt_throttled = 1; 901c224815dSJohn Stultz printk_deferred_once("sched: RT throttling activated\n"); 9027abc63b1SPeter Zijlstra } else { 9037abc63b1SPeter Zijlstra /* 9047abc63b1SPeter Zijlstra * In case we did anyway, make it go away, 9057abc63b1SPeter Zijlstra * replenishment is a joke, since it will replenish us 9067abc63b1SPeter Zijlstra * with exactly 0 ns. 
9077abc63b1SPeter Zijlstra */ 9087abc63b1SPeter Zijlstra rt_rq->rt_time = 0; 9097abc63b1SPeter Zijlstra } 9107abc63b1SPeter Zijlstra 911391e43daSPeter Zijlstra if (rt_rq_throttled(rt_rq)) { 912391e43daSPeter Zijlstra sched_rt_rq_dequeue(rt_rq); 913391e43daSPeter Zijlstra return 1; 914391e43daSPeter Zijlstra } 915391e43daSPeter Zijlstra } 916391e43daSPeter Zijlstra 917391e43daSPeter Zijlstra return 0; 918391e43daSPeter Zijlstra } 919391e43daSPeter Zijlstra 920391e43daSPeter Zijlstra /* 921391e43daSPeter Zijlstra * Update the current task's runtime statistics. Skip current tasks that 922391e43daSPeter Zijlstra * are not in our scheduling class. 923391e43daSPeter Zijlstra */ 924391e43daSPeter Zijlstra static void update_curr_rt(struct rq *rq) 925391e43daSPeter Zijlstra { 926391e43daSPeter Zijlstra struct task_struct *curr = rq->curr; 927391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &curr->rt; 928391e43daSPeter Zijlstra u64 delta_exec; 929391e43daSPeter Zijlstra 930391e43daSPeter Zijlstra if (curr->sched_class != &rt_sched_class) 931391e43daSPeter Zijlstra return; 932391e43daSPeter Zijlstra 93378becc27SFrederic Weisbecker delta_exec = rq_clock_task(rq) - curr->se.exec_start; 934fc79e240SKirill Tkhai if (unlikely((s64)delta_exec <= 0)) 935fc79e240SKirill Tkhai return; 936391e43daSPeter Zijlstra 93742c62a58SPeter Zijlstra schedstat_set(curr->se.statistics.exec_max, 93842c62a58SPeter Zijlstra max(curr->se.statistics.exec_max, delta_exec)); 939391e43daSPeter Zijlstra 940391e43daSPeter Zijlstra curr->se.sum_exec_runtime += delta_exec; 941391e43daSPeter Zijlstra account_group_exec_runtime(curr, delta_exec); 942391e43daSPeter Zijlstra 94378becc27SFrederic Weisbecker curr->se.exec_start = rq_clock_task(rq); 944391e43daSPeter Zijlstra cpuacct_charge(curr, delta_exec); 945391e43daSPeter Zijlstra 946391e43daSPeter Zijlstra sched_rt_avg_update(rq, delta_exec); 947391e43daSPeter Zijlstra 948391e43daSPeter Zijlstra if (!rt_bandwidth_enabled()) 949391e43daSPeter Zijlstra return; 950391e43daSPeter Zijlstra 951391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 9520b07939cSGiedrius Rekasius struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 953391e43daSPeter Zijlstra 954391e43daSPeter Zijlstra if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { 955391e43daSPeter Zijlstra raw_spin_lock(&rt_rq->rt_runtime_lock); 956391e43daSPeter Zijlstra rt_rq->rt_time += delta_exec; 957391e43daSPeter Zijlstra if (sched_rt_runtime_exceeded(rt_rq)) 9588875125eSKirill Tkhai resched_curr(rq); 959391e43daSPeter Zijlstra raw_spin_unlock(&rt_rq->rt_runtime_lock); 960391e43daSPeter Zijlstra } 961391e43daSPeter Zijlstra } 962391e43daSPeter Zijlstra } 963391e43daSPeter Zijlstra 964f4ebcbc0SKirill Tkhai static void 965f4ebcbc0SKirill Tkhai dequeue_top_rt_rq(struct rt_rq *rt_rq) 966f4ebcbc0SKirill Tkhai { 967f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_rq(rt_rq); 968f4ebcbc0SKirill Tkhai 969f4ebcbc0SKirill Tkhai BUG_ON(&rq->rt != rt_rq); 970f4ebcbc0SKirill Tkhai 971f4ebcbc0SKirill Tkhai if (!rt_rq->rt_queued) 972f4ebcbc0SKirill Tkhai return; 973f4ebcbc0SKirill Tkhai 974f4ebcbc0SKirill Tkhai BUG_ON(!rq->nr_running); 975f4ebcbc0SKirill Tkhai 97672465447SKirill Tkhai sub_nr_running(rq, rt_rq->rt_nr_running); 977f4ebcbc0SKirill Tkhai rt_rq->rt_queued = 0; 978f4ebcbc0SKirill Tkhai } 979f4ebcbc0SKirill Tkhai 980f4ebcbc0SKirill Tkhai static void 981f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(struct rt_rq *rt_rq) 982f4ebcbc0SKirill Tkhai { 983f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_rq(rt_rq); 984f4ebcbc0SKirill Tkhai 985f4ebcbc0SKirill 
Tkhai BUG_ON(&rq->rt != rt_rq); 986f4ebcbc0SKirill Tkhai 987f4ebcbc0SKirill Tkhai if (rt_rq->rt_queued) 988f4ebcbc0SKirill Tkhai return; 989f4ebcbc0SKirill Tkhai if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running) 990f4ebcbc0SKirill Tkhai return; 991f4ebcbc0SKirill Tkhai 99272465447SKirill Tkhai add_nr_running(rq, rt_rq->rt_nr_running); 993f4ebcbc0SKirill Tkhai rt_rq->rt_queued = 1; 994f4ebcbc0SKirill Tkhai } 995f4ebcbc0SKirill Tkhai 996391e43daSPeter Zijlstra #if defined CONFIG_SMP 997391e43daSPeter Zijlstra 998391e43daSPeter Zijlstra static void 999391e43daSPeter Zijlstra inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) 1000391e43daSPeter Zijlstra { 1001391e43daSPeter Zijlstra struct rq *rq = rq_of_rt_rq(rt_rq); 1002391e43daSPeter Zijlstra 1003757dfcaaSKirill Tkhai #ifdef CONFIG_RT_GROUP_SCHED 1004757dfcaaSKirill Tkhai /* 1005757dfcaaSKirill Tkhai * Change rq's cpupri only if rt_rq is the top queue. 1006757dfcaaSKirill Tkhai */ 1007757dfcaaSKirill Tkhai if (&rq->rt != rt_rq) 1008757dfcaaSKirill Tkhai return; 1009757dfcaaSKirill Tkhai #endif 1010391e43daSPeter Zijlstra if (rq->online && prio < prev_prio) 1011391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, prio); 1012391e43daSPeter Zijlstra } 1013391e43daSPeter Zijlstra 1014391e43daSPeter Zijlstra static void 1015391e43daSPeter Zijlstra dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) 1016391e43daSPeter Zijlstra { 1017391e43daSPeter Zijlstra struct rq *rq = rq_of_rt_rq(rt_rq); 1018391e43daSPeter Zijlstra 1019757dfcaaSKirill Tkhai #ifdef CONFIG_RT_GROUP_SCHED 1020757dfcaaSKirill Tkhai /* 1021757dfcaaSKirill Tkhai * Change rq's cpupri only if rt_rq is the top queue. 1022757dfcaaSKirill Tkhai */ 1023757dfcaaSKirill Tkhai if (&rq->rt != rt_rq) 1024757dfcaaSKirill Tkhai return; 1025757dfcaaSKirill Tkhai #endif 1026391e43daSPeter Zijlstra if (rq->online && rt_rq->highest_prio.curr != prev_prio) 1027391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); 1028391e43daSPeter Zijlstra } 1029391e43daSPeter Zijlstra 1030391e43daSPeter Zijlstra #else /* CONFIG_SMP */ 1031391e43daSPeter Zijlstra 1032391e43daSPeter Zijlstra static inline 1033391e43daSPeter Zijlstra void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} 1034391e43daSPeter Zijlstra static inline 1035391e43daSPeter Zijlstra void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} 1036391e43daSPeter Zijlstra 1037391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 1038391e43daSPeter Zijlstra 1039391e43daSPeter Zijlstra #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 1040391e43daSPeter Zijlstra static void 1041391e43daSPeter Zijlstra inc_rt_prio(struct rt_rq *rt_rq, int prio) 1042391e43daSPeter Zijlstra { 1043391e43daSPeter Zijlstra int prev_prio = rt_rq->highest_prio.curr; 1044391e43daSPeter Zijlstra 1045391e43daSPeter Zijlstra if (prio < prev_prio) 1046391e43daSPeter Zijlstra rt_rq->highest_prio.curr = prio; 1047391e43daSPeter Zijlstra 1048391e43daSPeter Zijlstra inc_rt_prio_smp(rt_rq, prio, prev_prio); 1049391e43daSPeter Zijlstra } 1050391e43daSPeter Zijlstra 1051391e43daSPeter Zijlstra static void 1052391e43daSPeter Zijlstra dec_rt_prio(struct rt_rq *rt_rq, int prio) 1053391e43daSPeter Zijlstra { 1054391e43daSPeter Zijlstra int prev_prio = rt_rq->highest_prio.curr; 1055391e43daSPeter Zijlstra 1056391e43daSPeter Zijlstra if (rt_rq->rt_nr_running) { 1057391e43daSPeter Zijlstra 1058391e43daSPeter Zijlstra WARN_ON(prio < prev_prio); 1059391e43daSPeter Zijlstra 1060391e43daSPeter Zijlstra /* 
1061391e43daSPeter Zijlstra * This may have been our highest task, and therefore 1062391e43daSPeter Zijlstra * we may have some recomputation to do 1063391e43daSPeter Zijlstra */ 1064391e43daSPeter Zijlstra if (prio == prev_prio) { 1065391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1066391e43daSPeter Zijlstra 1067391e43daSPeter Zijlstra rt_rq->highest_prio.curr = 1068391e43daSPeter Zijlstra sched_find_first_bit(array->bitmap); 1069391e43daSPeter Zijlstra } 1070391e43daSPeter Zijlstra 1071391e43daSPeter Zijlstra } else 1072391e43daSPeter Zijlstra rt_rq->highest_prio.curr = MAX_RT_PRIO; 1073391e43daSPeter Zijlstra 1074391e43daSPeter Zijlstra dec_rt_prio_smp(rt_rq, prio, prev_prio); 1075391e43daSPeter Zijlstra } 1076391e43daSPeter Zijlstra 1077391e43daSPeter Zijlstra #else 1078391e43daSPeter Zijlstra 1079391e43daSPeter Zijlstra static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} 1080391e43daSPeter Zijlstra static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} 1081391e43daSPeter Zijlstra 1082391e43daSPeter Zijlstra #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */ 1083391e43daSPeter Zijlstra 1084391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 1085391e43daSPeter Zijlstra 1086391e43daSPeter Zijlstra static void 1087391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1088391e43daSPeter Zijlstra { 1089391e43daSPeter Zijlstra if (rt_se_boosted(rt_se)) 1090391e43daSPeter Zijlstra rt_rq->rt_nr_boosted++; 1091391e43daSPeter Zijlstra 1092391e43daSPeter Zijlstra if (rt_rq->tg) 1093391e43daSPeter Zijlstra start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); 1094391e43daSPeter Zijlstra } 1095391e43daSPeter Zijlstra 1096391e43daSPeter Zijlstra static void 1097391e43daSPeter Zijlstra dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1098391e43daSPeter Zijlstra { 1099391e43daSPeter Zijlstra if (rt_se_boosted(rt_se)) 1100391e43daSPeter Zijlstra rt_rq->rt_nr_boosted--; 1101391e43daSPeter Zijlstra 1102391e43daSPeter Zijlstra WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); 1103391e43daSPeter Zijlstra } 1104391e43daSPeter Zijlstra 1105391e43daSPeter Zijlstra #else /* CONFIG_RT_GROUP_SCHED */ 1106391e43daSPeter Zijlstra 1107391e43daSPeter Zijlstra static void 1108391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1109391e43daSPeter Zijlstra { 1110391e43daSPeter Zijlstra start_rt_bandwidth(&def_rt_bandwidth); 1111391e43daSPeter Zijlstra } 1112391e43daSPeter Zijlstra 1113391e43daSPeter Zijlstra static inline 1114391e43daSPeter Zijlstra void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} 1115391e43daSPeter Zijlstra 1116391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */ 1117391e43daSPeter Zijlstra 1118391e43daSPeter Zijlstra static inline 111922abdef3SKirill Tkhai unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se) 112022abdef3SKirill Tkhai { 112122abdef3SKirill Tkhai struct rt_rq *group_rq = group_rt_rq(rt_se); 112222abdef3SKirill Tkhai 112322abdef3SKirill Tkhai if (group_rq) 112422abdef3SKirill Tkhai return group_rq->rt_nr_running; 112522abdef3SKirill Tkhai else 112622abdef3SKirill Tkhai return 1; 112722abdef3SKirill Tkhai } 112822abdef3SKirill Tkhai 112922abdef3SKirill Tkhai static inline 1130391e43daSPeter Zijlstra void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1131391e43daSPeter Zijlstra { 1132391e43daSPeter Zijlstra int prio = rt_se_prio(rt_se); 1133391e43daSPeter Zijlstra 1134391e43daSPeter Zijlstra 
	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
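 *
 * E.g. with nested groups, dequeueing a child entity first would change
 * its group rt_rq's rt_nr_running and highest prio while the parent
 * entity is still queued under its old priority; dequeue_rt_stack()
 * below therefore walks from the outermost parent down to the leaf,
 * and enqueue_rt_entity()/dequeue_rt_entity() re-add the entities
 * bottom-up afterwards.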
1194391e43daSPeter Zijlstra */ 1195391e43daSPeter Zijlstra static void dequeue_rt_stack(struct sched_rt_entity *rt_se) 1196391e43daSPeter Zijlstra { 1197391e43daSPeter Zijlstra struct sched_rt_entity *back = NULL; 1198391e43daSPeter Zijlstra 1199391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 1200391e43daSPeter Zijlstra rt_se->back = back; 1201391e43daSPeter Zijlstra back = rt_se; 1202391e43daSPeter Zijlstra } 1203391e43daSPeter Zijlstra 1204f4ebcbc0SKirill Tkhai dequeue_top_rt_rq(rt_rq_of_se(back)); 1205f4ebcbc0SKirill Tkhai 1206391e43daSPeter Zijlstra for (rt_se = back; rt_se; rt_se = rt_se->back) { 1207391e43daSPeter Zijlstra if (on_rt_rq(rt_se)) 1208391e43daSPeter Zijlstra __dequeue_rt_entity(rt_se); 1209391e43daSPeter Zijlstra } 1210391e43daSPeter Zijlstra } 1211391e43daSPeter Zijlstra 1212391e43daSPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head) 1213391e43daSPeter Zijlstra { 1214f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_se(rt_se); 1215f4ebcbc0SKirill Tkhai 1216391e43daSPeter Zijlstra dequeue_rt_stack(rt_se); 1217391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) 1218391e43daSPeter Zijlstra __enqueue_rt_entity(rt_se, head); 1219f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(&rq->rt); 1220391e43daSPeter Zijlstra } 1221391e43daSPeter Zijlstra 1222391e43daSPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se) 1223391e43daSPeter Zijlstra { 1224f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_se(rt_se); 1225f4ebcbc0SKirill Tkhai 1226391e43daSPeter Zijlstra dequeue_rt_stack(rt_se); 1227391e43daSPeter Zijlstra 1228391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 1229391e43daSPeter Zijlstra struct rt_rq *rt_rq = group_rt_rq(rt_se); 1230391e43daSPeter Zijlstra 1231391e43daSPeter Zijlstra if (rt_rq && rt_rq->rt_nr_running) 1232391e43daSPeter Zijlstra __enqueue_rt_entity(rt_se, false); 1233391e43daSPeter Zijlstra } 1234f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(&rq->rt); 1235391e43daSPeter Zijlstra } 1236391e43daSPeter Zijlstra 1237391e43daSPeter Zijlstra /* 1238391e43daSPeter Zijlstra * Adding/removing a task to/from a priority array: 1239391e43daSPeter Zijlstra */ 1240391e43daSPeter Zijlstra static void 1241391e43daSPeter Zijlstra enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) 1242391e43daSPeter Zijlstra { 1243391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt; 1244391e43daSPeter Zijlstra 1245391e43daSPeter Zijlstra if (flags & ENQUEUE_WAKEUP) 1246391e43daSPeter Zijlstra rt_se->timeout = 0; 1247391e43daSPeter Zijlstra 1248391e43daSPeter Zijlstra enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD); 1249391e43daSPeter Zijlstra 125029baa747SPeter Zijlstra if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 1251391e43daSPeter Zijlstra enqueue_pushable_task(rq, p); 1252391e43daSPeter Zijlstra } 1253391e43daSPeter Zijlstra 1254391e43daSPeter Zijlstra static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) 1255391e43daSPeter Zijlstra { 1256391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt; 1257391e43daSPeter Zijlstra 1258391e43daSPeter Zijlstra update_curr_rt(rq); 1259391e43daSPeter Zijlstra dequeue_rt_entity(rt_se); 1260391e43daSPeter Zijlstra 1261391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 1262391e43daSPeter Zijlstra } 1263391e43daSPeter Zijlstra 1264391e43daSPeter Zijlstra /* 1265391e43daSPeter Zijlstra * Put task to the head or the end of the run list without the overhead of 1266391e43daSPeter Zijlstra * dequeue followed by enqueue. 
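 * (Editor's note, added for clarity: requeue_rt_entity() only moves the
 * entity within its priority list via list_move()/list_move_tail(), so the
 * rt_nr_running and priority accounting done by the enqueue/dequeue paths
 * is left untouched.)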
1267391e43daSPeter Zijlstra */ 1268391e43daSPeter Zijlstra static void 1269391e43daSPeter Zijlstra requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) 1270391e43daSPeter Zijlstra { 1271391e43daSPeter Zijlstra if (on_rt_rq(rt_se)) { 1272391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1273391e43daSPeter Zijlstra struct list_head *queue = array->queue + rt_se_prio(rt_se); 1274391e43daSPeter Zijlstra 1275391e43daSPeter Zijlstra if (head) 1276391e43daSPeter Zijlstra list_move(&rt_se->run_list, queue); 1277391e43daSPeter Zijlstra else 1278391e43daSPeter Zijlstra list_move_tail(&rt_se->run_list, queue); 1279391e43daSPeter Zijlstra } 1280391e43daSPeter Zijlstra } 1281391e43daSPeter Zijlstra 1282391e43daSPeter Zijlstra static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) 1283391e43daSPeter Zijlstra { 1284391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt; 1285391e43daSPeter Zijlstra struct rt_rq *rt_rq; 1286391e43daSPeter Zijlstra 1287391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 1288391e43daSPeter Zijlstra rt_rq = rt_rq_of_se(rt_se); 1289391e43daSPeter Zijlstra requeue_rt_entity(rt_rq, rt_se, head); 1290391e43daSPeter Zijlstra } 1291391e43daSPeter Zijlstra } 1292391e43daSPeter Zijlstra 1293391e43daSPeter Zijlstra static void yield_task_rt(struct rq *rq) 1294391e43daSPeter Zijlstra { 1295391e43daSPeter Zijlstra requeue_task_rt(rq, rq->curr, 0); 1296391e43daSPeter Zijlstra } 1297391e43daSPeter Zijlstra 1298391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1299391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task); 1300391e43daSPeter Zijlstra 1301391e43daSPeter Zijlstra static int 1302ac66f547SPeter Zijlstra select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags) 1303391e43daSPeter Zijlstra { 1304391e43daSPeter Zijlstra struct task_struct *curr; 1305391e43daSPeter Zijlstra struct rq *rq; 1306391e43daSPeter Zijlstra 1307391e43daSPeter Zijlstra /* For anything but wake ups, just return the task_cpu */ 1308391e43daSPeter Zijlstra if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) 1309391e43daSPeter Zijlstra goto out; 1310391e43daSPeter Zijlstra 1311391e43daSPeter Zijlstra rq = cpu_rq(cpu); 1312391e43daSPeter Zijlstra 1313391e43daSPeter Zijlstra rcu_read_lock(); 1314391e43daSPeter Zijlstra curr = ACCESS_ONCE(rq->curr); /* unlocked access */ 1315391e43daSPeter Zijlstra 1316391e43daSPeter Zijlstra /* 1317391e43daSPeter Zijlstra * If the current task on @p's runqueue is an RT task, then 1318391e43daSPeter Zijlstra * try to see if we can wake this RT task up on another 1319391e43daSPeter Zijlstra * runqueue. Otherwise simply start this RT task 1320391e43daSPeter Zijlstra * on its current runqueue. 1321391e43daSPeter Zijlstra * 1322391e43daSPeter Zijlstra * We want to avoid overloading runqueues. If the woken 1323391e43daSPeter Zijlstra * task is a higher priority, then it will stay on this CPU 1324391e43daSPeter Zijlstra * and the lower prio task should be moved to another CPU. 1325391e43daSPeter Zijlstra * Even though this will probably make the lower prio task 1326391e43daSPeter Zijlstra * lose its cache, we do not want to bounce a higher task 1327391e43daSPeter Zijlstra * around just because it gave up its CPU, perhaps for a 1328391e43daSPeter Zijlstra * lock? 1329391e43daSPeter Zijlstra * 1330391e43daSPeter Zijlstra * For equal prio tasks, we just let the scheduler sort it out. 
1331391e43daSPeter Zijlstra * 1332391e43daSPeter Zijlstra * Otherwise, just let it ride on the affined RQ and the 1333391e43daSPeter Zijlstra * post-schedule router will push the preempted task away 1334391e43daSPeter Zijlstra * 1335391e43daSPeter Zijlstra * This test is optimistic; if we get it wrong the load-balancer 1336391e43daSPeter Zijlstra * will have to sort it out. 1337391e43daSPeter Zijlstra */ 1338391e43daSPeter Zijlstra if (curr && unlikely(rt_task(curr)) && 133929baa747SPeter Zijlstra (curr->nr_cpus_allowed < 2 || 13406bfa687cSShawn Bohrer curr->prio <= p->prio)) { 1341391e43daSPeter Zijlstra int target = find_lowest_rq(p); 1342391e43daSPeter Zijlstra 1343391e43daSPeter Zijlstra if (target != -1) 1344391e43daSPeter Zijlstra cpu = target; 1345391e43daSPeter Zijlstra } 1346391e43daSPeter Zijlstra rcu_read_unlock(); 1347391e43daSPeter Zijlstra 1348391e43daSPeter Zijlstra out: 1349391e43daSPeter Zijlstra return cpu; 1350391e43daSPeter Zijlstra } 1351391e43daSPeter Zijlstra 1352391e43daSPeter Zijlstra static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) 1353391e43daSPeter Zijlstra { 1354308a623aSWanpeng Li /* 1355308a623aSWanpeng Li * Current can't be migrated, useless to reschedule, 1356308a623aSWanpeng Li * let's hope p can move out. 1357308a623aSWanpeng Li */ 1358308a623aSWanpeng Li if (rq->curr->nr_cpus_allowed == 1 || 1359308a623aSWanpeng Li !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) 1360391e43daSPeter Zijlstra return; 1361391e43daSPeter Zijlstra 1362308a623aSWanpeng Li /* 1363308a623aSWanpeng Li * p is migratable, so let's not schedule it and 1364308a623aSWanpeng Li * see if it is pushed or pulled somewhere else. 1365308a623aSWanpeng Li */ 136629baa747SPeter Zijlstra if (p->nr_cpus_allowed != 1 1367391e43daSPeter Zijlstra && cpupri_find(&rq->rd->cpupri, p, NULL)) 1368391e43daSPeter Zijlstra return; 1369391e43daSPeter Zijlstra 1370391e43daSPeter Zijlstra /* 1371391e43daSPeter Zijlstra * There appear to be other cpus that can accept 1372391e43daSPeter Zijlstra * current and none to run 'p', so let's reschedule 1373391e43daSPeter Zijlstra * to try and push current away: 1374391e43daSPeter Zijlstra */ 1375391e43daSPeter Zijlstra requeue_task_rt(rq, p, 1); 13768875125eSKirill Tkhai resched_curr(rq); 1377391e43daSPeter Zijlstra } 1378391e43daSPeter Zijlstra 1379391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 1380391e43daSPeter Zijlstra 1381391e43daSPeter Zijlstra /* 1382391e43daSPeter Zijlstra * Preempt the current task with a newly woken task if needed: 1383391e43daSPeter Zijlstra */ 1384391e43daSPeter Zijlstra static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) 1385391e43daSPeter Zijlstra { 1386391e43daSPeter Zijlstra if (p->prio < rq->curr->prio) { 13878875125eSKirill Tkhai resched_curr(rq); 1388391e43daSPeter Zijlstra return; 1389391e43daSPeter Zijlstra } 1390391e43daSPeter Zijlstra 1391391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1392391e43daSPeter Zijlstra /* 1393391e43daSPeter Zijlstra * If: 1394391e43daSPeter Zijlstra * 1395391e43daSPeter Zijlstra * - the newly woken task is of equal priority to the current task 1396391e43daSPeter Zijlstra * - the newly woken task is non-migratable while current is migratable 1397391e43daSPeter Zijlstra * - current will be preempted on the next reschedule 1398391e43daSPeter Zijlstra * 1399391e43daSPeter Zijlstra * we should check to see if current can readily move to a different 1400391e43daSPeter Zijlstra * cpu.
If so, we will reschedule to allow the push logic to try 1401391e43daSPeter Zijlstra * to move current somewhere else, making room for our non-migratable 1402391e43daSPeter Zijlstra * task. 1403391e43daSPeter Zijlstra */ 1404391e43daSPeter Zijlstra if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) 1405391e43daSPeter Zijlstra check_preempt_equal_prio(rq, p); 1406391e43daSPeter Zijlstra #endif 1407391e43daSPeter Zijlstra } 1408391e43daSPeter Zijlstra 1409391e43daSPeter Zijlstra static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, 1410391e43daSPeter Zijlstra struct rt_rq *rt_rq) 1411391e43daSPeter Zijlstra { 1412391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1413391e43daSPeter Zijlstra struct sched_rt_entity *next = NULL; 1414391e43daSPeter Zijlstra struct list_head *queue; 1415391e43daSPeter Zijlstra int idx; 1416391e43daSPeter Zijlstra 1417391e43daSPeter Zijlstra idx = sched_find_first_bit(array->bitmap); 1418391e43daSPeter Zijlstra BUG_ON(idx >= MAX_RT_PRIO); 1419391e43daSPeter Zijlstra 1420391e43daSPeter Zijlstra queue = array->queue + idx; 1421391e43daSPeter Zijlstra next = list_entry(queue->next, struct sched_rt_entity, run_list); 1422391e43daSPeter Zijlstra 1423391e43daSPeter Zijlstra return next; 1424391e43daSPeter Zijlstra } 1425391e43daSPeter Zijlstra 1426391e43daSPeter Zijlstra static struct task_struct *_pick_next_task_rt(struct rq *rq) 1427391e43daSPeter Zijlstra { 1428391e43daSPeter Zijlstra struct sched_rt_entity *rt_se; 1429391e43daSPeter Zijlstra struct task_struct *p; 1430606dba2eSPeter Zijlstra struct rt_rq *rt_rq = &rq->rt; 1431391e43daSPeter Zijlstra 1432391e43daSPeter Zijlstra do { 1433391e43daSPeter Zijlstra rt_se = pick_next_rt_entity(rq, rt_rq); 1434391e43daSPeter Zijlstra BUG_ON(!rt_se); 1435391e43daSPeter Zijlstra rt_rq = group_rt_rq(rt_se); 1436391e43daSPeter Zijlstra } while (rt_rq); 1437391e43daSPeter Zijlstra 1438391e43daSPeter Zijlstra p = rt_task_of(rt_se); 143978becc27SFrederic Weisbecker p->se.exec_start = rq_clock_task(rq); 1440391e43daSPeter Zijlstra 1441391e43daSPeter Zijlstra return p; 1442391e43daSPeter Zijlstra } 1443391e43daSPeter Zijlstra 1444606dba2eSPeter Zijlstra static struct task_struct * 1445606dba2eSPeter Zijlstra pick_next_task_rt(struct rq *rq, struct task_struct *prev) 1446391e43daSPeter Zijlstra { 1447606dba2eSPeter Zijlstra struct task_struct *p; 1448606dba2eSPeter Zijlstra struct rt_rq *rt_rq = &rq->rt; 1449606dba2eSPeter Zijlstra 145037e117c0SPeter Zijlstra if (need_pull_rt_task(rq, prev)) { 145138033c37SPeter Zijlstra pull_rt_task(rq); 145237e117c0SPeter Zijlstra /* 145337e117c0SPeter Zijlstra * pull_rt_task() can drop (and re-acquire) rq->lock; this 1454a1d9a323SKirill Tkhai * means a dl or stop task can slip in, in which case we need 1455a1d9a323SKirill Tkhai * to re-start task selection. 145637e117c0SPeter Zijlstra */ 1457da0c1e65SKirill Tkhai if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) || 1458a1d9a323SKirill Tkhai rq->dl.dl_nr_running)) 145937e117c0SPeter Zijlstra return RETRY_TASK; 146037e117c0SPeter Zijlstra } 146138033c37SPeter Zijlstra 1462734ff2a7SKirill Tkhai /* 1463734ff2a7SKirill Tkhai * We may dequeue prev's rt_rq in put_prev_task(). 1464734ff2a7SKirill Tkhai * So, we update time before rt_nr_running check. 
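 * (Editor's note, added for clarity: update_curr_rt() can end up throttling
 * the rt_rq, which dequeues it and clears ->rt_queued, so doing the update
 * first keeps the check below accurate.)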
1465734ff2a7SKirill Tkhai */ 1466734ff2a7SKirill Tkhai if (prev->sched_class == &rt_sched_class) 1467734ff2a7SKirill Tkhai update_curr_rt(rq); 1468734ff2a7SKirill Tkhai 1469f4ebcbc0SKirill Tkhai if (!rt_rq->rt_queued) 1470606dba2eSPeter Zijlstra return NULL; 1471606dba2eSPeter Zijlstra 14723f1d2a31SPeter Zijlstra put_prev_task(rq, prev); 1473606dba2eSPeter Zijlstra 1474606dba2eSPeter Zijlstra p = _pick_next_task_rt(rq); 1475391e43daSPeter Zijlstra 1476391e43daSPeter Zijlstra /* The running task is never eligible for pushing */ 1477391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 1478391e43daSPeter Zijlstra 1479dc877341SPeter Zijlstra set_post_schedule(rq); 1480391e43daSPeter Zijlstra 1481391e43daSPeter Zijlstra return p; 1482391e43daSPeter Zijlstra } 1483391e43daSPeter Zijlstra 1484391e43daSPeter Zijlstra static void put_prev_task_rt(struct rq *rq, struct task_struct *p) 1485391e43daSPeter Zijlstra { 1486391e43daSPeter Zijlstra update_curr_rt(rq); 1487391e43daSPeter Zijlstra 1488391e43daSPeter Zijlstra /* 1489391e43daSPeter Zijlstra * The previous task needs to be made eligible for pushing 1490391e43daSPeter Zijlstra * if it is still active 1491391e43daSPeter Zijlstra */ 149229baa747SPeter Zijlstra if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) 1493391e43daSPeter Zijlstra enqueue_pushable_task(rq, p); 1494391e43daSPeter Zijlstra } 1495391e43daSPeter Zijlstra 1496391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1497391e43daSPeter Zijlstra 1498391e43daSPeter Zijlstra /* Only try algorithms three times */ 1499391e43daSPeter Zijlstra #define RT_MAX_TRIES 3 1500391e43daSPeter Zijlstra 1501391e43daSPeter Zijlstra static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) 1502391e43daSPeter Zijlstra { 1503391e43daSPeter Zijlstra if (!task_running(rq, p) && 150460334cafSKirill Tkhai cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) 1505391e43daSPeter Zijlstra return 1; 1506391e43daSPeter Zijlstra return 0; 1507391e43daSPeter Zijlstra } 1508391e43daSPeter Zijlstra 1509e23ee747SKirill Tkhai /* 1510e23ee747SKirill Tkhai * Return the highest pushable rq's task, which is suitable to be executed 1511e23ee747SKirill Tkhai * on the cpu, NULL otherwise 1512e23ee747SKirill Tkhai */ 1513e23ee747SKirill Tkhai static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu) 1514391e43daSPeter Zijlstra { 1515e23ee747SKirill Tkhai struct plist_head *head = &rq->rt.pushable_tasks; 1516391e43daSPeter Zijlstra struct task_struct *p; 1517391e43daSPeter Zijlstra 1518e23ee747SKirill Tkhai if (!has_pushable_tasks(rq)) 1519e23ee747SKirill Tkhai return NULL; 1520391e43daSPeter Zijlstra 1521e23ee747SKirill Tkhai plist_for_each_entry(p, head, pushable_tasks) { 1522e23ee747SKirill Tkhai if (pick_rt_task(rq, p, cpu)) 1523e23ee747SKirill Tkhai return p; 1524391e43daSPeter Zijlstra } 1525391e43daSPeter Zijlstra 1526e23ee747SKirill Tkhai return NULL; 1527391e43daSPeter Zijlstra } 1528391e43daSPeter Zijlstra 1529391e43daSPeter Zijlstra static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); 1530391e43daSPeter Zijlstra 1531391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task) 1532391e43daSPeter Zijlstra { 1533391e43daSPeter Zijlstra struct sched_domain *sd; 15344ba29684SChristoph Lameter struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask); 1535391e43daSPeter Zijlstra int this_cpu = smp_processor_id(); 1536391e43daSPeter Zijlstra int cpu = task_cpu(task); 1537391e43daSPeter Zijlstra 1538391e43daSPeter Zijlstra /* Make sure the mask is initialized first */ 1539391e43daSPeter 
Zijlstra if (unlikely(!lowest_mask)) 1540391e43daSPeter Zijlstra return -1; 1541391e43daSPeter Zijlstra 154229baa747SPeter Zijlstra if (task->nr_cpus_allowed == 1) 1543391e43daSPeter Zijlstra return -1; /* No other targets possible */ 1544391e43daSPeter Zijlstra 1545391e43daSPeter Zijlstra if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) 1546391e43daSPeter Zijlstra return -1; /* No targets found */ 1547391e43daSPeter Zijlstra 1548391e43daSPeter Zijlstra /* 1549391e43daSPeter Zijlstra * At this point we have built a mask of cpus representing the 1550391e43daSPeter Zijlstra * lowest priority tasks in the system. Now we want to elect 1551391e43daSPeter Zijlstra * the best one based on our affinity and topology. 1552391e43daSPeter Zijlstra * 1553391e43daSPeter Zijlstra * We prioritize the last cpu that the task executed on since 1554391e43daSPeter Zijlstra * it is most likely cache-hot in that location. 1555391e43daSPeter Zijlstra */ 1556391e43daSPeter Zijlstra if (cpumask_test_cpu(cpu, lowest_mask)) 1557391e43daSPeter Zijlstra return cpu; 1558391e43daSPeter Zijlstra 1559391e43daSPeter Zijlstra /* 1560391e43daSPeter Zijlstra * Otherwise, we consult the sched_domains span maps to figure 1561391e43daSPeter Zijlstra * out which cpu is logically closest to our hot cache data. 1562391e43daSPeter Zijlstra */ 1563391e43daSPeter Zijlstra if (!cpumask_test_cpu(this_cpu, lowest_mask)) 1564391e43daSPeter Zijlstra this_cpu = -1; /* Skip this_cpu opt if not among lowest */ 1565391e43daSPeter Zijlstra 1566391e43daSPeter Zijlstra rcu_read_lock(); 1567391e43daSPeter Zijlstra for_each_domain(cpu, sd) { 1568391e43daSPeter Zijlstra if (sd->flags & SD_WAKE_AFFINE) { 1569391e43daSPeter Zijlstra int best_cpu; 1570391e43daSPeter Zijlstra 1571391e43daSPeter Zijlstra /* 1572391e43daSPeter Zijlstra * "this_cpu" is cheaper to preempt than a 1573391e43daSPeter Zijlstra * remote processor. 1574391e43daSPeter Zijlstra */ 1575391e43daSPeter Zijlstra if (this_cpu != -1 && 1576391e43daSPeter Zijlstra cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { 1577391e43daSPeter Zijlstra rcu_read_unlock(); 1578391e43daSPeter Zijlstra return this_cpu; 1579391e43daSPeter Zijlstra } 1580391e43daSPeter Zijlstra 1581391e43daSPeter Zijlstra best_cpu = cpumask_first_and(lowest_mask, 1582391e43daSPeter Zijlstra sched_domain_span(sd)); 1583391e43daSPeter Zijlstra if (best_cpu < nr_cpu_ids) { 1584391e43daSPeter Zijlstra rcu_read_unlock(); 1585391e43daSPeter Zijlstra return best_cpu; 1586391e43daSPeter Zijlstra } 1587391e43daSPeter Zijlstra } 1588391e43daSPeter Zijlstra } 1589391e43daSPeter Zijlstra rcu_read_unlock(); 1590391e43daSPeter Zijlstra 1591391e43daSPeter Zijlstra /* 1592391e43daSPeter Zijlstra * And finally, if there were no matches within the domains 1593391e43daSPeter Zijlstra * just give the caller *something* to work with from the compatible 1594391e43daSPeter Zijlstra * locations. 
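 * (Editor's note, added for clarity: at this point neither the task's
 * previous CPU nor any CPU found through the sched-domain walk qualified,
 * so fall back to this_cpu if it is in the mask, and then to any CPU in
 * the mask.)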
1595391e43daSPeter Zijlstra */ 1596391e43daSPeter Zijlstra if (this_cpu != -1) 1597391e43daSPeter Zijlstra return this_cpu; 1598391e43daSPeter Zijlstra 1599391e43daSPeter Zijlstra cpu = cpumask_any(lowest_mask); 1600391e43daSPeter Zijlstra if (cpu < nr_cpu_ids) 1601391e43daSPeter Zijlstra return cpu; 1602391e43daSPeter Zijlstra return -1; 1603391e43daSPeter Zijlstra } 1604391e43daSPeter Zijlstra 1605391e43daSPeter Zijlstra /* Will lock the rq it finds */ 1606391e43daSPeter Zijlstra static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) 1607391e43daSPeter Zijlstra { 1608391e43daSPeter Zijlstra struct rq *lowest_rq = NULL; 1609391e43daSPeter Zijlstra int tries; 1610391e43daSPeter Zijlstra int cpu; 1611391e43daSPeter Zijlstra 1612391e43daSPeter Zijlstra for (tries = 0; tries < RT_MAX_TRIES; tries++) { 1613391e43daSPeter Zijlstra cpu = find_lowest_rq(task); 1614391e43daSPeter Zijlstra 1615391e43daSPeter Zijlstra if ((cpu == -1) || (cpu == rq->cpu)) 1616391e43daSPeter Zijlstra break; 1617391e43daSPeter Zijlstra 1618391e43daSPeter Zijlstra lowest_rq = cpu_rq(cpu); 1619391e43daSPeter Zijlstra 1620391e43daSPeter Zijlstra /* if the prio of this runqueue changed, try again */ 1621391e43daSPeter Zijlstra if (double_lock_balance(rq, lowest_rq)) { 1622391e43daSPeter Zijlstra /* 1623391e43daSPeter Zijlstra * We had to unlock the run queue. In 1624391e43daSPeter Zijlstra * the mean time, task could have 1625391e43daSPeter Zijlstra * migrated already or had its affinity changed. 1626391e43daSPeter Zijlstra * Also make sure that it wasn't scheduled on its rq. 1627391e43daSPeter Zijlstra */ 1628391e43daSPeter Zijlstra if (unlikely(task_rq(task) != rq || 1629391e43daSPeter Zijlstra !cpumask_test_cpu(lowest_rq->cpu, 1630391e43daSPeter Zijlstra tsk_cpus_allowed(task)) || 1631391e43daSPeter Zijlstra task_running(rq, task) || 1632da0c1e65SKirill Tkhai !task_on_rq_queued(task))) { 1633391e43daSPeter Zijlstra 16347f1b4393SPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1635391e43daSPeter Zijlstra lowest_rq = NULL; 1636391e43daSPeter Zijlstra break; 1637391e43daSPeter Zijlstra } 1638391e43daSPeter Zijlstra } 1639391e43daSPeter Zijlstra 1640391e43daSPeter Zijlstra /* If this rq is still suitable use it. 
*/ 1641391e43daSPeter Zijlstra if (lowest_rq->rt.highest_prio.curr > task->prio) 1642391e43daSPeter Zijlstra break; 1643391e43daSPeter Zijlstra 1644391e43daSPeter Zijlstra /* try again */ 1645391e43daSPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1646391e43daSPeter Zijlstra lowest_rq = NULL; 1647391e43daSPeter Zijlstra } 1648391e43daSPeter Zijlstra 1649391e43daSPeter Zijlstra return lowest_rq; 1650391e43daSPeter Zijlstra } 1651391e43daSPeter Zijlstra 1652391e43daSPeter Zijlstra static struct task_struct *pick_next_pushable_task(struct rq *rq) 1653391e43daSPeter Zijlstra { 1654391e43daSPeter Zijlstra struct task_struct *p; 1655391e43daSPeter Zijlstra 1656391e43daSPeter Zijlstra if (!has_pushable_tasks(rq)) 1657391e43daSPeter Zijlstra return NULL; 1658391e43daSPeter Zijlstra 1659391e43daSPeter Zijlstra p = plist_first_entry(&rq->rt.pushable_tasks, 1660391e43daSPeter Zijlstra struct task_struct, pushable_tasks); 1661391e43daSPeter Zijlstra 1662391e43daSPeter Zijlstra BUG_ON(rq->cpu != task_cpu(p)); 1663391e43daSPeter Zijlstra BUG_ON(task_current(rq, p)); 166429baa747SPeter Zijlstra BUG_ON(p->nr_cpus_allowed <= 1); 1665391e43daSPeter Zijlstra 1666da0c1e65SKirill Tkhai BUG_ON(!task_on_rq_queued(p)); 1667391e43daSPeter Zijlstra BUG_ON(!rt_task(p)); 1668391e43daSPeter Zijlstra 1669391e43daSPeter Zijlstra return p; 1670391e43daSPeter Zijlstra } 1671391e43daSPeter Zijlstra 1672391e43daSPeter Zijlstra /* 1673391e43daSPeter Zijlstra * If the current CPU has more than one RT task, see if the non 1674391e43daSPeter Zijlstra * running task can migrate over to a CPU that is running a task 1675391e43daSPeter Zijlstra * of lesser priority. 1676391e43daSPeter Zijlstra */ 1677391e43daSPeter Zijlstra static int push_rt_task(struct rq *rq) 1678391e43daSPeter Zijlstra { 1679391e43daSPeter Zijlstra struct task_struct *next_task; 1680391e43daSPeter Zijlstra struct rq *lowest_rq; 1681391e43daSPeter Zijlstra int ret = 0; 1682391e43daSPeter Zijlstra 1683391e43daSPeter Zijlstra if (!rq->rt.overloaded) 1684391e43daSPeter Zijlstra return 0; 1685391e43daSPeter Zijlstra 1686391e43daSPeter Zijlstra next_task = pick_next_pushable_task(rq); 1687391e43daSPeter Zijlstra if (!next_task) 1688391e43daSPeter Zijlstra return 0; 1689391e43daSPeter Zijlstra 1690391e43daSPeter Zijlstra retry: 1691391e43daSPeter Zijlstra if (unlikely(next_task == rq->curr)) { 1692391e43daSPeter Zijlstra WARN_ON(1); 1693391e43daSPeter Zijlstra return 0; 1694391e43daSPeter Zijlstra } 1695391e43daSPeter Zijlstra 1696391e43daSPeter Zijlstra /* 1697391e43daSPeter Zijlstra * It's possible that the next_task slipped in of 1698391e43daSPeter Zijlstra * higher priority than current. If that's the case 1699391e43daSPeter Zijlstra * just reschedule current. 
1700391e43daSPeter Zijlstra */ 1701391e43daSPeter Zijlstra if (unlikely(next_task->prio < rq->curr->prio)) { 17028875125eSKirill Tkhai resched_curr(rq); 1703391e43daSPeter Zijlstra return 0; 1704391e43daSPeter Zijlstra } 1705391e43daSPeter Zijlstra 1706391e43daSPeter Zijlstra /* We might release rq lock */ 1707391e43daSPeter Zijlstra get_task_struct(next_task); 1708391e43daSPeter Zijlstra 1709391e43daSPeter Zijlstra /* find_lock_lowest_rq locks the rq if found */ 1710391e43daSPeter Zijlstra lowest_rq = find_lock_lowest_rq(next_task, rq); 1711391e43daSPeter Zijlstra if (!lowest_rq) { 1712391e43daSPeter Zijlstra struct task_struct *task; 1713391e43daSPeter Zijlstra /* 1714391e43daSPeter Zijlstra * find_lock_lowest_rq releases rq->lock 1715391e43daSPeter Zijlstra * so it is possible that next_task has migrated. 1716391e43daSPeter Zijlstra * 1717391e43daSPeter Zijlstra * We need to make sure that the task is still on the same 1718391e43daSPeter Zijlstra * run-queue and is also still the next task eligible for 1719391e43daSPeter Zijlstra * pushing. 1720391e43daSPeter Zijlstra */ 1721391e43daSPeter Zijlstra task = pick_next_pushable_task(rq); 1722391e43daSPeter Zijlstra if (task_cpu(next_task) == rq->cpu && task == next_task) { 1723391e43daSPeter Zijlstra /* 1724391e43daSPeter Zijlstra * The task hasn't migrated, and is still the next 1725391e43daSPeter Zijlstra * eligible task, but we failed to find a run-queue 1726391e43daSPeter Zijlstra * to push it to. Do not retry in this case, since 1727391e43daSPeter Zijlstra * other cpus will pull from us when ready. 1728391e43daSPeter Zijlstra */ 1729391e43daSPeter Zijlstra goto out; 1730391e43daSPeter Zijlstra } 1731391e43daSPeter Zijlstra 1732391e43daSPeter Zijlstra if (!task) 1733391e43daSPeter Zijlstra /* No more tasks, just exit */ 1734391e43daSPeter Zijlstra goto out; 1735391e43daSPeter Zijlstra 1736391e43daSPeter Zijlstra /* 1737391e43daSPeter Zijlstra * Something has shifted, try again. 
1738391e43daSPeter Zijlstra */ 1739391e43daSPeter Zijlstra put_task_struct(next_task); 1740391e43daSPeter Zijlstra next_task = task; 1741391e43daSPeter Zijlstra goto retry; 1742391e43daSPeter Zijlstra } 1743391e43daSPeter Zijlstra 1744391e43daSPeter Zijlstra deactivate_task(rq, next_task, 0); 1745391e43daSPeter Zijlstra set_task_cpu(next_task, lowest_rq->cpu); 1746391e43daSPeter Zijlstra activate_task(lowest_rq, next_task, 0); 1747391e43daSPeter Zijlstra ret = 1; 1748391e43daSPeter Zijlstra 17498875125eSKirill Tkhai resched_curr(lowest_rq); 1750391e43daSPeter Zijlstra 1751391e43daSPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1752391e43daSPeter Zijlstra 1753391e43daSPeter Zijlstra out: 1754391e43daSPeter Zijlstra put_task_struct(next_task); 1755391e43daSPeter Zijlstra 1756391e43daSPeter Zijlstra return ret; 1757391e43daSPeter Zijlstra } 1758391e43daSPeter Zijlstra 1759391e43daSPeter Zijlstra static void push_rt_tasks(struct rq *rq) 1760391e43daSPeter Zijlstra { 1761391e43daSPeter Zijlstra /* push_rt_task() will return true if it moved an RT task */ 1762391e43daSPeter Zijlstra while (push_rt_task(rq)) 1763391e43daSPeter Zijlstra ; 1764391e43daSPeter Zijlstra } 1765391e43daSPeter Zijlstra 1766391e43daSPeter Zijlstra static int pull_rt_task(struct rq *this_rq) 1767391e43daSPeter Zijlstra { 1768391e43daSPeter Zijlstra int this_cpu = this_rq->cpu, ret = 0, cpu; 1769391e43daSPeter Zijlstra struct task_struct *p; 1770391e43daSPeter Zijlstra struct rq *src_rq; 1771391e43daSPeter Zijlstra 1772391e43daSPeter Zijlstra if (likely(!rt_overloaded(this_rq))) 1773391e43daSPeter Zijlstra return 0; 1774391e43daSPeter Zijlstra 17757c3f2ab7SPeter Zijlstra /* 17767c3f2ab7SPeter Zijlstra * Match the barrier from rt_set_overloaded; this guarantees that if we 17777c3f2ab7SPeter Zijlstra * see overloaded we must also see the rto_mask bit. 17787c3f2ab7SPeter Zijlstra */ 17797c3f2ab7SPeter Zijlstra smp_rmb(); 17807c3f2ab7SPeter Zijlstra 1781391e43daSPeter Zijlstra for_each_cpu(cpu, this_rq->rd->rto_mask) { 1782391e43daSPeter Zijlstra if (this_cpu == cpu) 1783391e43daSPeter Zijlstra continue; 1784391e43daSPeter Zijlstra 1785391e43daSPeter Zijlstra src_rq = cpu_rq(cpu); 1786391e43daSPeter Zijlstra 1787391e43daSPeter Zijlstra /* 1788391e43daSPeter Zijlstra * Don't bother taking the src_rq->lock if the next highest 1789391e43daSPeter Zijlstra * task is known to be lower-priority than our current task. 1790391e43daSPeter Zijlstra * This may look racy, but if this value is about to go 1791391e43daSPeter Zijlstra * logically higher, the src_rq will push this task away. 1792391e43daSPeter Zijlstra * And if it's going logically lower, we do not care. 1793391e43daSPeter Zijlstra */ 1794391e43daSPeter Zijlstra if (src_rq->rt.highest_prio.next >= 1795391e43daSPeter Zijlstra this_rq->rt.highest_prio.curr) 1796391e43daSPeter Zijlstra continue; 1797391e43daSPeter Zijlstra 1798391e43daSPeter Zijlstra /* 1799391e43daSPeter Zijlstra * We can potentially drop this_rq's lock in 1800391e43daSPeter Zijlstra * double_lock_balance, and another CPU could 1801391e43daSPeter Zijlstra * alter this_rq 1802391e43daSPeter Zijlstra */ 1803391e43daSPeter Zijlstra double_lock_balance(this_rq, src_rq); 1804391e43daSPeter Zijlstra 1805391e43daSPeter Zijlstra /* 1806e23ee747SKirill Tkhai * We can only pull a task that is pushable 1807e23ee747SKirill Tkhai * on its rq, and no others.
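 * (Editor's note, added for clarity: pick_highest_pushable_task() walks
 * src_rq's pushable_tasks plist, which never contains that CPU's currently
 * running task, so a running task can never be stolen here.)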
1808391e43daSPeter Zijlstra */ 1809e23ee747SKirill Tkhai p = pick_highest_pushable_task(src_rq, this_cpu); 1810391e43daSPeter Zijlstra 1811391e43daSPeter Zijlstra /* 1812391e43daSPeter Zijlstra * Do we have an RT task that preempts 1813391e43daSPeter Zijlstra * the to-be-scheduled task? 1814391e43daSPeter Zijlstra */ 1815391e43daSPeter Zijlstra if (p && (p->prio < this_rq->rt.highest_prio.curr)) { 1816391e43daSPeter Zijlstra WARN_ON(p == src_rq->curr); 1817da0c1e65SKirill Tkhai WARN_ON(!task_on_rq_queued(p)); 1818391e43daSPeter Zijlstra 1819391e43daSPeter Zijlstra /* 1820391e43daSPeter Zijlstra * There's a chance that p is higher in priority 1821391e43daSPeter Zijlstra * than what's currently running on its cpu. 1822391e43daSPeter Zijlstra * This is just because p is waking up and hasn't 1823391e43daSPeter Zijlstra * had a chance to schedule. We only pull 1824391e43daSPeter Zijlstra * p if it is lower in priority than the 1825391e43daSPeter Zijlstra * current task on the run queue. 1826391e43daSPeter Zijlstra */ 1827391e43daSPeter Zijlstra if (p->prio < src_rq->curr->prio) 1828391e43daSPeter Zijlstra goto skip; 1829391e43daSPeter Zijlstra 1830391e43daSPeter Zijlstra ret = 1; 1831391e43daSPeter Zijlstra 1832391e43daSPeter Zijlstra deactivate_task(src_rq, p, 0); 1833391e43daSPeter Zijlstra set_task_cpu(p, this_cpu); 1834391e43daSPeter Zijlstra activate_task(this_rq, p, 0); 1835391e43daSPeter Zijlstra /* 1836391e43daSPeter Zijlstra * We continue with the search, just in 1837391e43daSPeter Zijlstra * case there's an even higher prio task 1838391e43daSPeter Zijlstra * in another runqueue. (low likelihood 1839391e43daSPeter Zijlstra * but possible) 1840391e43daSPeter Zijlstra */ 1841391e43daSPeter Zijlstra } 1842391e43daSPeter Zijlstra skip: 1843391e43daSPeter Zijlstra double_unlock_balance(this_rq, src_rq); 1844391e43daSPeter Zijlstra } 1845391e43daSPeter Zijlstra 1846391e43daSPeter Zijlstra return ret; 1847391e43daSPeter Zijlstra } 1848391e43daSPeter Zijlstra 1849391e43daSPeter Zijlstra static void post_schedule_rt(struct rq *rq) 1850391e43daSPeter Zijlstra { 1851391e43daSPeter Zijlstra push_rt_tasks(rq); 1852391e43daSPeter Zijlstra } 1853391e43daSPeter Zijlstra 1854391e43daSPeter Zijlstra /* 1855391e43daSPeter Zijlstra * If we are not running and we are not going to reschedule soon, we should 1856391e43daSPeter Zijlstra * try to push tasks away now 1857391e43daSPeter Zijlstra */ 1858391e43daSPeter Zijlstra static void task_woken_rt(struct rq *rq, struct task_struct *p) 1859391e43daSPeter Zijlstra { 1860391e43daSPeter Zijlstra if (!task_running(rq, p) && 1861391e43daSPeter Zijlstra !test_tsk_need_resched(rq->curr) && 1862391e43daSPeter Zijlstra has_pushable_tasks(rq) && 186329baa747SPeter Zijlstra p->nr_cpus_allowed > 1 && 18641baca4ceSJuri Lelli (dl_task(rq->curr) || rt_task(rq->curr)) && 186529baa747SPeter Zijlstra (rq->curr->nr_cpus_allowed < 2 || 1866391e43daSPeter Zijlstra rq->curr->prio <= p->prio)) 1867391e43daSPeter Zijlstra push_rt_tasks(rq); 1868391e43daSPeter Zijlstra } 1869391e43daSPeter Zijlstra 1870391e43daSPeter Zijlstra static void set_cpus_allowed_rt(struct task_struct *p, 1871391e43daSPeter Zijlstra const struct cpumask *new_mask) 1872391e43daSPeter Zijlstra { 18738d3d5adaSKirill Tkhai struct rq *rq; 18748d3d5adaSKirill Tkhai int weight; 1875391e43daSPeter Zijlstra 1876391e43daSPeter Zijlstra BUG_ON(!rt_task(p)); 1877391e43daSPeter Zijlstra 1878da0c1e65SKirill Tkhai if (!task_on_rq_queued(p)) 18798d3d5adaSKirill Tkhai return; 1880391e43daSPeter Zijlstra 18818d3d5adaSKirill Tkhai
weight = cpumask_weight(new_mask); 18828d3d5adaSKirill Tkhai 1883391e43daSPeter Zijlstra /* 18848d3d5adaSKirill Tkhai * Only update if the task changes whether it 18858d3d5adaSKirill Tkhai * can migrate or not. 1886391e43daSPeter Zijlstra */ 188729baa747SPeter Zijlstra if ((p->nr_cpus_allowed > 1) == (weight > 1)) 18888d3d5adaSKirill Tkhai return; 18898d3d5adaSKirill Tkhai 18908d3d5adaSKirill Tkhai rq = task_rq(p); 18918d3d5adaSKirill Tkhai 18928d3d5adaSKirill Tkhai /* 18938d3d5adaSKirill Tkhai * The process used to be able to migrate OR it can now migrate 18948d3d5adaSKirill Tkhai */ 18958d3d5adaSKirill Tkhai if (weight <= 1) { 18968d3d5adaSKirill Tkhai if (!task_current(rq, p)) 1897391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 1898391e43daSPeter Zijlstra BUG_ON(!rq->rt.rt_nr_migratory); 1899391e43daSPeter Zijlstra rq->rt.rt_nr_migratory--; 19008d3d5adaSKirill Tkhai } else { 19018d3d5adaSKirill Tkhai if (!task_current(rq, p)) 19028d3d5adaSKirill Tkhai enqueue_pushable_task(rq, p); 19038d3d5adaSKirill Tkhai rq->rt.rt_nr_migratory++; 1904391e43daSPeter Zijlstra } 1905391e43daSPeter Zijlstra 1906391e43daSPeter Zijlstra update_rt_migration(&rq->rt); 1907391e43daSPeter Zijlstra } 1908391e43daSPeter Zijlstra 1909391e43daSPeter Zijlstra /* Assumes rq->lock is held */ 1910391e43daSPeter Zijlstra static void rq_online_rt(struct rq *rq) 1911391e43daSPeter Zijlstra { 1912391e43daSPeter Zijlstra if (rq->rt.overloaded) 1913391e43daSPeter Zijlstra rt_set_overload(rq); 1914391e43daSPeter Zijlstra 1915391e43daSPeter Zijlstra __enable_runtime(rq); 1916391e43daSPeter Zijlstra 1917391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); 1918391e43daSPeter Zijlstra } 1919391e43daSPeter Zijlstra 1920391e43daSPeter Zijlstra /* Assumes rq->lock is held */ 1921391e43daSPeter Zijlstra static void rq_offline_rt(struct rq *rq) 1922391e43daSPeter Zijlstra { 1923391e43daSPeter Zijlstra if (rq->rt.overloaded) 1924391e43daSPeter Zijlstra rt_clear_overload(rq); 1925391e43daSPeter Zijlstra 1926391e43daSPeter Zijlstra __disable_runtime(rq); 1927391e43daSPeter Zijlstra 1928391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); 1929391e43daSPeter Zijlstra } 1930391e43daSPeter Zijlstra 1931391e43daSPeter Zijlstra /* 1932391e43daSPeter Zijlstra * When switching from the rt queue, we bring ourselves to a position 1933391e43daSPeter Zijlstra * where we might want to pull RT tasks from other runqueues. 1934391e43daSPeter Zijlstra */ 1935391e43daSPeter Zijlstra static void switched_from_rt(struct rq *rq, struct task_struct *p) 1936391e43daSPeter Zijlstra { 1937391e43daSPeter Zijlstra /* 1938391e43daSPeter Zijlstra * If there are other RT tasks then we will reschedule 1939391e43daSPeter Zijlstra * and the scheduling of the other RT tasks will handle 1940391e43daSPeter Zijlstra * the balancing. But if we are the last RT task 1941391e43daSPeter Zijlstra * we may need to handle the pulling of RT tasks 1942391e43daSPeter Zijlstra * now.
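 * (Editor's note, added for clarity: the check below only attempts a pull
 * once p has actually left the runqueue and no RT tasks remain queued on
 * it.)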
1943391e43daSPeter Zijlstra */ 1944da0c1e65SKirill Tkhai if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) 19451158ddb5SKirill Tkhai return; 19461158ddb5SKirill Tkhai 19471158ddb5SKirill Tkhai if (pull_rt_task(rq)) 19488875125eSKirill Tkhai resched_curr(rq); 1949391e43daSPeter Zijlstra } 1950391e43daSPeter Zijlstra 195111c785b7SLi Zefan void __init init_sched_rt_class(void) 1952391e43daSPeter Zijlstra { 1953391e43daSPeter Zijlstra unsigned int i; 1954391e43daSPeter Zijlstra 1955391e43daSPeter Zijlstra for_each_possible_cpu(i) { 1956391e43daSPeter Zijlstra zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 1957391e43daSPeter Zijlstra GFP_KERNEL, cpu_to_node(i)); 1958391e43daSPeter Zijlstra } 1959391e43daSPeter Zijlstra } 1960391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 1961391e43daSPeter Zijlstra 1962391e43daSPeter Zijlstra /* 1963391e43daSPeter Zijlstra * When switching a task to RT, we may overload the runqueue 1964391e43daSPeter Zijlstra * with RT tasks. In this case we try to push them off to 1965391e43daSPeter Zijlstra * other runqueues. 1966391e43daSPeter Zijlstra */ 1967391e43daSPeter Zijlstra static void switched_to_rt(struct rq *rq, struct task_struct *p) 1968391e43daSPeter Zijlstra { 1969391e43daSPeter Zijlstra int check_resched = 1; 1970391e43daSPeter Zijlstra 1971391e43daSPeter Zijlstra /* 1972391e43daSPeter Zijlstra * If we are already running, then there's nothing 1973391e43daSPeter Zijlstra * that needs to be done. But if we are not running 1974391e43daSPeter Zijlstra * we may need to preempt the current running task. 1975391e43daSPeter Zijlstra * If that current running task is also an RT task 1976391e43daSPeter Zijlstra * then see if we can move to another run queue. 1977391e43daSPeter Zijlstra */ 1978da0c1e65SKirill Tkhai if (task_on_rq_queued(p) && rq->curr != p) { 1979391e43daSPeter Zijlstra #ifdef CONFIG_SMP 198010447917SKirill V Tkhai if (p->nr_cpus_allowed > 1 && rq->rt.overloaded && 1981391e43daSPeter Zijlstra /* Don't resched if we changed runqueues */ 198210447917SKirill V Tkhai push_rt_task(rq) && rq != task_rq(p)) 1983391e43daSPeter Zijlstra check_resched = 0; 1984391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 1985391e43daSPeter Zijlstra if (check_resched && p->prio < rq->curr->prio) 19868875125eSKirill Tkhai resched_curr(rq); 1987391e43daSPeter Zijlstra } 1988391e43daSPeter Zijlstra } 1989391e43daSPeter Zijlstra 1990391e43daSPeter Zijlstra /* 1991391e43daSPeter Zijlstra * Priority of the task has changed. This may cause 1992391e43daSPeter Zijlstra * us to initiate a push or pull. 1993391e43daSPeter Zijlstra */ 1994391e43daSPeter Zijlstra static void 1995391e43daSPeter Zijlstra prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) 1996391e43daSPeter Zijlstra { 1997da0c1e65SKirill Tkhai if (!task_on_rq_queued(p)) 1998391e43daSPeter Zijlstra return; 1999391e43daSPeter Zijlstra 2000391e43daSPeter Zijlstra if (rq->curr == p) { 2001391e43daSPeter Zijlstra #ifdef CONFIG_SMP 2002391e43daSPeter Zijlstra /* 2003391e43daSPeter Zijlstra * If our priority decreases while running, we 2004391e43daSPeter Zijlstra * may need to pull tasks to this runqueue. 2005391e43daSPeter Zijlstra */ 2006391e43daSPeter Zijlstra if (oldprio < p->prio) 2007391e43daSPeter Zijlstra pull_rt_task(rq); 2008391e43daSPeter Zijlstra /* 2009391e43daSPeter Zijlstra * If there's a higher priority task waiting to run 2010391e43daSPeter Zijlstra * then reschedule. Note, the above pull_rt_task 2011391e43daSPeter Zijlstra * can release the rq lock and p could migrate. 
2012391e43daSPeter Zijlstra * Only reschedule if p is still on the same runqueue. 2013391e43daSPeter Zijlstra */ 2014391e43daSPeter Zijlstra if (p->prio > rq->rt.highest_prio.curr && rq->curr == p) 20158875125eSKirill Tkhai resched_curr(rq); 2016391e43daSPeter Zijlstra #else 2017391e43daSPeter Zijlstra /* For UP simply resched on drop of prio */ 2018391e43daSPeter Zijlstra if (oldprio < p->prio) 20198875125eSKirill Tkhai resched_curr(rq); 2020391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 2021391e43daSPeter Zijlstra } else { 2022391e43daSPeter Zijlstra /* 2023391e43daSPeter Zijlstra * This task is not running, but if it is 2024391e43daSPeter Zijlstra * greater than the current running task 2025391e43daSPeter Zijlstra * then reschedule. 2026391e43daSPeter Zijlstra */ 2027391e43daSPeter Zijlstra if (p->prio < rq->curr->prio) 20288875125eSKirill Tkhai resched_curr(rq); 2029391e43daSPeter Zijlstra } 2030391e43daSPeter Zijlstra } 2031391e43daSPeter Zijlstra 2032391e43daSPeter Zijlstra static void watchdog(struct rq *rq, struct task_struct *p) 2033391e43daSPeter Zijlstra { 2034391e43daSPeter Zijlstra unsigned long soft, hard; 2035391e43daSPeter Zijlstra 2036391e43daSPeter Zijlstra /* max may change after cur was read, this will be fixed next tick */ 2037391e43daSPeter Zijlstra soft = task_rlimit(p, RLIMIT_RTTIME); 2038391e43daSPeter Zijlstra hard = task_rlimit_max(p, RLIMIT_RTTIME); 2039391e43daSPeter Zijlstra 2040391e43daSPeter Zijlstra if (soft != RLIM_INFINITY) { 2041391e43daSPeter Zijlstra unsigned long next; 2042391e43daSPeter Zijlstra 204357d2aa00SYing Xue if (p->rt.watchdog_stamp != jiffies) { 2044391e43daSPeter Zijlstra p->rt.timeout++; 204557d2aa00SYing Xue p->rt.watchdog_stamp = jiffies; 204657d2aa00SYing Xue } 204757d2aa00SYing Xue 2048391e43daSPeter Zijlstra next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); 2049391e43daSPeter Zijlstra if (p->rt.timeout > next) 2050391e43daSPeter Zijlstra p->cputime_expires.sched_exp = p->se.sum_exec_runtime; 2051391e43daSPeter Zijlstra } 2052391e43daSPeter Zijlstra } 2053391e43daSPeter Zijlstra 2054391e43daSPeter Zijlstra static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) 2055391e43daSPeter Zijlstra { 2056454c7999SColin Cross struct sched_rt_entity *rt_se = &p->rt; 2057454c7999SColin Cross 2058391e43daSPeter Zijlstra update_curr_rt(rq); 2059391e43daSPeter Zijlstra 2060391e43daSPeter Zijlstra watchdog(rq, p); 2061391e43daSPeter Zijlstra 2062391e43daSPeter Zijlstra /* 2063391e43daSPeter Zijlstra * RR tasks need a special form of timeslice management. 2064391e43daSPeter Zijlstra * FIFO tasks have no timeslices. 
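 * (Editor's note, added for clarity: the code below decrements the RR time
 * slice on every tick; when it hits zero the slice is refilled from
 * sched_rr_timeslice and the task is requeued to the tail of its priority
 * list, unless it is the only entity queued at that priority.)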
2065391e43daSPeter Zijlstra */ 2066391e43daSPeter Zijlstra if (p->policy != SCHED_RR) 2067391e43daSPeter Zijlstra return; 2068391e43daSPeter Zijlstra 2069391e43daSPeter Zijlstra if (--p->rt.time_slice) 2070391e43daSPeter Zijlstra return; 2071391e43daSPeter Zijlstra 2072ce0dbbbbSClark Williams p->rt.time_slice = sched_rr_timeslice; 2073391e43daSPeter Zijlstra 2074391e43daSPeter Zijlstra /* 2075e9aa39bbSLi Bin * Requeue to the end of queue if we (and all of our ancestors) are not 2076e9aa39bbSLi Bin * the only element on the queue 2077391e43daSPeter Zijlstra */ 2078454c7999SColin Cross for_each_sched_rt_entity(rt_se) { 2079454c7999SColin Cross if (rt_se->run_list.prev != rt_se->run_list.next) { 2080391e43daSPeter Zijlstra requeue_task_rt(rq, p, 0); 20818aa6f0ebSKirill Tkhai resched_curr(rq); 2082454c7999SColin Cross return; 2083454c7999SColin Cross } 2084391e43daSPeter Zijlstra } 2085391e43daSPeter Zijlstra } 2086391e43daSPeter Zijlstra 2087391e43daSPeter Zijlstra static void set_curr_task_rt(struct rq *rq) 2088391e43daSPeter Zijlstra { 2089391e43daSPeter Zijlstra struct task_struct *p = rq->curr; 2090391e43daSPeter Zijlstra 209178becc27SFrederic Weisbecker p->se.exec_start = rq_clock_task(rq); 2092391e43daSPeter Zijlstra 2093391e43daSPeter Zijlstra /* The running task is never eligible for pushing */ 2094391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 2095391e43daSPeter Zijlstra } 2096391e43daSPeter Zijlstra 2097391e43daSPeter Zijlstra static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) 2098391e43daSPeter Zijlstra { 2099391e43daSPeter Zijlstra /* 2100391e43daSPeter Zijlstra * Time slice is 0 for SCHED_FIFO tasks 2101391e43daSPeter Zijlstra */ 2102391e43daSPeter Zijlstra if (task->policy == SCHED_RR) 2103ce0dbbbbSClark Williams return sched_rr_timeslice; 2104391e43daSPeter Zijlstra else 2105391e43daSPeter Zijlstra return 0; 2106391e43daSPeter Zijlstra } 2107391e43daSPeter Zijlstra 2108391e43daSPeter Zijlstra const struct sched_class rt_sched_class = { 2109391e43daSPeter Zijlstra .next = &fair_sched_class, 2110391e43daSPeter Zijlstra .enqueue_task = enqueue_task_rt, 2111391e43daSPeter Zijlstra .dequeue_task = dequeue_task_rt, 2112391e43daSPeter Zijlstra .yield_task = yield_task_rt, 2113391e43daSPeter Zijlstra 2114391e43daSPeter Zijlstra .check_preempt_curr = check_preempt_curr_rt, 2115391e43daSPeter Zijlstra 2116391e43daSPeter Zijlstra .pick_next_task = pick_next_task_rt, 2117391e43daSPeter Zijlstra .put_prev_task = put_prev_task_rt, 2118391e43daSPeter Zijlstra 2119391e43daSPeter Zijlstra #ifdef CONFIG_SMP 2120391e43daSPeter Zijlstra .select_task_rq = select_task_rq_rt, 2121391e43daSPeter Zijlstra 2122391e43daSPeter Zijlstra .set_cpus_allowed = set_cpus_allowed_rt, 2123391e43daSPeter Zijlstra .rq_online = rq_online_rt, 2124391e43daSPeter Zijlstra .rq_offline = rq_offline_rt, 2125391e43daSPeter Zijlstra .post_schedule = post_schedule_rt, 2126391e43daSPeter Zijlstra .task_woken = task_woken_rt, 2127391e43daSPeter Zijlstra .switched_from = switched_from_rt, 2128391e43daSPeter Zijlstra #endif 2129391e43daSPeter Zijlstra 2130391e43daSPeter Zijlstra .set_curr_task = set_curr_task_rt, 2131391e43daSPeter Zijlstra .task_tick = task_tick_rt, 2132391e43daSPeter Zijlstra 2133391e43daSPeter Zijlstra .get_rr_interval = get_rr_interval_rt, 2134391e43daSPeter Zijlstra 2135391e43daSPeter Zijlstra .prio_changed = prio_changed_rt, 2136391e43daSPeter Zijlstra .switched_to = switched_to_rt, 21376e998916SStanislaw Gruszka 21386e998916SStanislaw Gruszka .update_curr = 
update_curr_rt, 2139391e43daSPeter Zijlstra }; 2140391e43daSPeter Zijlstra 2141391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 2142391e43daSPeter Zijlstra extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2143391e43daSPeter Zijlstra 2144391e43daSPeter Zijlstra void print_rt_stats(struct seq_file *m, int cpu) 2145391e43daSPeter Zijlstra { 2146391e43daSPeter Zijlstra rt_rq_iter_t iter; 2147391e43daSPeter Zijlstra struct rt_rq *rt_rq; 2148391e43daSPeter Zijlstra 2149391e43daSPeter Zijlstra rcu_read_lock(); 2150391e43daSPeter Zijlstra for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) 2151391e43daSPeter Zijlstra print_rt_rq(m, cpu, rt_rq); 2152391e43daSPeter Zijlstra rcu_read_unlock(); 2153391e43daSPeter Zijlstra } 2154391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_DEBUG */ 2155
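/*
 * Editor's note (not part of the original file): for readers coming from
 * userspace, tasks are placed under this scheduling class with
 * sched_setscheduler(2). A minimal sketch follows; the choice of SCHED_FIFO
 * and of the minimum priority is purely illustrative, and running it is
 * assumed to require CAP_SYS_NICE or a suitable RLIMIT_RTPRIO:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sched_param sp = {
 *			.sched_priority = sched_get_priority_min(SCHED_FIFO),
 *		};
 *
 *		if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0)
 *			perror("sched_setscheduler");
 *		return 0;
 *	}
 *
 * SCHED_RR differs only in the round-robin timeslice handling done by
 * task_tick_rt() above; sched_rr_get_interval(2) reports that timeslice via
 * get_rr_interval_rt() in this file.
 */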