// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#include "sched.h"

int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/*
		 * SCHED_DEADLINE updates the bandwidth, as a runaway
		 * RT task with a DL task could hog a CPU. But DL does
		 * not reset the period. If a deadline task was running
		 * without an RT task running, it can cause RT tasks to
		 * throttle when they start up. Kick the timer right away
		 * to update the period.
		 */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

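/*
 * Initialize an rt_rq: empty the per-priority queues, clear the bitmap
 * (keeping the MAX_RT_PRIO bit set as a search delimiter) and reset the
 * throttling and runtime accounting state.
 */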
void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

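/*
 * Free the per-CPU rt_rq and rt_se arrays of a task group and cancel its
 * bandwidth timer. The NULL checks allow this to be called on a group
 * whose allocation in alloc_rt_sched_group() only partially succeeded.
 */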
void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

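/*
 * Allocate per-CPU rt_rq/rt_se pairs for a new task group and hook each
 * entity below the corresponding entity of @parent. Returns 1 on success
 * and 0 on allocation failure.
 */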
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

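/*
 * An rq is "RT overloaded" when it has more than one RT task queued and
 * at least one of them can migrate. The state is published in the root
 * domain's rto_mask/rto_count so other CPUs know where to pull from.
 */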
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

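/*
 * Push/pull balancing is not done directly here; instead a balance
 * callback is queued via queue_balance_callback() on the per-CPU
 * rt_push_head/rt_pull_head and invoked later by the balance callback
 * machinery.
 */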
static inline void rt_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

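/*
 * With CONFIG_RT_GROUP_SCHED every task group has its own per-CPU rt_rq,
 * rt_se hierarchy and rt_bandwidth; the helpers below resolve runtime,
 * period and the group hierarchy accordingly. Without it, only the root
 * rt_rq and def_rt_bandwidth exist.
 */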
#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

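/*
 * Counterpart of sched_rt_rq_enqueue(): take the group's entity off its
 * parent rt_rq, or detach the root rt_rq from the runqueue when there is
 * no group entity.
 */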
static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se)
		dequeue_top_rt_rq(rt_rq);
	else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

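/*
 * On SMP, RT runtime may be shared between the CPUs of a root domain
 * (the RT_RUNTIME_SHARE feature): a runqueue that exhausts its budget can
 * borrow unused runtime from its neighbours (do_balance_runtime()) and is
 * made to hand it back again in __disable_runtime().
 */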
#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void balance_runtime(struct rt_rq *rt_rq)
{
	if (!sched_feat(RT_RUNTIME_SHARE))
		return;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */

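/*
 * Periodic replenishment from the rt_bandwidth timer: decay the
 * accumulated rt_time of every rt_rq covered by the timer, unthrottle
 * runqueues whose rt_time dropped below their runtime and re-enqueue
 * them. A nonzero return tells the caller the timer may stop.
 */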
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled.  If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway.  Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);
		int skip;

		/*
		 * When span == cpu_online_mask, taking each rq->lock
		 * can be time-consuming. Try to avoid it when possible.
		 */
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		if (skip)
			continue;

		raw_spin_lock(&rq->lock);
		update_rq_clock(rq);

		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled, check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_cancel_skipupdate(rq);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	u64 delta_exec;
	u64 now;

	if (curr->sched_class != &rt_sched_class)
		return;

	now = rq_clock_task(rq);
	delta_exec = now - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = now;
	cgroup_account_cputime(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 0;

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, 0);
}

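/*
 * Mirror image of dequeue_top_rt_rq(): account the root rt_rq's tasks in
 * rq->nr_running again and kick cpufreq, unless the rt_rq is throttled
 * or empty.
 */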
static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;
	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
		return;

	add_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 1;

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, 0);
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq)
		return group_rq->rt_nr_running;
	else
		return 1;
}

static inline
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct task_struct *tsk;

	if (group_rq)
		return group_rq->rr_nr_running;

	tsk = rt_task_of(rt_se);

	return (tsk->policy == SCHED_RR) ? 1 : 0;
}

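/*
 * inc_rt_tasks()/dec_rt_tasks() update the rt_rq accounting (rt_nr_running,
 * rr_nr_running, highest prio, migratability and group state) whenever an
 * entity is added to or removed from the priority array.
 */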
1 : 0; 117201d36d0aSFrederic Weisbecker } 117301d36d0aSFrederic Weisbecker 117401d36d0aSFrederic Weisbecker static inline 1175391e43daSPeter Zijlstra void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1176391e43daSPeter Zijlstra { 1177391e43daSPeter Zijlstra int prio = rt_se_prio(rt_se); 1178391e43daSPeter Zijlstra 1179391e43daSPeter Zijlstra WARN_ON(!rt_prio(prio)); 118022abdef3SKirill Tkhai rt_rq->rt_nr_running += rt_se_nr_running(rt_se); 118101d36d0aSFrederic Weisbecker rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se); 1182391e43daSPeter Zijlstra 1183391e43daSPeter Zijlstra inc_rt_prio(rt_rq, prio); 1184391e43daSPeter Zijlstra inc_rt_migration(rt_se, rt_rq); 1185391e43daSPeter Zijlstra inc_rt_group(rt_se, rt_rq); 1186391e43daSPeter Zijlstra } 1187391e43daSPeter Zijlstra 1188391e43daSPeter Zijlstra static inline 1189391e43daSPeter Zijlstra void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1190391e43daSPeter Zijlstra { 1191391e43daSPeter Zijlstra WARN_ON(!rt_prio(rt_se_prio(rt_se))); 1192391e43daSPeter Zijlstra WARN_ON(!rt_rq->rt_nr_running); 119322abdef3SKirill Tkhai rt_rq->rt_nr_running -= rt_se_nr_running(rt_se); 119401d36d0aSFrederic Weisbecker rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se); 1195391e43daSPeter Zijlstra 1196391e43daSPeter Zijlstra dec_rt_prio(rt_rq, rt_se_prio(rt_se)); 1197391e43daSPeter Zijlstra dec_rt_migration(rt_se, rt_rq); 1198391e43daSPeter Zijlstra dec_rt_group(rt_se, rt_rq); 1199391e43daSPeter Zijlstra } 1200391e43daSPeter Zijlstra 1201ff77e468SPeter Zijlstra /* 1202ff77e468SPeter Zijlstra * Change rt_se->run_list location unless SAVE && !MOVE 1203ff77e468SPeter Zijlstra * 1204ff77e468SPeter Zijlstra * assumes ENQUEUE/DEQUEUE flags match 1205ff77e468SPeter Zijlstra */ 1206ff77e468SPeter Zijlstra static inline bool move_entity(unsigned int flags) 1207ff77e468SPeter Zijlstra { 1208ff77e468SPeter Zijlstra if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE) 1209ff77e468SPeter Zijlstra return false; 1210ff77e468SPeter Zijlstra 1211ff77e468SPeter Zijlstra return true; 1212ff77e468SPeter Zijlstra } 1213ff77e468SPeter Zijlstra 1214ff77e468SPeter Zijlstra static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array) 1215ff77e468SPeter Zijlstra { 1216ff77e468SPeter Zijlstra list_del_init(&rt_se->run_list); 1217ff77e468SPeter Zijlstra 1218ff77e468SPeter Zijlstra if (list_empty(array->queue + rt_se_prio(rt_se))) 1219ff77e468SPeter Zijlstra __clear_bit(rt_se_prio(rt_se), array->bitmap); 1220ff77e468SPeter Zijlstra 1221ff77e468SPeter Zijlstra rt_se->on_list = 0; 1222ff77e468SPeter Zijlstra } 1223ff77e468SPeter Zijlstra 1224ff77e468SPeter Zijlstra static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1225391e43daSPeter Zijlstra { 1226391e43daSPeter Zijlstra struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 1227391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1228391e43daSPeter Zijlstra struct rt_rq *group_rq = group_rt_rq(rt_se); 1229391e43daSPeter Zijlstra struct list_head *queue = array->queue + rt_se_prio(rt_se); 1230391e43daSPeter Zijlstra 1231391e43daSPeter Zijlstra /* 1232391e43daSPeter Zijlstra * Don't enqueue the group if its throttled, or when empty. 1233391e43daSPeter Zijlstra * The latter is a consequence of the former when a child group 1234391e43daSPeter Zijlstra * get throttled and the current group doesn't have any other 1235391e43daSPeter Zijlstra * active members. 
1236391e43daSPeter Zijlstra */ 1237ff77e468SPeter Zijlstra if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) { 1238ff77e468SPeter Zijlstra if (rt_se->on_list) 1239ff77e468SPeter Zijlstra __delist_rt_entity(rt_se, array); 1240391e43daSPeter Zijlstra return; 1241ff77e468SPeter Zijlstra } 1242391e43daSPeter Zijlstra 1243ff77e468SPeter Zijlstra if (move_entity(flags)) { 1244ff77e468SPeter Zijlstra WARN_ON_ONCE(rt_se->on_list); 1245ff77e468SPeter Zijlstra if (flags & ENQUEUE_HEAD) 1246391e43daSPeter Zijlstra list_add(&rt_se->run_list, queue); 1247391e43daSPeter Zijlstra else 1248391e43daSPeter Zijlstra list_add_tail(&rt_se->run_list, queue); 1249ff77e468SPeter Zijlstra 1250391e43daSPeter Zijlstra __set_bit(rt_se_prio(rt_se), array->bitmap); 1251ff77e468SPeter Zijlstra rt_se->on_list = 1; 1252ff77e468SPeter Zijlstra } 1253ff77e468SPeter Zijlstra rt_se->on_rq = 1; 1254391e43daSPeter Zijlstra 1255391e43daSPeter Zijlstra inc_rt_tasks(rt_se, rt_rq); 1256391e43daSPeter Zijlstra } 1257391e43daSPeter Zijlstra 1258ff77e468SPeter Zijlstra static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1259391e43daSPeter Zijlstra { 1260391e43daSPeter Zijlstra struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 1261391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1262391e43daSPeter Zijlstra 1263ff77e468SPeter Zijlstra if (move_entity(flags)) { 1264ff77e468SPeter Zijlstra WARN_ON_ONCE(!rt_se->on_list); 1265ff77e468SPeter Zijlstra __delist_rt_entity(rt_se, array); 1266ff77e468SPeter Zijlstra } 1267ff77e468SPeter Zijlstra rt_se->on_rq = 0; 1268391e43daSPeter Zijlstra 1269391e43daSPeter Zijlstra dec_rt_tasks(rt_se, rt_rq); 1270391e43daSPeter Zijlstra } 1271391e43daSPeter Zijlstra 1272391e43daSPeter Zijlstra /* 1273391e43daSPeter Zijlstra * Because the prio of an upper entry depends on the lower 1274391e43daSPeter Zijlstra * entries, we must remove entries top - down. 
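 *
 * Illustrative walk-through (editorial, not from the original source):
 * with RT group scheduling, a task T inside group A has the entity chain
 * T's rt_se -> A's rt_se. dequeue_rt_stack() below first records ->back
 * pointers while walking from T towards the root, then dequeues starting
 * at the topmost entity and follows ->back down to T, so an upper entity
 * is removed before the lower entities whose state it aggregates.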
1275391e43daSPeter Zijlstra */ 1276ff77e468SPeter Zijlstra static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags) 1277391e43daSPeter Zijlstra { 1278391e43daSPeter Zijlstra struct sched_rt_entity *back = NULL; 1279391e43daSPeter Zijlstra 1280391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 1281391e43daSPeter Zijlstra rt_se->back = back; 1282391e43daSPeter Zijlstra back = rt_se; 1283391e43daSPeter Zijlstra } 1284391e43daSPeter Zijlstra 1285f4ebcbc0SKirill Tkhai dequeue_top_rt_rq(rt_rq_of_se(back)); 1286f4ebcbc0SKirill Tkhai 1287391e43daSPeter Zijlstra for (rt_se = back; rt_se; rt_se = rt_se->back) { 1288391e43daSPeter Zijlstra if (on_rt_rq(rt_se)) 1289ff77e468SPeter Zijlstra __dequeue_rt_entity(rt_se, flags); 1290391e43daSPeter Zijlstra } 1291391e43daSPeter Zijlstra } 1292391e43daSPeter Zijlstra 1293ff77e468SPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1294391e43daSPeter Zijlstra { 1295f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_se(rt_se); 1296f4ebcbc0SKirill Tkhai 1297ff77e468SPeter Zijlstra dequeue_rt_stack(rt_se, flags); 1298391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) 1299ff77e468SPeter Zijlstra __enqueue_rt_entity(rt_se, flags); 1300f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(&rq->rt); 1301391e43daSPeter Zijlstra } 1302391e43daSPeter Zijlstra 1303ff77e468SPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1304391e43daSPeter Zijlstra { 1305f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_se(rt_se); 1306f4ebcbc0SKirill Tkhai 1307ff77e468SPeter Zijlstra dequeue_rt_stack(rt_se, flags); 1308391e43daSPeter Zijlstra 1309391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 1310391e43daSPeter Zijlstra struct rt_rq *rt_rq = group_rt_rq(rt_se); 1311391e43daSPeter Zijlstra 1312391e43daSPeter Zijlstra if (rt_rq && rt_rq->rt_nr_running) 1313ff77e468SPeter Zijlstra __enqueue_rt_entity(rt_se, flags); 1314391e43daSPeter Zijlstra } 1315f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(&rq->rt); 1316391e43daSPeter Zijlstra } 1317391e43daSPeter Zijlstra 1318391e43daSPeter Zijlstra /* 1319391e43daSPeter Zijlstra * Adding/removing a task to/from a priority array: 1320391e43daSPeter Zijlstra */ 1321391e43daSPeter Zijlstra static void 1322391e43daSPeter Zijlstra enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) 1323391e43daSPeter Zijlstra { 1324391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt; 1325391e43daSPeter Zijlstra 1326391e43daSPeter Zijlstra if (flags & ENQUEUE_WAKEUP) 1327391e43daSPeter Zijlstra rt_se->timeout = 0; 1328391e43daSPeter Zijlstra 1329ff77e468SPeter Zijlstra enqueue_rt_entity(rt_se, flags); 1330391e43daSPeter Zijlstra 13314b53a341SIngo Molnar if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 1332391e43daSPeter Zijlstra enqueue_pushable_task(rq, p); 1333391e43daSPeter Zijlstra } 1334391e43daSPeter Zijlstra 1335391e43daSPeter Zijlstra static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) 1336391e43daSPeter Zijlstra { 1337391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt; 1338391e43daSPeter Zijlstra 1339391e43daSPeter Zijlstra update_curr_rt(rq); 1340ff77e468SPeter Zijlstra dequeue_rt_entity(rt_se, flags); 1341391e43daSPeter Zijlstra 1342391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 1343391e43daSPeter Zijlstra } 1344391e43daSPeter Zijlstra 1345391e43daSPeter Zijlstra /* 1346391e43daSPeter Zijlstra * Put task to the head or the end of the run list without the overhead of 1347391e43daSPeter 
Zijlstra * dequeue followed by enqueue. 1348391e43daSPeter Zijlstra */ 1349391e43daSPeter Zijlstra static void 1350391e43daSPeter Zijlstra requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) 1351391e43daSPeter Zijlstra { 1352391e43daSPeter Zijlstra if (on_rt_rq(rt_se)) { 1353391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1354391e43daSPeter Zijlstra struct list_head *queue = array->queue + rt_se_prio(rt_se); 1355391e43daSPeter Zijlstra 1356391e43daSPeter Zijlstra if (head) 1357391e43daSPeter Zijlstra list_move(&rt_se->run_list, queue); 1358391e43daSPeter Zijlstra else 1359391e43daSPeter Zijlstra list_move_tail(&rt_se->run_list, queue); 1360391e43daSPeter Zijlstra } 1361391e43daSPeter Zijlstra } 1362391e43daSPeter Zijlstra 1363391e43daSPeter Zijlstra static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) 1364391e43daSPeter Zijlstra { 1365391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt; 1366391e43daSPeter Zijlstra struct rt_rq *rt_rq; 1367391e43daSPeter Zijlstra 1368391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 1369391e43daSPeter Zijlstra rt_rq = rt_rq_of_se(rt_se); 1370391e43daSPeter Zijlstra requeue_rt_entity(rt_rq, rt_se, head); 1371391e43daSPeter Zijlstra } 1372391e43daSPeter Zijlstra } 1373391e43daSPeter Zijlstra 1374391e43daSPeter Zijlstra static void yield_task_rt(struct rq *rq) 1375391e43daSPeter Zijlstra { 1376391e43daSPeter Zijlstra requeue_task_rt(rq, rq->curr, 0); 1377391e43daSPeter Zijlstra } 1378391e43daSPeter Zijlstra 1379391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1380391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task); 1381391e43daSPeter Zijlstra 1382391e43daSPeter Zijlstra static int 1383ac66f547SPeter Zijlstra select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags) 1384391e43daSPeter Zijlstra { 1385391e43daSPeter Zijlstra struct task_struct *curr; 1386391e43daSPeter Zijlstra struct rq *rq; 1387391e43daSPeter Zijlstra 1388391e43daSPeter Zijlstra /* For anything but wake ups, just return the task_cpu */ 1389391e43daSPeter Zijlstra if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) 1390391e43daSPeter Zijlstra goto out; 1391391e43daSPeter Zijlstra 1392391e43daSPeter Zijlstra rq = cpu_rq(cpu); 1393391e43daSPeter Zijlstra 1394391e43daSPeter Zijlstra rcu_read_lock(); 1395316c1608SJason Low curr = READ_ONCE(rq->curr); /* unlocked access */ 1396391e43daSPeter Zijlstra 1397391e43daSPeter Zijlstra /* 1398391e43daSPeter Zijlstra * If the current task on @p's runqueue is an RT task, then 1399391e43daSPeter Zijlstra * try to see if we can wake this RT task up on another 1400391e43daSPeter Zijlstra * runqueue. Otherwise simply start this RT task 1401391e43daSPeter Zijlstra * on its current runqueue. 1402391e43daSPeter Zijlstra * 1403391e43daSPeter Zijlstra * We want to avoid overloading runqueues. If the woken 1404391e43daSPeter Zijlstra * task is a higher priority, then it will stay on this CPU 1405391e43daSPeter Zijlstra * and the lower prio task should be moved to another CPU. 1406391e43daSPeter Zijlstra * Even though this will probably make the lower prio task 1407391e43daSPeter Zijlstra * lose its cache, we do not want to bounce a higher task 1408391e43daSPeter Zijlstra * around just because it gave up its CPU, perhaps for a 1409391e43daSPeter Zijlstra * lock? 1410391e43daSPeter Zijlstra * 1411391e43daSPeter Zijlstra * For equal prio tasks, we just let the scheduler sort it out. 
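 *
 * Illustrative example (editorial): p wakes with ->prio 50 while this
 * CPU's current task is an RT task with ->prio 40 (a lower value means a
 * higher priority); curr->prio <= p->prio holds, so find_lowest_rq() is
 * consulted and p is redirected only if the chosen CPU's highest RT
 * priority value is numerically greater than 50, i.e. that CPU is
 * running something less important than p.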
1412391e43daSPeter Zijlstra * 1413391e43daSPeter Zijlstra * Otherwise, just let it ride on the affined RQ and the 1414391e43daSPeter Zijlstra * post-schedule router will push the preempted task away 1415391e43daSPeter Zijlstra * 1416391e43daSPeter Zijlstra * This test is optimistic, if we get it wrong the load-balancer 1417391e43daSPeter Zijlstra * will have to sort it out. 1418391e43daSPeter Zijlstra */ 1419391e43daSPeter Zijlstra if (curr && unlikely(rt_task(curr)) && 14204b53a341SIngo Molnar (curr->nr_cpus_allowed < 2 || 14216bfa687cSShawn Bohrer curr->prio <= p->prio)) { 1422391e43daSPeter Zijlstra int target = find_lowest_rq(p); 1423391e43daSPeter Zijlstra 142480e3d87bSTim Chen /* 142580e3d87bSTim Chen * Don't bother moving it if the destination CPU is 142680e3d87bSTim Chen * not running a lower priority task. 142780e3d87bSTim Chen */ 142880e3d87bSTim Chen if (target != -1 && 142980e3d87bSTim Chen p->prio < cpu_rq(target)->rt.highest_prio.curr) 1430391e43daSPeter Zijlstra cpu = target; 1431391e43daSPeter Zijlstra } 1432391e43daSPeter Zijlstra rcu_read_unlock(); 1433391e43daSPeter Zijlstra 1434391e43daSPeter Zijlstra out: 1435391e43daSPeter Zijlstra return cpu; 1436391e43daSPeter Zijlstra } 1437391e43daSPeter Zijlstra 1438391e43daSPeter Zijlstra static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) 1439391e43daSPeter Zijlstra { 1440308a623aSWanpeng Li /* 1441308a623aSWanpeng Li * Current can't be migrated, useless to reschedule, 1442308a623aSWanpeng Li * let's hope p can move out. 1443308a623aSWanpeng Li */ 14444b53a341SIngo Molnar if (rq->curr->nr_cpus_allowed == 1 || 1445308a623aSWanpeng Li !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) 1446391e43daSPeter Zijlstra return; 1447391e43daSPeter Zijlstra 1448308a623aSWanpeng Li /* 1449308a623aSWanpeng Li * p is migratable, so let's not schedule it and 1450308a623aSWanpeng Li * see if it is pushed or pulled somewhere else. 
1451308a623aSWanpeng Li */ 14524b53a341SIngo Molnar if (p->nr_cpus_allowed != 1 1453391e43daSPeter Zijlstra && cpupri_find(&rq->rd->cpupri, p, NULL)) 1454391e43daSPeter Zijlstra return; 1455391e43daSPeter Zijlstra 1456391e43daSPeter Zijlstra /* 145797fb7a0aSIngo Molnar * There appear to be other CPUs that can accept 145897fb7a0aSIngo Molnar * the current task but none can run 'p', so lets reschedule 145997fb7a0aSIngo Molnar * to try and push the current task away: 1460391e43daSPeter Zijlstra */ 1461391e43daSPeter Zijlstra requeue_task_rt(rq, p, 1); 14628875125eSKirill Tkhai resched_curr(rq); 1463391e43daSPeter Zijlstra } 1464391e43daSPeter Zijlstra 1465391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 1466391e43daSPeter Zijlstra 1467391e43daSPeter Zijlstra /* 1468391e43daSPeter Zijlstra * Preempt the current task with a newly woken task if needed: 1469391e43daSPeter Zijlstra */ 1470391e43daSPeter Zijlstra static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) 1471391e43daSPeter Zijlstra { 1472391e43daSPeter Zijlstra if (p->prio < rq->curr->prio) { 14738875125eSKirill Tkhai resched_curr(rq); 1474391e43daSPeter Zijlstra return; 1475391e43daSPeter Zijlstra } 1476391e43daSPeter Zijlstra 1477391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1478391e43daSPeter Zijlstra /* 1479391e43daSPeter Zijlstra * If: 1480391e43daSPeter Zijlstra * 1481391e43daSPeter Zijlstra * - the newly woken task is of equal priority to the current task 1482391e43daSPeter Zijlstra * - the newly woken task is non-migratable while current is migratable 1483391e43daSPeter Zijlstra * - current will be preempted on the next reschedule 1484391e43daSPeter Zijlstra * 1485391e43daSPeter Zijlstra * we should check to see if current can readily move to a different 1486391e43daSPeter Zijlstra * cpu. If so, we will reschedule to allow the push logic to try 1487391e43daSPeter Zijlstra * to move current somewhere else, making room for our non-migratable 1488391e43daSPeter Zijlstra * task. 
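 *
 * Illustrative example (editorial): curr and the woken task p both have
 * ->prio 50, p is pinned to this CPU (nr_cpus_allowed == 1) while curr
 * is free to migrate; check_preempt_equal_prio() then requeues p at the
 * head of its list and reschedules, so the push logic can move curr
 * elsewhere and the pinned task gets to run here.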
1489391e43daSPeter Zijlstra */ 1490391e43daSPeter Zijlstra if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) 1491391e43daSPeter Zijlstra check_preempt_equal_prio(rq, p); 1492391e43daSPeter Zijlstra #endif 1493391e43daSPeter Zijlstra } 1494391e43daSPeter Zijlstra 1495391e43daSPeter Zijlstra static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, 1496391e43daSPeter Zijlstra struct rt_rq *rt_rq) 1497391e43daSPeter Zijlstra { 1498391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1499391e43daSPeter Zijlstra struct sched_rt_entity *next = NULL; 1500391e43daSPeter Zijlstra struct list_head *queue; 1501391e43daSPeter Zijlstra int idx; 1502391e43daSPeter Zijlstra 1503391e43daSPeter Zijlstra idx = sched_find_first_bit(array->bitmap); 1504391e43daSPeter Zijlstra BUG_ON(idx >= MAX_RT_PRIO); 1505391e43daSPeter Zijlstra 1506391e43daSPeter Zijlstra queue = array->queue + idx; 1507391e43daSPeter Zijlstra next = list_entry(queue->next, struct sched_rt_entity, run_list); 1508391e43daSPeter Zijlstra 1509391e43daSPeter Zijlstra return next; 1510391e43daSPeter Zijlstra } 1511391e43daSPeter Zijlstra 1512391e43daSPeter Zijlstra static struct task_struct *_pick_next_task_rt(struct rq *rq) 1513391e43daSPeter Zijlstra { 1514391e43daSPeter Zijlstra struct sched_rt_entity *rt_se; 1515391e43daSPeter Zijlstra struct task_struct *p; 1516606dba2eSPeter Zijlstra struct rt_rq *rt_rq = &rq->rt; 1517391e43daSPeter Zijlstra 1518391e43daSPeter Zijlstra do { 1519391e43daSPeter Zijlstra rt_se = pick_next_rt_entity(rq, rt_rq); 1520391e43daSPeter Zijlstra BUG_ON(!rt_se); 1521391e43daSPeter Zijlstra rt_rq = group_rt_rq(rt_se); 1522391e43daSPeter Zijlstra } while (rt_rq); 1523391e43daSPeter Zijlstra 1524391e43daSPeter Zijlstra p = rt_task_of(rt_se); 152578becc27SFrederic Weisbecker p->se.exec_start = rq_clock_task(rq); 1526391e43daSPeter Zijlstra 1527391e43daSPeter Zijlstra return p; 1528391e43daSPeter Zijlstra } 1529391e43daSPeter Zijlstra 1530606dba2eSPeter Zijlstra static struct task_struct * 1531d8ac8971SMatt Fleming pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 1532391e43daSPeter Zijlstra { 1533606dba2eSPeter Zijlstra struct task_struct *p; 1534606dba2eSPeter Zijlstra struct rt_rq *rt_rq = &rq->rt; 1535606dba2eSPeter Zijlstra 153637e117c0SPeter Zijlstra if (need_pull_rt_task(rq, prev)) { 1537cbce1a68SPeter Zijlstra /* 1538cbce1a68SPeter Zijlstra * This is OK, because current is on_cpu, which avoids it being 1539cbce1a68SPeter Zijlstra * picked for load-balance and preemption/IRQs are still 1540cbce1a68SPeter Zijlstra * disabled avoiding further scheduler activity on it and we're 1541cbce1a68SPeter Zijlstra * being very careful to re-start the picking loop. 1542cbce1a68SPeter Zijlstra */ 1543d8ac8971SMatt Fleming rq_unpin_lock(rq, rf); 154438033c37SPeter Zijlstra pull_rt_task(rq); 1545d8ac8971SMatt Fleming rq_repin_lock(rq, rf); 154637e117c0SPeter Zijlstra /* 154737e117c0SPeter Zijlstra * pull_rt_task() can drop (and re-acquire) rq->lock; this 1548a1d9a323SKirill Tkhai * means a dl or stop task can slip in, in which case we need 1549a1d9a323SKirill Tkhai * to re-start task selection. 
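 *
 * (Editorial note: returning RETRY_TASK below is understood to make the
 * core pick loop restart from the highest scheduling class, so a dl or
 * stop task that slipped in while the lock was dropped is picked instead
 * of an RT task.)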
155037e117c0SPeter Zijlstra */ 1551da0c1e65SKirill Tkhai if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) || 1552a1d9a323SKirill Tkhai rq->dl.dl_nr_running)) 155337e117c0SPeter Zijlstra return RETRY_TASK; 155437e117c0SPeter Zijlstra } 155538033c37SPeter Zijlstra 1556734ff2a7SKirill Tkhai /* 1557734ff2a7SKirill Tkhai * We may dequeue prev's rt_rq in put_prev_task(). 1558734ff2a7SKirill Tkhai * So, we update time before rt_nr_running check. 1559734ff2a7SKirill Tkhai */ 1560734ff2a7SKirill Tkhai if (prev->sched_class == &rt_sched_class) 1561734ff2a7SKirill Tkhai update_curr_rt(rq); 1562734ff2a7SKirill Tkhai 1563f4ebcbc0SKirill Tkhai if (!rt_rq->rt_queued) 1564606dba2eSPeter Zijlstra return NULL; 1565606dba2eSPeter Zijlstra 15663f1d2a31SPeter Zijlstra put_prev_task(rq, prev); 1567606dba2eSPeter Zijlstra 1568606dba2eSPeter Zijlstra p = _pick_next_task_rt(rq); 1569391e43daSPeter Zijlstra 1570391e43daSPeter Zijlstra /* The running task is never eligible for pushing */ 1571391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 1572391e43daSPeter Zijlstra 157302d8ec94SIngo Molnar rt_queue_push_tasks(rq); 1574391e43daSPeter Zijlstra 1575391e43daSPeter Zijlstra return p; 1576391e43daSPeter Zijlstra } 1577391e43daSPeter Zijlstra 1578391e43daSPeter Zijlstra static void put_prev_task_rt(struct rq *rq, struct task_struct *p) 1579391e43daSPeter Zijlstra { 1580391e43daSPeter Zijlstra update_curr_rt(rq); 1581391e43daSPeter Zijlstra 1582391e43daSPeter Zijlstra /* 1583391e43daSPeter Zijlstra * The previous task needs to be made eligible for pushing 1584391e43daSPeter Zijlstra * if it is still active 1585391e43daSPeter Zijlstra */ 15864b53a341SIngo Molnar if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) 1587391e43daSPeter Zijlstra enqueue_pushable_task(rq, p); 1588391e43daSPeter Zijlstra } 1589391e43daSPeter Zijlstra 1590391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1591391e43daSPeter Zijlstra 1592391e43daSPeter Zijlstra /* Only try algorithms three times */ 1593391e43daSPeter Zijlstra #define RT_MAX_TRIES 3 1594391e43daSPeter Zijlstra 1595391e43daSPeter Zijlstra static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) 1596391e43daSPeter Zijlstra { 1597391e43daSPeter Zijlstra if (!task_running(rq, p) && 15980c98d344SIngo Molnar cpumask_test_cpu(cpu, &p->cpus_allowed)) 1599391e43daSPeter Zijlstra return 1; 160097fb7a0aSIngo Molnar 1601391e43daSPeter Zijlstra return 0; 1602391e43daSPeter Zijlstra } 1603391e43daSPeter Zijlstra 1604e23ee747SKirill Tkhai /* 1605e23ee747SKirill Tkhai * Return the highest pushable rq's task, which is suitable to be executed 160697fb7a0aSIngo Molnar * on the CPU, NULL otherwise 1607e23ee747SKirill Tkhai */ 1608e23ee747SKirill Tkhai static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu) 1609391e43daSPeter Zijlstra { 1610e23ee747SKirill Tkhai struct plist_head *head = &rq->rt.pushable_tasks; 1611391e43daSPeter Zijlstra struct task_struct *p; 1612391e43daSPeter Zijlstra 1613e23ee747SKirill Tkhai if (!has_pushable_tasks(rq)) 1614e23ee747SKirill Tkhai return NULL; 1615391e43daSPeter Zijlstra 1616e23ee747SKirill Tkhai plist_for_each_entry(p, head, pushable_tasks) { 1617e23ee747SKirill Tkhai if (pick_rt_task(rq, p, cpu)) 1618e23ee747SKirill Tkhai return p; 1619391e43daSPeter Zijlstra } 1620391e43daSPeter Zijlstra 1621e23ee747SKirill Tkhai return NULL; 1622391e43daSPeter Zijlstra } 1623391e43daSPeter Zijlstra 1624391e43daSPeter Zijlstra static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); 1625391e43daSPeter Zijlstra 1626391e43daSPeter Zijlstra static int 
find_lowest_rq(struct task_struct *task) 1627391e43daSPeter Zijlstra { 1628391e43daSPeter Zijlstra struct sched_domain *sd; 16294ba29684SChristoph Lameter struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask); 1630391e43daSPeter Zijlstra int this_cpu = smp_processor_id(); 1631391e43daSPeter Zijlstra int cpu = task_cpu(task); 1632391e43daSPeter Zijlstra 1633391e43daSPeter Zijlstra /* Make sure the mask is initialized first */ 1634391e43daSPeter Zijlstra if (unlikely(!lowest_mask)) 1635391e43daSPeter Zijlstra return -1; 1636391e43daSPeter Zijlstra 16374b53a341SIngo Molnar if (task->nr_cpus_allowed == 1) 1638391e43daSPeter Zijlstra return -1; /* No other targets possible */ 1639391e43daSPeter Zijlstra 1640391e43daSPeter Zijlstra if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) 1641391e43daSPeter Zijlstra return -1; /* No targets found */ 1642391e43daSPeter Zijlstra 1643391e43daSPeter Zijlstra /* 164497fb7a0aSIngo Molnar * At this point we have built a mask of CPUs representing the 1645391e43daSPeter Zijlstra * lowest priority tasks in the system. Now we want to elect 1646391e43daSPeter Zijlstra * the best one based on our affinity and topology. 1647391e43daSPeter Zijlstra * 164897fb7a0aSIngo Molnar * We prioritize the last CPU that the task executed on since 1649391e43daSPeter Zijlstra * it is most likely cache-hot in that location. 1650391e43daSPeter Zijlstra */ 1651391e43daSPeter Zijlstra if (cpumask_test_cpu(cpu, lowest_mask)) 1652391e43daSPeter Zijlstra return cpu; 1653391e43daSPeter Zijlstra 1654391e43daSPeter Zijlstra /* 1655391e43daSPeter Zijlstra * Otherwise, we consult the sched_domains span maps to figure 165697fb7a0aSIngo Molnar * out which CPU is logically closest to our hot cache data. 1657391e43daSPeter Zijlstra */ 1658391e43daSPeter Zijlstra if (!cpumask_test_cpu(this_cpu, lowest_mask)) 1659391e43daSPeter Zijlstra this_cpu = -1; /* Skip this_cpu opt if not among lowest */ 1660391e43daSPeter Zijlstra 1661391e43daSPeter Zijlstra rcu_read_lock(); 1662391e43daSPeter Zijlstra for_each_domain(cpu, sd) { 1663391e43daSPeter Zijlstra if (sd->flags & SD_WAKE_AFFINE) { 1664391e43daSPeter Zijlstra int best_cpu; 1665391e43daSPeter Zijlstra 1666391e43daSPeter Zijlstra /* 1667391e43daSPeter Zijlstra * "this_cpu" is cheaper to preempt than a 1668391e43daSPeter Zijlstra * remote processor. 1669391e43daSPeter Zijlstra */ 1670391e43daSPeter Zijlstra if (this_cpu != -1 && 1671391e43daSPeter Zijlstra cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { 1672391e43daSPeter Zijlstra rcu_read_unlock(); 1673391e43daSPeter Zijlstra return this_cpu; 1674391e43daSPeter Zijlstra } 1675391e43daSPeter Zijlstra 1676391e43daSPeter Zijlstra best_cpu = cpumask_first_and(lowest_mask, 1677391e43daSPeter Zijlstra sched_domain_span(sd)); 1678391e43daSPeter Zijlstra if (best_cpu < nr_cpu_ids) { 1679391e43daSPeter Zijlstra rcu_read_unlock(); 1680391e43daSPeter Zijlstra return best_cpu; 1681391e43daSPeter Zijlstra } 1682391e43daSPeter Zijlstra } 1683391e43daSPeter Zijlstra } 1684391e43daSPeter Zijlstra rcu_read_unlock(); 1685391e43daSPeter Zijlstra 1686391e43daSPeter Zijlstra /* 1687391e43daSPeter Zijlstra * And finally, if there were no matches within the domains 1688391e43daSPeter Zijlstra * just give the caller *something* to work with from the compatible 1689391e43daSPeter Zijlstra * locations. 
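 *
 * Illustrative summary (editorial): the resulting preference order is
 * roughly (1) the task's own CPU if it is in lowest_mask, (2) this_cpu
 * when it is itself in lowest_mask and shares a SD_WAKE_AFFINE domain
 * with the task's CPU, (3) the first lowest_mask CPU inside such a
 * domain, (4) this_cpu as a fallback, (5) any CPU left in lowest_mask,
 * and finally -1 if nothing fits.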
1690391e43daSPeter Zijlstra */ 1691391e43daSPeter Zijlstra if (this_cpu != -1) 1692391e43daSPeter Zijlstra return this_cpu; 1693391e43daSPeter Zijlstra 1694391e43daSPeter Zijlstra cpu = cpumask_any(lowest_mask); 1695391e43daSPeter Zijlstra if (cpu < nr_cpu_ids) 1696391e43daSPeter Zijlstra return cpu; 169797fb7a0aSIngo Molnar 1698391e43daSPeter Zijlstra return -1; 1699391e43daSPeter Zijlstra } 1700391e43daSPeter Zijlstra 1701391e43daSPeter Zijlstra /* Will lock the rq it finds */ 1702391e43daSPeter Zijlstra static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) 1703391e43daSPeter Zijlstra { 1704391e43daSPeter Zijlstra struct rq *lowest_rq = NULL; 1705391e43daSPeter Zijlstra int tries; 1706391e43daSPeter Zijlstra int cpu; 1707391e43daSPeter Zijlstra 1708391e43daSPeter Zijlstra for (tries = 0; tries < RT_MAX_TRIES; tries++) { 1709391e43daSPeter Zijlstra cpu = find_lowest_rq(task); 1710391e43daSPeter Zijlstra 1711391e43daSPeter Zijlstra if ((cpu == -1) || (cpu == rq->cpu)) 1712391e43daSPeter Zijlstra break; 1713391e43daSPeter Zijlstra 1714391e43daSPeter Zijlstra lowest_rq = cpu_rq(cpu); 1715391e43daSPeter Zijlstra 171680e3d87bSTim Chen if (lowest_rq->rt.highest_prio.curr <= task->prio) { 171780e3d87bSTim Chen /* 171880e3d87bSTim Chen * Target rq has tasks of equal or higher priority, 171980e3d87bSTim Chen * retrying does not release any lock and is unlikely 172080e3d87bSTim Chen * to yield a different result. 172180e3d87bSTim Chen */ 172280e3d87bSTim Chen lowest_rq = NULL; 172380e3d87bSTim Chen break; 172480e3d87bSTim Chen } 172580e3d87bSTim Chen 1726391e43daSPeter Zijlstra /* if the prio of this runqueue changed, try again */ 1727391e43daSPeter Zijlstra if (double_lock_balance(rq, lowest_rq)) { 1728391e43daSPeter Zijlstra /* 1729391e43daSPeter Zijlstra * We had to unlock the run queue. In 1730391e43daSPeter Zijlstra * the mean time, task could have 1731391e43daSPeter Zijlstra * migrated already or had its affinity changed. 1732391e43daSPeter Zijlstra * Also make sure that it wasn't scheduled on its rq. 1733391e43daSPeter Zijlstra */ 1734391e43daSPeter Zijlstra if (unlikely(task_rq(task) != rq || 17350c98d344SIngo Molnar !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) || 1736391e43daSPeter Zijlstra task_running(rq, task) || 173713b5ab02SXunlei Pang !rt_task(task) || 1738da0c1e65SKirill Tkhai !task_on_rq_queued(task))) { 1739391e43daSPeter Zijlstra 17407f1b4393SPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1741391e43daSPeter Zijlstra lowest_rq = NULL; 1742391e43daSPeter Zijlstra break; 1743391e43daSPeter Zijlstra } 1744391e43daSPeter Zijlstra } 1745391e43daSPeter Zijlstra 1746391e43daSPeter Zijlstra /* If this rq is still suitable use it. 
*/ 1747391e43daSPeter Zijlstra if (lowest_rq->rt.highest_prio.curr > task->prio) 1748391e43daSPeter Zijlstra break; 1749391e43daSPeter Zijlstra 1750391e43daSPeter Zijlstra /* try again */ 1751391e43daSPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1752391e43daSPeter Zijlstra lowest_rq = NULL; 1753391e43daSPeter Zijlstra } 1754391e43daSPeter Zijlstra 1755391e43daSPeter Zijlstra return lowest_rq; 1756391e43daSPeter Zijlstra } 1757391e43daSPeter Zijlstra 1758391e43daSPeter Zijlstra static struct task_struct *pick_next_pushable_task(struct rq *rq) 1759391e43daSPeter Zijlstra { 1760391e43daSPeter Zijlstra struct task_struct *p; 1761391e43daSPeter Zijlstra 1762391e43daSPeter Zijlstra if (!has_pushable_tasks(rq)) 1763391e43daSPeter Zijlstra return NULL; 1764391e43daSPeter Zijlstra 1765391e43daSPeter Zijlstra p = plist_first_entry(&rq->rt.pushable_tasks, 1766391e43daSPeter Zijlstra struct task_struct, pushable_tasks); 1767391e43daSPeter Zijlstra 1768391e43daSPeter Zijlstra BUG_ON(rq->cpu != task_cpu(p)); 1769391e43daSPeter Zijlstra BUG_ON(task_current(rq, p)); 17704b53a341SIngo Molnar BUG_ON(p->nr_cpus_allowed <= 1); 1771391e43daSPeter Zijlstra 1772da0c1e65SKirill Tkhai BUG_ON(!task_on_rq_queued(p)); 1773391e43daSPeter Zijlstra BUG_ON(!rt_task(p)); 1774391e43daSPeter Zijlstra 1775391e43daSPeter Zijlstra return p; 1776391e43daSPeter Zijlstra } 1777391e43daSPeter Zijlstra 1778391e43daSPeter Zijlstra /* 1779391e43daSPeter Zijlstra * If the current CPU has more than one RT task, see if the non 1780391e43daSPeter Zijlstra * running task can migrate over to a CPU that is running a task 1781391e43daSPeter Zijlstra * of lesser priority. 1782391e43daSPeter Zijlstra */ 1783391e43daSPeter Zijlstra static int push_rt_task(struct rq *rq) 1784391e43daSPeter Zijlstra { 1785391e43daSPeter Zijlstra struct task_struct *next_task; 1786391e43daSPeter Zijlstra struct rq *lowest_rq; 1787391e43daSPeter Zijlstra int ret = 0; 1788391e43daSPeter Zijlstra 1789391e43daSPeter Zijlstra if (!rq->rt.overloaded) 1790391e43daSPeter Zijlstra return 0; 1791391e43daSPeter Zijlstra 1792391e43daSPeter Zijlstra next_task = pick_next_pushable_task(rq); 1793391e43daSPeter Zijlstra if (!next_task) 1794391e43daSPeter Zijlstra return 0; 1795391e43daSPeter Zijlstra 1796391e43daSPeter Zijlstra retry: 1797391e43daSPeter Zijlstra if (unlikely(next_task == rq->curr)) { 1798391e43daSPeter Zijlstra WARN_ON(1); 1799391e43daSPeter Zijlstra return 0; 1800391e43daSPeter Zijlstra } 1801391e43daSPeter Zijlstra 1802391e43daSPeter Zijlstra /* 1803391e43daSPeter Zijlstra * It's possible that the next_task slipped in of 1804391e43daSPeter Zijlstra * higher priority than current. If that's the case 1805391e43daSPeter Zijlstra * just reschedule current. 
1806391e43daSPeter Zijlstra */ 1807391e43daSPeter Zijlstra if (unlikely(next_task->prio < rq->curr->prio)) { 18088875125eSKirill Tkhai resched_curr(rq); 1809391e43daSPeter Zijlstra return 0; 1810391e43daSPeter Zijlstra } 1811391e43daSPeter Zijlstra 1812391e43daSPeter Zijlstra /* We might release rq lock */ 1813391e43daSPeter Zijlstra get_task_struct(next_task); 1814391e43daSPeter Zijlstra 1815391e43daSPeter Zijlstra /* find_lock_lowest_rq locks the rq if found */ 1816391e43daSPeter Zijlstra lowest_rq = find_lock_lowest_rq(next_task, rq); 1817391e43daSPeter Zijlstra if (!lowest_rq) { 1818391e43daSPeter Zijlstra struct task_struct *task; 1819391e43daSPeter Zijlstra /* 1820391e43daSPeter Zijlstra * find_lock_lowest_rq releases rq->lock 1821391e43daSPeter Zijlstra * so it is possible that next_task has migrated. 1822391e43daSPeter Zijlstra * 1823391e43daSPeter Zijlstra * We need to make sure that the task is still on the same 1824391e43daSPeter Zijlstra * run-queue and is also still the next task eligible for 1825391e43daSPeter Zijlstra * pushing. 1826391e43daSPeter Zijlstra */ 1827391e43daSPeter Zijlstra task = pick_next_pushable_task(rq); 1828de16b91eSByungchul Park if (task == next_task) { 1829391e43daSPeter Zijlstra /* 1830391e43daSPeter Zijlstra * The task hasn't migrated, and is still the next 1831391e43daSPeter Zijlstra * eligible task, but we failed to find a run-queue 1832391e43daSPeter Zijlstra * to push it to. Do not retry in this case, since 183397fb7a0aSIngo Molnar * other CPUs will pull from us when ready. 1834391e43daSPeter Zijlstra */ 1835391e43daSPeter Zijlstra goto out; 1836391e43daSPeter Zijlstra } 1837391e43daSPeter Zijlstra 1838391e43daSPeter Zijlstra if (!task) 1839391e43daSPeter Zijlstra /* No more tasks, just exit */ 1840391e43daSPeter Zijlstra goto out; 1841391e43daSPeter Zijlstra 1842391e43daSPeter Zijlstra /* 1843391e43daSPeter Zijlstra * Something has shifted, try again. 
1844391e43daSPeter Zijlstra */ 1845391e43daSPeter Zijlstra put_task_struct(next_task); 1846391e43daSPeter Zijlstra next_task = task; 1847391e43daSPeter Zijlstra goto retry; 1848391e43daSPeter Zijlstra } 1849391e43daSPeter Zijlstra 1850391e43daSPeter Zijlstra deactivate_task(rq, next_task, 0); 1851391e43daSPeter Zijlstra set_task_cpu(next_task, lowest_rq->cpu); 1852391e43daSPeter Zijlstra activate_task(lowest_rq, next_task, 0); 1853391e43daSPeter Zijlstra ret = 1; 1854391e43daSPeter Zijlstra 18558875125eSKirill Tkhai resched_curr(lowest_rq); 1856391e43daSPeter Zijlstra 1857391e43daSPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1858391e43daSPeter Zijlstra 1859391e43daSPeter Zijlstra out: 1860391e43daSPeter Zijlstra put_task_struct(next_task); 1861391e43daSPeter Zijlstra 1862391e43daSPeter Zijlstra return ret; 1863391e43daSPeter Zijlstra } 1864391e43daSPeter Zijlstra 1865391e43daSPeter Zijlstra static void push_rt_tasks(struct rq *rq) 1866391e43daSPeter Zijlstra { 1867391e43daSPeter Zijlstra /* push_rt_task will return true if it moved an RT */ 1868391e43daSPeter Zijlstra while (push_rt_task(rq)) 1869391e43daSPeter Zijlstra ; 1870391e43daSPeter Zijlstra } 1871391e43daSPeter Zijlstra 1872b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI 1873b6366f04SSteven Rostedt 18743e777f99SSteven Rostedt (VMware) /* 18753e777f99SSteven Rostedt (VMware) * When a high priority task schedules out from a CPU and a lower priority 18763e777f99SSteven Rostedt (VMware) * task is scheduled in, a check is made to see if there's any RT tasks 18773e777f99SSteven Rostedt (VMware) * on other CPUs that are waiting to run because a higher priority RT task 18783e777f99SSteven Rostedt (VMware) * is currently running on its CPU. In this case, the CPU with multiple RT 18793e777f99SSteven Rostedt (VMware) * tasks queued on it (overloaded) needs to be notified that a CPU has opened 18803e777f99SSteven Rostedt (VMware) * up that may be able to run one of its non-running queued RT tasks. 18813e777f99SSteven Rostedt (VMware) * 18824bdced5cSSteven Rostedt (Red Hat) * All CPUs with overloaded RT tasks need to be notified as there is currently 18834bdced5cSSteven Rostedt (Red Hat) * no way to know which of these CPUs have the highest priority task waiting 18844bdced5cSSteven Rostedt (Red Hat) * to run. Instead of trying to take a spinlock on each of these CPUs, 18854bdced5cSSteven Rostedt (Red Hat) * which has shown to cause large latency when done on machines with many 18864bdced5cSSteven Rostedt (Red Hat) * CPUs, sending an IPI to the CPUs to have them push off the overloaded 18874bdced5cSSteven Rostedt (Red Hat) * RT tasks waiting to run. 18883e777f99SSteven Rostedt (VMware) * 18894bdced5cSSteven Rostedt (Red Hat) * Just sending an IPI to each of the CPUs is also an issue, as on large 18904bdced5cSSteven Rostedt (Red Hat) * count CPU machines, this can cause an IPI storm on a CPU, especially 18914bdced5cSSteven Rostedt (Red Hat) * if its the only CPU with multiple RT tasks queued, and a large number 18924bdced5cSSteven Rostedt (Red Hat) * of CPUs scheduling a lower priority task at the same time. 18933e777f99SSteven Rostedt (VMware) * 18944bdced5cSSteven Rostedt (Red Hat) * Each root domain has its own irq work function that can iterate over 18954bdced5cSSteven Rostedt (Red Hat) * all CPUs with RT overloaded tasks. 
Since all CPUs with overloaded RT 18964bdced5cSSteven Rostedt (Red Hat) * tassk must be checked if there's one or many CPUs that are lowering 18974bdced5cSSteven Rostedt (Red Hat) * their priority, there's a single irq work iterator that will try to 18984bdced5cSSteven Rostedt (Red Hat) * push off RT tasks that are waiting to run. 18993e777f99SSteven Rostedt (VMware) * 19004bdced5cSSteven Rostedt (Red Hat) * When a CPU schedules a lower priority task, it will kick off the 19014bdced5cSSteven Rostedt (Red Hat) * irq work iterator that will jump to each CPU with overloaded RT tasks. 19024bdced5cSSteven Rostedt (Red Hat) * As it only takes the first CPU that schedules a lower priority task 19034bdced5cSSteven Rostedt (Red Hat) * to start the process, the rto_start variable is incremented and if 19044bdced5cSSteven Rostedt (Red Hat) * the atomic result is one, then that CPU will try to take the rto_lock. 19054bdced5cSSteven Rostedt (Red Hat) * This prevents high contention on the lock as the process handles all 19064bdced5cSSteven Rostedt (Red Hat) * CPUs scheduling lower priority tasks. 19073e777f99SSteven Rostedt (VMware) * 19084bdced5cSSteven Rostedt (Red Hat) * All CPUs that are scheduling a lower priority task will increment the 19094bdced5cSSteven Rostedt (Red Hat) * rt_loop_next variable. This will make sure that the irq work iterator 19104bdced5cSSteven Rostedt (Red Hat) * checks all RT overloaded CPUs whenever a CPU schedules a new lower 19114bdced5cSSteven Rostedt (Red Hat) * priority task, even if the iterator is in the middle of a scan. Incrementing 19124bdced5cSSteven Rostedt (Red Hat) * the rt_loop_next will cause the iterator to perform another scan. 19133e777f99SSteven Rostedt (VMware) * 19143e777f99SSteven Rostedt (VMware) */ 1915ad0f1d9dSSteven Rostedt (VMware) static int rto_next_cpu(struct root_domain *rd) 1916b6366f04SSteven Rostedt { 19174bdced5cSSteven Rostedt (Red Hat) int next; 1918b6366f04SSteven Rostedt int cpu; 1919b6366f04SSteven Rostedt 1920b6366f04SSteven Rostedt /* 19214bdced5cSSteven Rostedt (Red Hat) * When starting the IPI RT pushing, the rto_cpu is set to -1, 19224bdced5cSSteven Rostedt (Red Hat) * rt_next_cpu() will simply return the first CPU found in 19234bdced5cSSteven Rostedt (Red Hat) * the rto_mask. 19244bdced5cSSteven Rostedt (Red Hat) * 192597fb7a0aSIngo Molnar * If rto_next_cpu() is called with rto_cpu is a valid CPU, it 19264bdced5cSSteven Rostedt (Red Hat) * will return the next CPU found in the rto_mask. 19274bdced5cSSteven Rostedt (Red Hat) * 19284bdced5cSSteven Rostedt (Red Hat) * If there are no more CPUs left in the rto_mask, then a check is made 19294bdced5cSSteven Rostedt (Red Hat) * against rto_loop and rto_loop_next. rto_loop is only updated with 19304bdced5cSSteven Rostedt (Red Hat) * the rto_lock held, but any CPU may increment the rto_loop_next 19314bdced5cSSteven Rostedt (Red Hat) * without any locking. 
1932b6366f04SSteven Rostedt */ 19334bdced5cSSteven Rostedt (Red Hat) for (;;) { 19344bdced5cSSteven Rostedt (Red Hat) 19354bdced5cSSteven Rostedt (Red Hat) /* When rto_cpu is -1 this acts like cpumask_first() */ 19364bdced5cSSteven Rostedt (Red Hat) cpu = cpumask_next(rd->rto_cpu, rd->rto_mask); 19374bdced5cSSteven Rostedt (Red Hat) 19384bdced5cSSteven Rostedt (Red Hat) rd->rto_cpu = cpu; 19394bdced5cSSteven Rostedt (Red Hat) 19404bdced5cSSteven Rostedt (Red Hat) if (cpu < nr_cpu_ids) 19414bdced5cSSteven Rostedt (Red Hat) return cpu; 19424bdced5cSSteven Rostedt (Red Hat) 19434bdced5cSSteven Rostedt (Red Hat) rd->rto_cpu = -1; 19444bdced5cSSteven Rostedt (Red Hat) 19454bdced5cSSteven Rostedt (Red Hat) /* 19464bdced5cSSteven Rostedt (Red Hat) * ACQUIRE ensures we see the @rto_mask changes 19474bdced5cSSteven Rostedt (Red Hat) * made prior to the @next value observed. 19484bdced5cSSteven Rostedt (Red Hat) * 19494bdced5cSSteven Rostedt (Red Hat) * Matches WMB in rt_set_overload(). 19504bdced5cSSteven Rostedt (Red Hat) */ 19514bdced5cSSteven Rostedt (Red Hat) next = atomic_read_acquire(&rd->rto_loop_next); 19524bdced5cSSteven Rostedt (Red Hat) 19534bdced5cSSteven Rostedt (Red Hat) if (rd->rto_loop == next) 19544bdced5cSSteven Rostedt (Red Hat) break; 19554bdced5cSSteven Rostedt (Red Hat) 19564bdced5cSSteven Rostedt (Red Hat) rd->rto_loop = next; 1957b6366f04SSteven Rostedt } 1958b6366f04SSteven Rostedt 19594bdced5cSSteven Rostedt (Red Hat) return -1; 19604bdced5cSSteven Rostedt (Red Hat) } 1961b6366f04SSteven Rostedt 19624bdced5cSSteven Rostedt (Red Hat) static inline bool rto_start_trylock(atomic_t *v) 19634bdced5cSSteven Rostedt (Red Hat) { 19644bdced5cSSteven Rostedt (Red Hat) return !atomic_cmpxchg_acquire(v, 0, 1); 19654bdced5cSSteven Rostedt (Red Hat) } 19664bdced5cSSteven Rostedt (Red Hat) 19674bdced5cSSteven Rostedt (Red Hat) static inline void rto_start_unlock(atomic_t *v) 19684bdced5cSSteven Rostedt (Red Hat) { 19694bdced5cSSteven Rostedt (Red Hat) atomic_set_release(v, 0); 19704bdced5cSSteven Rostedt (Red Hat) } 19714bdced5cSSteven Rostedt (Red Hat) 19724bdced5cSSteven Rostedt (Red Hat) static void tell_cpu_to_push(struct rq *rq) 19734bdced5cSSteven Rostedt (Red Hat) { 19744bdced5cSSteven Rostedt (Red Hat) int cpu = -1; 19754bdced5cSSteven Rostedt (Red Hat) 19764bdced5cSSteven Rostedt (Red Hat) /* Keep the loop going if the IPI is currently active */ 19774bdced5cSSteven Rostedt (Red Hat) atomic_inc(&rq->rd->rto_loop_next); 19784bdced5cSSteven Rostedt (Red Hat) 19794bdced5cSSteven Rostedt (Red Hat) /* Only one CPU can initiate a loop at a time */ 19804bdced5cSSteven Rostedt (Red Hat) if (!rto_start_trylock(&rq->rd->rto_loop_start)) 1981b6366f04SSteven Rostedt return; 1982b6366f04SSteven Rostedt 19834bdced5cSSteven Rostedt (Red Hat) raw_spin_lock(&rq->rd->rto_lock); 1984b6366f04SSteven Rostedt 19854bdced5cSSteven Rostedt (Red Hat) /* 198697fb7a0aSIngo Molnar * The rto_cpu is updated under the lock, if it has a valid CPU 19874bdced5cSSteven Rostedt (Red Hat) * then the IPI is still running and will continue due to the 19884bdced5cSSteven Rostedt (Red Hat) * update to loop_next, and nothing needs to be done here. 19894bdced5cSSteven Rostedt (Red Hat) * Otherwise it is finishing up and an ipi needs to be sent. 
19904bdced5cSSteven Rostedt (Red Hat) */ 19914bdced5cSSteven Rostedt (Red Hat) if (rq->rd->rto_cpu < 0) 1992ad0f1d9dSSteven Rostedt (VMware) cpu = rto_next_cpu(rq->rd); 19934bdced5cSSteven Rostedt (Red Hat) 19944bdced5cSSteven Rostedt (Red Hat) raw_spin_unlock(&rq->rd->rto_lock); 19954bdced5cSSteven Rostedt (Red Hat) 19964bdced5cSSteven Rostedt (Red Hat) rto_start_unlock(&rq->rd->rto_loop_start); 19974bdced5cSSteven Rostedt (Red Hat) 1998364f5665SSteven Rostedt (VMware) if (cpu >= 0) { 1999364f5665SSteven Rostedt (VMware) /* Make sure the rd does not get freed while pushing */ 2000364f5665SSteven Rostedt (VMware) sched_get_rd(rq->rd); 20014bdced5cSSteven Rostedt (Red Hat) irq_work_queue_on(&rq->rd->rto_push_work, cpu); 2002b6366f04SSteven Rostedt } 2003364f5665SSteven Rostedt (VMware) } 2004b6366f04SSteven Rostedt 2005b6366f04SSteven Rostedt /* Called from hardirq context */ 20064bdced5cSSteven Rostedt (Red Hat) void rto_push_irq_work_func(struct irq_work *work) 2007b6366f04SSteven Rostedt { 2008ad0f1d9dSSteven Rostedt (VMware) struct root_domain *rd = 2009ad0f1d9dSSteven Rostedt (VMware) container_of(work, struct root_domain, rto_push_work); 20104bdced5cSSteven Rostedt (Red Hat) struct rq *rq; 2011b6366f04SSteven Rostedt int cpu; 2012b6366f04SSteven Rostedt 20134bdced5cSSteven Rostedt (Red Hat) rq = this_rq(); 2014b6366f04SSteven Rostedt 20154bdced5cSSteven Rostedt (Red Hat) /* 20164bdced5cSSteven Rostedt (Red Hat) * We do not need to grab the lock to check for has_pushable_tasks. 20174bdced5cSSteven Rostedt (Red Hat) * When it gets updated, a check is made if a push is possible. 20184bdced5cSSteven Rostedt (Red Hat) */ 2019b6366f04SSteven Rostedt if (has_pushable_tasks(rq)) { 2020b6366f04SSteven Rostedt raw_spin_lock(&rq->lock); 20214bdced5cSSteven Rostedt (Red Hat) push_rt_tasks(rq); 2022b6366f04SSteven Rostedt raw_spin_unlock(&rq->lock); 2023b6366f04SSteven Rostedt } 2024b6366f04SSteven Rostedt 2025ad0f1d9dSSteven Rostedt (VMware) raw_spin_lock(&rd->rto_lock); 20264bdced5cSSteven Rostedt (Red Hat) 2027b6366f04SSteven Rostedt /* Pass the IPI to the next rt overloaded queue */ 2028ad0f1d9dSSteven Rostedt (VMware) cpu = rto_next_cpu(rd); 2029b6366f04SSteven Rostedt 2030ad0f1d9dSSteven Rostedt (VMware) raw_spin_unlock(&rd->rto_lock); 2031b6366f04SSteven Rostedt 2032364f5665SSteven Rostedt (VMware) if (cpu < 0) { 2033364f5665SSteven Rostedt (VMware) sched_put_rd(rd); 2034b6366f04SSteven Rostedt return; 2035364f5665SSteven Rostedt (VMware) } 2036b6366f04SSteven Rostedt 2037b6366f04SSteven Rostedt /* Try the next RT overloaded CPU */ 2038ad0f1d9dSSteven Rostedt (VMware) irq_work_queue_on(&rd->rto_push_work, cpu); 2039b6366f04SSteven Rostedt } 2040b6366f04SSteven Rostedt #endif /* HAVE_RT_PUSH_IPI */ 2041b6366f04SSteven Rostedt 20428046d680SPeter Zijlstra static void pull_rt_task(struct rq *this_rq) 2043391e43daSPeter Zijlstra { 20448046d680SPeter Zijlstra int this_cpu = this_rq->cpu, cpu; 20458046d680SPeter Zijlstra bool resched = false; 2046391e43daSPeter Zijlstra struct task_struct *p; 2047391e43daSPeter Zijlstra struct rq *src_rq; 2048f73c52a5SSteven Rostedt int rt_overload_count = rt_overloaded(this_rq); 2049391e43daSPeter Zijlstra 2050f73c52a5SSteven Rostedt if (likely(!rt_overload_count)) 20518046d680SPeter Zijlstra return; 2052391e43daSPeter Zijlstra 20537c3f2ab7SPeter Zijlstra /* 20547c3f2ab7SPeter Zijlstra * Match the barrier from rt_set_overloaded; this guarantees that if we 20557c3f2ab7SPeter Zijlstra * see overloaded we must also see the rto_mask bit. 
20567c3f2ab7SPeter Zijlstra */ 20577c3f2ab7SPeter Zijlstra smp_rmb(); 20587c3f2ab7SPeter Zijlstra 2059f73c52a5SSteven Rostedt /* If we are the only overloaded CPU do nothing */ 2060f73c52a5SSteven Rostedt if (rt_overload_count == 1 && 2061f73c52a5SSteven Rostedt cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) 2062f73c52a5SSteven Rostedt return; 2063f73c52a5SSteven Rostedt 2064b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI 2065b6366f04SSteven Rostedt if (sched_feat(RT_PUSH_IPI)) { 2066b6366f04SSteven Rostedt tell_cpu_to_push(this_rq); 20678046d680SPeter Zijlstra return; 2068b6366f04SSteven Rostedt } 2069b6366f04SSteven Rostedt #endif 2070b6366f04SSteven Rostedt 2071391e43daSPeter Zijlstra for_each_cpu(cpu, this_rq->rd->rto_mask) { 2072391e43daSPeter Zijlstra if (this_cpu == cpu) 2073391e43daSPeter Zijlstra continue; 2074391e43daSPeter Zijlstra 2075391e43daSPeter Zijlstra src_rq = cpu_rq(cpu); 2076391e43daSPeter Zijlstra 2077391e43daSPeter Zijlstra /* 2078391e43daSPeter Zijlstra * Don't bother taking the src_rq->lock if the next highest 2079391e43daSPeter Zijlstra * task is known to be lower-priority than our current task. 2080391e43daSPeter Zijlstra * This may look racy, but if this value is about to go 2081391e43daSPeter Zijlstra * logically higher, the src_rq will push this task away. 2082391e43daSPeter Zijlstra * And if it's going logically lower, we do not care 2083391e43daSPeter Zijlstra */ 2084391e43daSPeter Zijlstra if (src_rq->rt.highest_prio.next >= 2085391e43daSPeter Zijlstra this_rq->rt.highest_prio.curr) 2086391e43daSPeter Zijlstra continue; 2087391e43daSPeter Zijlstra 2088391e43daSPeter Zijlstra /* 2089391e43daSPeter Zijlstra * We can potentially drop this_rq's lock in 2090391e43daSPeter Zijlstra * double_lock_balance, and another CPU could 2091391e43daSPeter Zijlstra * alter this_rq 2092391e43daSPeter Zijlstra */ 2093391e43daSPeter Zijlstra double_lock_balance(this_rq, src_rq); 2094391e43daSPeter Zijlstra 2095391e43daSPeter Zijlstra /* 2096e23ee747SKirill Tkhai * We can pull only a task, which is pushable 2097e23ee747SKirill Tkhai * on its rq, and no others. 2098391e43daSPeter Zijlstra */ 2099e23ee747SKirill Tkhai p = pick_highest_pushable_task(src_rq, this_cpu); 2100391e43daSPeter Zijlstra 2101391e43daSPeter Zijlstra /* 2102391e43daSPeter Zijlstra * Do we have an RT task that preempts 2103391e43daSPeter Zijlstra * the to-be-scheduled task? 2104391e43daSPeter Zijlstra */ 2105391e43daSPeter Zijlstra if (p && (p->prio < this_rq->rt.highest_prio.curr)) { 2106391e43daSPeter Zijlstra WARN_ON(p == src_rq->curr); 2107da0c1e65SKirill Tkhai WARN_ON(!task_on_rq_queued(p)); 2108391e43daSPeter Zijlstra 2109391e43daSPeter Zijlstra /* 2110391e43daSPeter Zijlstra * There's a chance that p is higher in priority 211197fb7a0aSIngo Molnar * than what's currently running on its CPU. 2112391e43daSPeter Zijlstra * This is just that p is waking up and hasn't 2113391e43daSPeter Zijlstra * had a chance to schedule.
We only pull 2114391e43daSPeter Zijlstra * p if it is lower in priority than the 2115391e43daSPeter Zijlstra * current task on the run queue 2116391e43daSPeter Zijlstra */ 2117391e43daSPeter Zijlstra if (p->prio < src_rq->curr->prio) 2118391e43daSPeter Zijlstra goto skip; 2119391e43daSPeter Zijlstra 21208046d680SPeter Zijlstra resched = true; 2121391e43daSPeter Zijlstra 2122391e43daSPeter Zijlstra deactivate_task(src_rq, p, 0); 2123391e43daSPeter Zijlstra set_task_cpu(p, this_cpu); 2124391e43daSPeter Zijlstra activate_task(this_rq, p, 0); 2125391e43daSPeter Zijlstra /* 2126391e43daSPeter Zijlstra * We continue with the search, just in 2127391e43daSPeter Zijlstra * case there's an even higher prio task 2128391e43daSPeter Zijlstra * in another runqueue. (low likelihood 2129391e43daSPeter Zijlstra * but possible) 2130391e43daSPeter Zijlstra */ 2131391e43daSPeter Zijlstra } 2132391e43daSPeter Zijlstra skip: 2133391e43daSPeter Zijlstra double_unlock_balance(this_rq, src_rq); 2134391e43daSPeter Zijlstra } 2135391e43daSPeter Zijlstra 21368046d680SPeter Zijlstra if (resched) 21378046d680SPeter Zijlstra resched_curr(this_rq); 2138391e43daSPeter Zijlstra } 2139391e43daSPeter Zijlstra 2140391e43daSPeter Zijlstra /* 2141391e43daSPeter Zijlstra * If we are not running and we are not going to reschedule soon, we should 2142391e43daSPeter Zijlstra * try to push tasks away now 2143391e43daSPeter Zijlstra */ 2144391e43daSPeter Zijlstra static void task_woken_rt(struct rq *rq, struct task_struct *p) 2145391e43daSPeter Zijlstra { 2146391e43daSPeter Zijlstra if (!task_running(rq, p) && 2147391e43daSPeter Zijlstra !test_tsk_need_resched(rq->curr) && 21484b53a341SIngo Molnar p->nr_cpus_allowed > 1 && 21491baca4ceSJuri Lelli (dl_task(rq->curr) || rt_task(rq->curr)) && 21504b53a341SIngo Molnar (rq->curr->nr_cpus_allowed < 2 || 2151391e43daSPeter Zijlstra rq->curr->prio <= p->prio)) 2152391e43daSPeter Zijlstra push_rt_tasks(rq); 2153391e43daSPeter Zijlstra } 2154391e43daSPeter Zijlstra 2155391e43daSPeter Zijlstra /* Assumes rq->lock is held */ 2156391e43daSPeter Zijlstra static void rq_online_rt(struct rq *rq) 2157391e43daSPeter Zijlstra { 2158391e43daSPeter Zijlstra if (rq->rt.overloaded) 2159391e43daSPeter Zijlstra rt_set_overload(rq); 2160391e43daSPeter Zijlstra 2161391e43daSPeter Zijlstra __enable_runtime(rq); 2162391e43daSPeter Zijlstra 2163391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); 2164391e43daSPeter Zijlstra } 2165391e43daSPeter Zijlstra 2166391e43daSPeter Zijlstra /* Assumes rq->lock is held */ 2167391e43daSPeter Zijlstra static void rq_offline_rt(struct rq *rq) 2168391e43daSPeter Zijlstra { 2169391e43daSPeter Zijlstra if (rq->rt.overloaded) 2170391e43daSPeter Zijlstra rt_clear_overload(rq); 2171391e43daSPeter Zijlstra 2172391e43daSPeter Zijlstra __disable_runtime(rq); 2173391e43daSPeter Zijlstra 2174391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); 2175391e43daSPeter Zijlstra } 2176391e43daSPeter Zijlstra 2177391e43daSPeter Zijlstra /* 2178391e43daSPeter Zijlstra * When switch from the rt queue, we bring ourselves to a position 2179391e43daSPeter Zijlstra * that we might want to pull RT tasks from other runqueues. 
2180391e43daSPeter Zijlstra */ 2181391e43daSPeter Zijlstra static void switched_from_rt(struct rq *rq, struct task_struct *p) 2182391e43daSPeter Zijlstra { 2183391e43daSPeter Zijlstra /* 2184391e43daSPeter Zijlstra * If there are other RT tasks then we will reschedule 2185391e43daSPeter Zijlstra * and the scheduling of the other RT tasks will handle 2186391e43daSPeter Zijlstra * the balancing. But if we are the last RT task 2187391e43daSPeter Zijlstra * we may need to handle the pulling of RT tasks 2188391e43daSPeter Zijlstra * now. 2189391e43daSPeter Zijlstra */ 2190da0c1e65SKirill Tkhai if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) 21911158ddb5SKirill Tkhai return; 21921158ddb5SKirill Tkhai 219302d8ec94SIngo Molnar rt_queue_pull_task(rq); 2194391e43daSPeter Zijlstra } 2195391e43daSPeter Zijlstra 219611c785b7SLi Zefan void __init init_sched_rt_class(void) 2197391e43daSPeter Zijlstra { 2198391e43daSPeter Zijlstra unsigned int i; 2199391e43daSPeter Zijlstra 2200391e43daSPeter Zijlstra for_each_possible_cpu(i) { 2201391e43daSPeter Zijlstra zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 2202391e43daSPeter Zijlstra GFP_KERNEL, cpu_to_node(i)); 2203391e43daSPeter Zijlstra } 2204391e43daSPeter Zijlstra } 2205391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 2206391e43daSPeter Zijlstra 2207391e43daSPeter Zijlstra /* 2208391e43daSPeter Zijlstra * When switching a task to RT, we may overload the runqueue 2209391e43daSPeter Zijlstra * with RT tasks. In this case we try to push them off to 2210391e43daSPeter Zijlstra * other runqueues. 2211391e43daSPeter Zijlstra */ 2212391e43daSPeter Zijlstra static void switched_to_rt(struct rq *rq, struct task_struct *p) 2213391e43daSPeter Zijlstra { 2214391e43daSPeter Zijlstra /* 2215391e43daSPeter Zijlstra * If we are already running, then there's nothing 2216391e43daSPeter Zijlstra * that needs to be done. But if we are not running 2217391e43daSPeter Zijlstra * we may need to preempt the current running task. 2218391e43daSPeter Zijlstra * If that current running task is also an RT task 2219391e43daSPeter Zijlstra * then see if we can move to another run queue. 2220391e43daSPeter Zijlstra */ 2221da0c1e65SKirill Tkhai if (task_on_rq_queued(p) && rq->curr != p) { 2222391e43daSPeter Zijlstra #ifdef CONFIG_SMP 22234b53a341SIngo Molnar if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) 222402d8ec94SIngo Molnar rt_queue_push_tasks(rq); 2225619bd4a7SSebastian Andrzej Siewior #endif /* CONFIG_SMP */ 22262fe25826SPaul E. McKenney if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) 22278875125eSKirill Tkhai resched_curr(rq); 2228391e43daSPeter Zijlstra } 2229391e43daSPeter Zijlstra } 2230391e43daSPeter Zijlstra 2231391e43daSPeter Zijlstra /* 2232391e43daSPeter Zijlstra * Priority of the task has changed. This may cause 2233391e43daSPeter Zijlstra * us to initiate a push or pull. 2234391e43daSPeter Zijlstra */ 2235391e43daSPeter Zijlstra static void 2236391e43daSPeter Zijlstra prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) 2237391e43daSPeter Zijlstra { 2238da0c1e65SKirill Tkhai if (!task_on_rq_queued(p)) 2239391e43daSPeter Zijlstra return; 2240391e43daSPeter Zijlstra 2241391e43daSPeter Zijlstra if (rq->curr == p) { 2242391e43daSPeter Zijlstra #ifdef CONFIG_SMP 2243391e43daSPeter Zijlstra /* 2244391e43daSPeter Zijlstra * If our priority decreases while running, we 2245391e43daSPeter Zijlstra * may need to pull tasks to this runqueue. 
2246391e43daSPeter Zijlstra */ 2247391e43daSPeter Zijlstra if (oldprio < p->prio) 224802d8ec94SIngo Molnar rt_queue_pull_task(rq); 2249fd7a4bedSPeter Zijlstra 2250391e43daSPeter Zijlstra /* 2251391e43daSPeter Zijlstra * If there's a higher priority task waiting to run 2252fd7a4bedSPeter Zijlstra * then reschedule. 2253391e43daSPeter Zijlstra */ 2254fd7a4bedSPeter Zijlstra if (p->prio > rq->rt.highest_prio.curr) 22558875125eSKirill Tkhai resched_curr(rq); 2256391e43daSPeter Zijlstra #else 2257391e43daSPeter Zijlstra /* For UP simply resched on drop of prio */ 2258391e43daSPeter Zijlstra if (oldprio < p->prio) 22598875125eSKirill Tkhai resched_curr(rq); 2260391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 2261391e43daSPeter Zijlstra } else { 2262391e43daSPeter Zijlstra /* 2263391e43daSPeter Zijlstra * This task is not running, but if it is 2264391e43daSPeter Zijlstra * greater than the current running task 2265391e43daSPeter Zijlstra * then reschedule. 2266391e43daSPeter Zijlstra */ 2267391e43daSPeter Zijlstra if (p->prio < rq->curr->prio) 22688875125eSKirill Tkhai resched_curr(rq); 2269391e43daSPeter Zijlstra } 2270391e43daSPeter Zijlstra } 2271391e43daSPeter Zijlstra 2272b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 2273391e43daSPeter Zijlstra static void watchdog(struct rq *rq, struct task_struct *p) 2274391e43daSPeter Zijlstra { 2275391e43daSPeter Zijlstra unsigned long soft, hard; 2276391e43daSPeter Zijlstra 2277391e43daSPeter Zijlstra /* max may change after cur was read, this will be fixed next tick */ 2278391e43daSPeter Zijlstra soft = task_rlimit(p, RLIMIT_RTTIME); 2279391e43daSPeter Zijlstra hard = task_rlimit_max(p, RLIMIT_RTTIME); 2280391e43daSPeter Zijlstra 2281391e43daSPeter Zijlstra if (soft != RLIM_INFINITY) { 2282391e43daSPeter Zijlstra unsigned long next; 2283391e43daSPeter Zijlstra 228457d2aa00SYing Xue if (p->rt.watchdog_stamp != jiffies) { 2285391e43daSPeter Zijlstra p->rt.timeout++; 228657d2aa00SYing Xue p->rt.watchdog_stamp = jiffies; 228757d2aa00SYing Xue } 228857d2aa00SYing Xue 2289391e43daSPeter Zijlstra next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); 2290391e43daSPeter Zijlstra if (p->rt.timeout > next) 2291391e43daSPeter Zijlstra p->cputime_expires.sched_exp = p->se.sum_exec_runtime; 2292391e43daSPeter Zijlstra } 2293391e43daSPeter Zijlstra } 2294b18b6a9cSNicolas Pitre #else 2295b18b6a9cSNicolas Pitre static inline void watchdog(struct rq *rq, struct task_struct *p) { } 2296b18b6a9cSNicolas Pitre #endif 2297391e43daSPeter Zijlstra 2298d84b3131SFrederic Weisbecker /* 2299d84b3131SFrederic Weisbecker * scheduler tick hitting a task of our scheduling class. 2300d84b3131SFrederic Weisbecker * 2301d84b3131SFrederic Weisbecker * NOTE: This function can be called remotely by the tick offload that 2302d84b3131SFrederic Weisbecker * goes along full dynticks. Therefore no local assumption can be made 2303d84b3131SFrederic Weisbecker * and everything must be accessed through the @rq and @curr passed in 2304d84b3131SFrederic Weisbecker * parameters. 2305d84b3131SFrederic Weisbecker */ 2306391e43daSPeter Zijlstra static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) 2307391e43daSPeter Zijlstra { 2308454c7999SColin Cross struct sched_rt_entity *rt_se = &p->rt; 2309454c7999SColin Cross 2310391e43daSPeter Zijlstra update_curr_rt(rq); 2311391e43daSPeter Zijlstra 2312391e43daSPeter Zijlstra watchdog(rq, p); 2313391e43daSPeter Zijlstra 2314391e43daSPeter Zijlstra /* 2315391e43daSPeter Zijlstra * RR tasks need a special form of timeslice management. 
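 * (Illustrative, editorial: the code below decrements p->rt.time_slice on
 * every tick; only when it reaches zero is it refilled from
 * sched_rr_timeslice, and the task is requeued to the tail of its prio
 * list only if it, or one of its ancestor entities, shares that list
 * with another entity.)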
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = sched_rr_timeslice;

	/*
	 * Requeue to the end of the queue if we (and all of our ancestors)
	 * are not the only element on the queue.
	 */
	for_each_sched_rt_entity(rt_se) {
		if (rt_se->run_list.prev != rt_se->run_list.next) {
			requeue_task_rt(rq, p, 0);
			resched_curr(rq);
			return;
		}
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return sched_rr_timeslice;
	else
		return 0;
}

const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,

	.set_cpus_allowed	= set_cpus_allowed_common,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.task_woken		= task_woken_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.get_rr_interval	= get_rr_interval_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,

	.update_curr		= update_curr_rt,
};

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *g, *p;

	/*
	 * Autogroups do not have RT tasks; see autogroup_create().
	 */
	if (task_group_is_autogroup(tg))
		return 0;

	for_each_process_thread(g, p) {
		if (rt_task(p) && task_group(p) == tg)
			return 1;
	}

	return 0;
}

struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};

static int tg_rt_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks.
	 */
	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
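	 *
	 * Worked example (hypothetical numbers): with our period/runtime set
	 * to 1000000us/950000us, total = to_ratio(1s, 0.95s). Two children
	 * configured with 400000us runtime in a 1000000us period contribute
	 * sum = 2 * to_ratio(1s, 0.4s), which fits; a third such child would
	 * push sum above total and the change would be rejected with -EINVAL.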
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}

static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	int ret;

	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	rcu_read_lock();
	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}

static int tg_set_rt_bandwidth(struct task_group *tg,
		u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	/*
	 * Disallowing the root group RT runtime is BAD, it would disallow the
	 * kernel creating (and/or operating) RT threads.
	 */
	if (tg == &root_task_group && rt_runtime == 0)
		return -EINVAL;

	/* A zero period doesn't make any sense. */
	if (rt_period == 0)
		return -EINVAL;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return err;
}

int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}

int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
{
	u64 rt_runtime, rt_period;

	rt_period = rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;

	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}

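/*
 * Example (a sketch; the cgroup mount point and group name are assumptions):
 * with CONFIG_RT_GROUP_SCHED, the per-group knobs above back the cpu cgroup
 * controller's cpu.rt_runtime_us and cpu.rt_period_us files, e.g.
 *
 *	echo 300000  > /sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us
 *	echo 1000000 > /sys/fs/cgroup/cpu/mygroup/cpu.rt_period_us
 *
 * The setters convert microseconds to nanoseconds (a negative runtime maps
 * to RUNTIME_INF) and then go through tg_set_rt_bandwidth(), which validates
 * the new values against the parent and children before applying them.
 */
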
static int sched_rt_global_constraints(void)
{
	int ret = 0;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	ret = __rt_schedulable(NULL, 0, 0);
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return ret;
}

int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}

#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */

static int sched_rt_global_validate(void)
{
	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
		return -EINVAL;

	return 0;
}

static void sched_rt_do_global(void)
{
	def_rt_bandwidth.rt_runtime = global_rt_runtime();
	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
}

int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

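	/*
	 * Snapshot the current values so that a write which fails validation
	 * below can be rolled back in the undo: path.
	 */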
	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_validate();
		if (ret)
			goto undo;

		ret = sched_dl_global_validate();
		if (ret)
			goto undo;

		ret = sched_rt_global_constraints();
		if (ret)
			goto undo;

		sched_rt_do_global();
		sched_dl_do_global();
	}
	if (0) {
undo:
		sysctl_sched_rt_period = old_period;
		sysctl_sched_rt_runtime = old_runtime;
	}
	mutex_unlock(&mutex);

	return ret;
}

int sched_rr_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	/*
	 * Make sure that internally we keep jiffies.
	 * Also, writing zero resets the timeslice to default:
	 */
	if (!ret && write) {
		sched_rr_timeslice =
			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
			msecs_to_jiffies(sysctl_sched_rr_timeslice);
	}
	mutex_unlock(&mutex);

	return ret;
}

#ifdef CONFIG_SCHED_DEBUG
void print_rt_stats(struct seq_file *m, int cpu)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */
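
/*
 * Example (a sketch; the HZ value is an assumption): with CONFIG_HZ=250,
 * writing 100 to /proc/sys/kernel/sched_rr_timeslice_ms makes
 * sched_rr_handler() set sched_rr_timeslice = msecs_to_jiffies(100) = 25
 * jiffies, while writing 0 (or a negative value) restores the RR_TIMESLICE
 * default.
 */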