1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2391e43daSPeter Zijlstra /* 3391e43daSPeter Zijlstra * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR 4391e43daSPeter Zijlstra * policies) 5391e43daSPeter Zijlstra */ 6391e43daSPeter Zijlstra 7391e43daSPeter Zijlstra #include "sched.h" 8391e43daSPeter Zijlstra 9391e43daSPeter Zijlstra #include <linux/slab.h> 10b6366f04SSteven Rostedt #include <linux/irq_work.h> 11391e43daSPeter Zijlstra 12ce0dbbbbSClark Williams int sched_rr_timeslice = RR_TIMESLICE; 13975e155eSShile Zhang int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE; 14ce0dbbbbSClark Williams 15391e43daSPeter Zijlstra static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); 16391e43daSPeter Zijlstra 17391e43daSPeter Zijlstra struct rt_bandwidth def_rt_bandwidth; 18391e43daSPeter Zijlstra 19391e43daSPeter Zijlstra static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer) 20391e43daSPeter Zijlstra { 21391e43daSPeter Zijlstra struct rt_bandwidth *rt_b = 22391e43daSPeter Zijlstra container_of(timer, struct rt_bandwidth, rt_period_timer); 23391e43daSPeter Zijlstra int idle = 0; 2477a4d1a1SPeter Zijlstra int overrun; 25391e43daSPeter Zijlstra 2677a4d1a1SPeter Zijlstra raw_spin_lock(&rt_b->rt_runtime_lock); 27391e43daSPeter Zijlstra for (;;) { 2877a4d1a1SPeter Zijlstra overrun = hrtimer_forward_now(timer, rt_b->rt_period); 29391e43daSPeter Zijlstra if (!overrun) 30391e43daSPeter Zijlstra break; 31391e43daSPeter Zijlstra 3277a4d1a1SPeter Zijlstra raw_spin_unlock(&rt_b->rt_runtime_lock); 33391e43daSPeter Zijlstra idle = do_sched_rt_period_timer(rt_b, overrun); 3477a4d1a1SPeter Zijlstra raw_spin_lock(&rt_b->rt_runtime_lock); 35391e43daSPeter Zijlstra } 364cfafd30SPeter Zijlstra if (idle) 374cfafd30SPeter Zijlstra rt_b->rt_period_active = 0; 3877a4d1a1SPeter Zijlstra raw_spin_unlock(&rt_b->rt_runtime_lock); 39391e43daSPeter Zijlstra 40391e43daSPeter Zijlstra return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; 41391e43daSPeter Zijlstra } 42391e43daSPeter Zijlstra 43391e43daSPeter Zijlstra void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) 44391e43daSPeter Zijlstra { 45391e43daSPeter Zijlstra rt_b->rt_period = ns_to_ktime(period); 46391e43daSPeter Zijlstra rt_b->rt_runtime = runtime; 47391e43daSPeter Zijlstra 48391e43daSPeter Zijlstra raw_spin_lock_init(&rt_b->rt_runtime_lock); 49391e43daSPeter Zijlstra 50391e43daSPeter Zijlstra hrtimer_init(&rt_b->rt_period_timer, 51391e43daSPeter Zijlstra CLOCK_MONOTONIC, HRTIMER_MODE_REL); 52391e43daSPeter Zijlstra rt_b->rt_period_timer.function = sched_rt_period_timer; 53391e43daSPeter Zijlstra } 54391e43daSPeter Zijlstra 55391e43daSPeter Zijlstra static void start_rt_bandwidth(struct rt_bandwidth *rt_b) 56391e43daSPeter Zijlstra { 57391e43daSPeter Zijlstra if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) 58391e43daSPeter Zijlstra return; 59391e43daSPeter Zijlstra 60391e43daSPeter Zijlstra raw_spin_lock(&rt_b->rt_runtime_lock); 614cfafd30SPeter Zijlstra if (!rt_b->rt_period_active) { 624cfafd30SPeter Zijlstra rt_b->rt_period_active = 1; 63c3a990dcSSteven Rostedt /* 64c3a990dcSSteven Rostedt * SCHED_DEADLINE updates the bandwidth, as a run away 65c3a990dcSSteven Rostedt * RT task with a DL task could hog a CPU. But DL does 66c3a990dcSSteven Rostedt * not reset the period. If a deadline task was running 67c3a990dcSSteven Rostedt * without an RT task running, it can cause RT tasks to 68c3a990dcSSteven Rostedt * throttle when they start up. 
Kick the timer right away 69c3a990dcSSteven Rostedt * to update the period. 70c3a990dcSSteven Rostedt */ 71c3a990dcSSteven Rostedt hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0)); 724cfafd30SPeter Zijlstra hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED); 734cfafd30SPeter Zijlstra } 74391e43daSPeter Zijlstra raw_spin_unlock(&rt_b->rt_runtime_lock); 75391e43daSPeter Zijlstra } 76391e43daSPeter Zijlstra 7707c54f7aSAbel Vesa void init_rt_rq(struct rt_rq *rt_rq) 78391e43daSPeter Zijlstra { 79391e43daSPeter Zijlstra struct rt_prio_array *array; 80391e43daSPeter Zijlstra int i; 81391e43daSPeter Zijlstra 82391e43daSPeter Zijlstra array = &rt_rq->active; 83391e43daSPeter Zijlstra for (i = 0; i < MAX_RT_PRIO; i++) { 84391e43daSPeter Zijlstra INIT_LIST_HEAD(array->queue + i); 85391e43daSPeter Zijlstra __clear_bit(i, array->bitmap); 86391e43daSPeter Zijlstra } 87391e43daSPeter Zijlstra /* delimiter for bitsearch: */ 88391e43daSPeter Zijlstra __set_bit(MAX_RT_PRIO, array->bitmap); 89391e43daSPeter Zijlstra 90391e43daSPeter Zijlstra #if defined CONFIG_SMP 91391e43daSPeter Zijlstra rt_rq->highest_prio.curr = MAX_RT_PRIO; 92391e43daSPeter Zijlstra rt_rq->highest_prio.next = MAX_RT_PRIO; 93391e43daSPeter Zijlstra rt_rq->rt_nr_migratory = 0; 94391e43daSPeter Zijlstra rt_rq->overloaded = 0; 95391e43daSPeter Zijlstra plist_head_init(&rt_rq->pushable_tasks); 96b6366f04SSteven Rostedt #endif /* CONFIG_SMP */ 97f4ebcbc0SKirill Tkhai /* We start in dequeued state, because no RT tasks are queued */ 98f4ebcbc0SKirill Tkhai rt_rq->rt_queued = 0; 99391e43daSPeter Zijlstra 100391e43daSPeter Zijlstra rt_rq->rt_time = 0; 101391e43daSPeter Zijlstra rt_rq->rt_throttled = 0; 102391e43daSPeter Zijlstra rt_rq->rt_runtime = 0; 103391e43daSPeter Zijlstra raw_spin_lock_init(&rt_rq->rt_runtime_lock); 104391e43daSPeter Zijlstra } 105391e43daSPeter Zijlstra 106391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 107391e43daSPeter Zijlstra static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) 108391e43daSPeter Zijlstra { 109391e43daSPeter Zijlstra hrtimer_cancel(&rt_b->rt_period_timer); 110391e43daSPeter Zijlstra } 111391e43daSPeter Zijlstra 112391e43daSPeter Zijlstra #define rt_entity_is_task(rt_se) (!(rt_se)->my_q) 113391e43daSPeter Zijlstra 114391e43daSPeter Zijlstra static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) 115391e43daSPeter Zijlstra { 116391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 117391e43daSPeter Zijlstra WARN_ON_ONCE(!rt_entity_is_task(rt_se)); 118391e43daSPeter Zijlstra #endif 119391e43daSPeter Zijlstra return container_of(rt_se, struct task_struct, rt); 120391e43daSPeter Zijlstra } 121391e43daSPeter Zijlstra 122391e43daSPeter Zijlstra static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) 123391e43daSPeter Zijlstra { 124391e43daSPeter Zijlstra return rt_rq->rq; 125391e43daSPeter Zijlstra } 126391e43daSPeter Zijlstra 127391e43daSPeter Zijlstra static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) 128391e43daSPeter Zijlstra { 129391e43daSPeter Zijlstra return rt_se->rt_rq; 130391e43daSPeter Zijlstra } 131391e43daSPeter Zijlstra 132653d07a6SKirill Tkhai static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) 133653d07a6SKirill Tkhai { 134653d07a6SKirill Tkhai struct rt_rq *rt_rq = rt_se->rt_rq; 135653d07a6SKirill Tkhai 136653d07a6SKirill Tkhai return rt_rq->rq; 137653d07a6SKirill Tkhai } 138653d07a6SKirill Tkhai 139391e43daSPeter Zijlstra void free_rt_sched_group(struct task_group *tg) 140391e43daSPeter Zijlstra {
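	/*
	 * Tear down the group's RT state: cancel the bandwidth timer, then
	 * free each per-CPU rt_rq and rt_se before the arrays that held
	 * them. The NULL checks also cover a partially failed
	 * alloc_rt_sched_group().
	 */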
141391e43daSPeter Zijlstra int i; 142391e43daSPeter Zijlstra 143391e43daSPeter Zijlstra if (tg->rt_se) 144391e43daSPeter Zijlstra destroy_rt_bandwidth(&tg->rt_bandwidth); 145391e43daSPeter Zijlstra 146391e43daSPeter Zijlstra for_each_possible_cpu(i) { 147391e43daSPeter Zijlstra if (tg->rt_rq) 148391e43daSPeter Zijlstra kfree(tg->rt_rq[i]); 149391e43daSPeter Zijlstra if (tg->rt_se) 150391e43daSPeter Zijlstra kfree(tg->rt_se[i]); 151391e43daSPeter Zijlstra } 152391e43daSPeter Zijlstra 153391e43daSPeter Zijlstra kfree(tg->rt_rq); 154391e43daSPeter Zijlstra kfree(tg->rt_se); 155391e43daSPeter Zijlstra } 156391e43daSPeter Zijlstra 157391e43daSPeter Zijlstra void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, 158391e43daSPeter Zijlstra struct sched_rt_entity *rt_se, int cpu, 159391e43daSPeter Zijlstra struct sched_rt_entity *parent) 160391e43daSPeter Zijlstra { 161391e43daSPeter Zijlstra struct rq *rq = cpu_rq(cpu); 162391e43daSPeter Zijlstra 163391e43daSPeter Zijlstra rt_rq->highest_prio.curr = MAX_RT_PRIO; 164391e43daSPeter Zijlstra rt_rq->rt_nr_boosted = 0; 165391e43daSPeter Zijlstra rt_rq->rq = rq; 166391e43daSPeter Zijlstra rt_rq->tg = tg; 167391e43daSPeter Zijlstra 168391e43daSPeter Zijlstra tg->rt_rq[cpu] = rt_rq; 169391e43daSPeter Zijlstra tg->rt_se[cpu] = rt_se; 170391e43daSPeter Zijlstra 171391e43daSPeter Zijlstra if (!rt_se) 172391e43daSPeter Zijlstra return; 173391e43daSPeter Zijlstra 174391e43daSPeter Zijlstra if (!parent) 175391e43daSPeter Zijlstra rt_se->rt_rq = &rq->rt; 176391e43daSPeter Zijlstra else 177391e43daSPeter Zijlstra rt_se->rt_rq = parent->my_q; 178391e43daSPeter Zijlstra 179391e43daSPeter Zijlstra rt_se->my_q = rt_rq; 180391e43daSPeter Zijlstra rt_se->parent = parent; 181391e43daSPeter Zijlstra INIT_LIST_HEAD(&rt_se->run_list); 182391e43daSPeter Zijlstra } 183391e43daSPeter Zijlstra 184391e43daSPeter Zijlstra int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) 185391e43daSPeter Zijlstra { 186391e43daSPeter Zijlstra struct rt_rq *rt_rq; 187391e43daSPeter Zijlstra struct sched_rt_entity *rt_se; 188391e43daSPeter Zijlstra int i; 189391e43daSPeter Zijlstra 190391e43daSPeter Zijlstra tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); 191391e43daSPeter Zijlstra if (!tg->rt_rq) 192391e43daSPeter Zijlstra goto err; 193391e43daSPeter Zijlstra tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); 194391e43daSPeter Zijlstra if (!tg->rt_se) 195391e43daSPeter Zijlstra goto err; 196391e43daSPeter Zijlstra 197391e43daSPeter Zijlstra init_rt_bandwidth(&tg->rt_bandwidth, 198391e43daSPeter Zijlstra ktime_to_ns(def_rt_bandwidth.rt_period), 0); 199391e43daSPeter Zijlstra 200391e43daSPeter Zijlstra for_each_possible_cpu(i) { 201391e43daSPeter Zijlstra rt_rq = kzalloc_node(sizeof(struct rt_rq), 202391e43daSPeter Zijlstra GFP_KERNEL, cpu_to_node(i)); 203391e43daSPeter Zijlstra if (!rt_rq) 204391e43daSPeter Zijlstra goto err; 205391e43daSPeter Zijlstra 206391e43daSPeter Zijlstra rt_se = kzalloc_node(sizeof(struct sched_rt_entity), 207391e43daSPeter Zijlstra GFP_KERNEL, cpu_to_node(i)); 208391e43daSPeter Zijlstra if (!rt_se) 209391e43daSPeter Zijlstra goto err_free_rq; 210391e43daSPeter Zijlstra 21107c54f7aSAbel Vesa init_rt_rq(rt_rq); 212391e43daSPeter Zijlstra rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; 213391e43daSPeter Zijlstra init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); 214391e43daSPeter Zijlstra } 215391e43daSPeter Zijlstra 216391e43daSPeter Zijlstra return 1; 217391e43daSPeter Zijlstra 218391e43daSPeter Zijlstra 
err_free_rq: 219391e43daSPeter Zijlstra kfree(rt_rq); 220391e43daSPeter Zijlstra err: 221391e43daSPeter Zijlstra return 0; 222391e43daSPeter Zijlstra } 223391e43daSPeter Zijlstra 224391e43daSPeter Zijlstra #else /* CONFIG_RT_GROUP_SCHED */ 225391e43daSPeter Zijlstra 226391e43daSPeter Zijlstra #define rt_entity_is_task(rt_se) (1) 227391e43daSPeter Zijlstra 228391e43daSPeter Zijlstra static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) 229391e43daSPeter Zijlstra { 230391e43daSPeter Zijlstra return container_of(rt_se, struct task_struct, rt); 231391e43daSPeter Zijlstra } 232391e43daSPeter Zijlstra 233391e43daSPeter Zijlstra static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) 234391e43daSPeter Zijlstra { 235391e43daSPeter Zijlstra return container_of(rt_rq, struct rq, rt); 236391e43daSPeter Zijlstra } 237391e43daSPeter Zijlstra 238653d07a6SKirill Tkhai static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) 239391e43daSPeter Zijlstra { 240391e43daSPeter Zijlstra struct task_struct *p = rt_task_of(rt_se); 241653d07a6SKirill Tkhai 242653d07a6SKirill Tkhai return task_rq(p); 243653d07a6SKirill Tkhai } 244653d07a6SKirill Tkhai 245653d07a6SKirill Tkhai static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) 246653d07a6SKirill Tkhai { 247653d07a6SKirill Tkhai struct rq *rq = rq_of_rt_se(rt_se); 248391e43daSPeter Zijlstra 249391e43daSPeter Zijlstra return &rq->rt; 250391e43daSPeter Zijlstra } 251391e43daSPeter Zijlstra 252391e43daSPeter Zijlstra void free_rt_sched_group(struct task_group *tg) { } 253391e43daSPeter Zijlstra 254391e43daSPeter Zijlstra int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) 255391e43daSPeter Zijlstra { 256391e43daSPeter Zijlstra return 1; 257391e43daSPeter Zijlstra } 258391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */ 259391e43daSPeter Zijlstra 260391e43daSPeter Zijlstra #ifdef CONFIG_SMP 261391e43daSPeter Zijlstra 2628046d680SPeter Zijlstra static void pull_rt_task(struct rq *this_rq); 26338033c37SPeter Zijlstra 264dc877341SPeter Zijlstra static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) 265dc877341SPeter Zijlstra { 266dc877341SPeter Zijlstra /* Try to pull RT tasks here if we lower this rq's prio */ 267dc877341SPeter Zijlstra return rq->rt.highest_prio.curr > prev->prio; 268dc877341SPeter Zijlstra } 269dc877341SPeter Zijlstra 270391e43daSPeter Zijlstra static inline int rt_overloaded(struct rq *rq) 271391e43daSPeter Zijlstra { 272391e43daSPeter Zijlstra return atomic_read(&rq->rd->rto_count); 273391e43daSPeter Zijlstra } 274391e43daSPeter Zijlstra 275391e43daSPeter Zijlstra static inline void rt_set_overload(struct rq *rq) 276391e43daSPeter Zijlstra { 277391e43daSPeter Zijlstra if (!rq->online) 278391e43daSPeter Zijlstra return; 279391e43daSPeter Zijlstra 280391e43daSPeter Zijlstra cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); 281391e43daSPeter Zijlstra /* 282391e43daSPeter Zijlstra * Make sure the mask is visible before we set 283391e43daSPeter Zijlstra * the overload count. That is checked to determine 284391e43daSPeter Zijlstra * if we should look at the mask. It would be a shame 285391e43daSPeter Zijlstra * if we looked at the mask, but the mask was not 286391e43daSPeter Zijlstra * updated yet. 2877c3f2ab7SPeter Zijlstra * 2887c3f2ab7SPeter Zijlstra * Matched by the barrier in pull_rt_task(). 
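 *
 * (The smp_wmb() below orders the rto_mask update before the rto_count
 * increment; pull_rt_task() issues the matching smp_rmb() after it has
 * seen rto_count non-zero.)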
289391e43daSPeter Zijlstra */ 2907c3f2ab7SPeter Zijlstra smp_wmb(); 291391e43daSPeter Zijlstra atomic_inc(&rq->rd->rto_count); 292391e43daSPeter Zijlstra } 293391e43daSPeter Zijlstra 294391e43daSPeter Zijlstra static inline void rt_clear_overload(struct rq *rq) 295391e43daSPeter Zijlstra { 296391e43daSPeter Zijlstra if (!rq->online) 297391e43daSPeter Zijlstra return; 298391e43daSPeter Zijlstra 299391e43daSPeter Zijlstra /* the order here really doesn't matter */ 300391e43daSPeter Zijlstra atomic_dec(&rq->rd->rto_count); 301391e43daSPeter Zijlstra cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); 302391e43daSPeter Zijlstra } 303391e43daSPeter Zijlstra 304391e43daSPeter Zijlstra static void update_rt_migration(struct rt_rq *rt_rq) 305391e43daSPeter Zijlstra { 306391e43daSPeter Zijlstra if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) { 307391e43daSPeter Zijlstra if (!rt_rq->overloaded) { 308391e43daSPeter Zijlstra rt_set_overload(rq_of_rt_rq(rt_rq)); 309391e43daSPeter Zijlstra rt_rq->overloaded = 1; 310391e43daSPeter Zijlstra } 311391e43daSPeter Zijlstra } else if (rt_rq->overloaded) { 312391e43daSPeter Zijlstra rt_clear_overload(rq_of_rt_rq(rt_rq)); 313391e43daSPeter Zijlstra rt_rq->overloaded = 0; 314391e43daSPeter Zijlstra } 315391e43daSPeter Zijlstra } 316391e43daSPeter Zijlstra 317391e43daSPeter Zijlstra static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 318391e43daSPeter Zijlstra { 31929baa747SPeter Zijlstra struct task_struct *p; 32029baa747SPeter Zijlstra 321391e43daSPeter Zijlstra if (!rt_entity_is_task(rt_se)) 322391e43daSPeter Zijlstra return; 323391e43daSPeter Zijlstra 32429baa747SPeter Zijlstra p = rt_task_of(rt_se); 325391e43daSPeter Zijlstra rt_rq = &rq_of_rt_rq(rt_rq)->rt; 326391e43daSPeter Zijlstra 327391e43daSPeter Zijlstra rt_rq->rt_nr_total++; 3284b53a341SIngo Molnar if (p->nr_cpus_allowed > 1) 329391e43daSPeter Zijlstra rt_rq->rt_nr_migratory++; 330391e43daSPeter Zijlstra 331391e43daSPeter Zijlstra update_rt_migration(rt_rq); 332391e43daSPeter Zijlstra } 333391e43daSPeter Zijlstra 334391e43daSPeter Zijlstra static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 335391e43daSPeter Zijlstra { 33629baa747SPeter Zijlstra struct task_struct *p; 33729baa747SPeter Zijlstra 338391e43daSPeter Zijlstra if (!rt_entity_is_task(rt_se)) 339391e43daSPeter Zijlstra return; 340391e43daSPeter Zijlstra 34129baa747SPeter Zijlstra p = rt_task_of(rt_se); 342391e43daSPeter Zijlstra rt_rq = &rq_of_rt_rq(rt_rq)->rt; 343391e43daSPeter Zijlstra 344391e43daSPeter Zijlstra rt_rq->rt_nr_total--; 3454b53a341SIngo Molnar if (p->nr_cpus_allowed > 1) 346391e43daSPeter Zijlstra rt_rq->rt_nr_migratory--; 347391e43daSPeter Zijlstra 348391e43daSPeter Zijlstra update_rt_migration(rt_rq); 349391e43daSPeter Zijlstra } 350391e43daSPeter Zijlstra 351391e43daSPeter Zijlstra static inline int has_pushable_tasks(struct rq *rq) 352391e43daSPeter Zijlstra { 353391e43daSPeter Zijlstra return !plist_head_empty(&rq->rt.pushable_tasks); 354391e43daSPeter Zijlstra } 355391e43daSPeter Zijlstra 356fd7a4bedSPeter Zijlstra static DEFINE_PER_CPU(struct callback_head, rt_push_head); 357fd7a4bedSPeter Zijlstra static DEFINE_PER_CPU(struct callback_head, rt_pull_head); 358e3fca9e7SPeter Zijlstra 359e3fca9e7SPeter Zijlstra static void push_rt_tasks(struct rq *); 360fd7a4bedSPeter Zijlstra static void pull_rt_task(struct rq *); 361e3fca9e7SPeter Zijlstra 362e3fca9e7SPeter Zijlstra static inline void queue_push_tasks(struct rq *rq) 363dc877341SPeter Zijlstra { 364e3fca9e7SPeter 
Zijlstra if (!has_pushable_tasks(rq)) 365e3fca9e7SPeter Zijlstra return; 366e3fca9e7SPeter Zijlstra 367fd7a4bedSPeter Zijlstra queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks); 368fd7a4bedSPeter Zijlstra } 369fd7a4bedSPeter Zijlstra 370fd7a4bedSPeter Zijlstra static inline void queue_pull_task(struct rq *rq) 371fd7a4bedSPeter Zijlstra { 372fd7a4bedSPeter Zijlstra queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task); 373dc877341SPeter Zijlstra } 374dc877341SPeter Zijlstra 375391e43daSPeter Zijlstra static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) 376391e43daSPeter Zijlstra { 377391e43daSPeter Zijlstra plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); 378391e43daSPeter Zijlstra plist_node_init(&p->pushable_tasks, p->prio); 379391e43daSPeter Zijlstra plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); 380391e43daSPeter Zijlstra 381391e43daSPeter Zijlstra /* Update the highest prio pushable task */ 382391e43daSPeter Zijlstra if (p->prio < rq->rt.highest_prio.next) 383391e43daSPeter Zijlstra rq->rt.highest_prio.next = p->prio; 384391e43daSPeter Zijlstra } 385391e43daSPeter Zijlstra 386391e43daSPeter Zijlstra static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) 387391e43daSPeter Zijlstra { 388391e43daSPeter Zijlstra plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); 389391e43daSPeter Zijlstra 390391e43daSPeter Zijlstra /* Update the new highest prio pushable task */ 391391e43daSPeter Zijlstra if (has_pushable_tasks(rq)) { 392391e43daSPeter Zijlstra p = plist_first_entry(&rq->rt.pushable_tasks, 393391e43daSPeter Zijlstra struct task_struct, pushable_tasks); 394391e43daSPeter Zijlstra rq->rt.highest_prio.next = p->prio; 395391e43daSPeter Zijlstra } else 396391e43daSPeter Zijlstra rq->rt.highest_prio.next = MAX_RT_PRIO; 397391e43daSPeter Zijlstra } 398391e43daSPeter Zijlstra 399391e43daSPeter Zijlstra #else 400391e43daSPeter Zijlstra 401391e43daSPeter Zijlstra static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) 402391e43daSPeter Zijlstra { 403391e43daSPeter Zijlstra } 404391e43daSPeter Zijlstra 405391e43daSPeter Zijlstra static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) 406391e43daSPeter Zijlstra { 407391e43daSPeter Zijlstra } 408391e43daSPeter Zijlstra 409391e43daSPeter Zijlstra static inline 410391e43daSPeter Zijlstra void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 411391e43daSPeter Zijlstra { 412391e43daSPeter Zijlstra } 413391e43daSPeter Zijlstra 414391e43daSPeter Zijlstra static inline 415391e43daSPeter Zijlstra void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 416391e43daSPeter Zijlstra { 417391e43daSPeter Zijlstra } 418391e43daSPeter Zijlstra 419dc877341SPeter Zijlstra static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) 420dc877341SPeter Zijlstra { 421dc877341SPeter Zijlstra return false; 422dc877341SPeter Zijlstra } 423dc877341SPeter Zijlstra 4248046d680SPeter Zijlstra static inline void pull_rt_task(struct rq *this_rq) 425dc877341SPeter Zijlstra { 426dc877341SPeter Zijlstra } 427dc877341SPeter Zijlstra 428e3fca9e7SPeter Zijlstra static inline void queue_push_tasks(struct rq *rq) 429dc877341SPeter Zijlstra { 430dc877341SPeter Zijlstra } 431391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 432391e43daSPeter Zijlstra 433f4ebcbc0SKirill Tkhai static void enqueue_top_rt_rq(struct rt_rq *rt_rq); 434f4ebcbc0SKirill Tkhai static void dequeue_top_rt_rq(struct rt_rq 
*rt_rq); 435f4ebcbc0SKirill Tkhai 436391e43daSPeter Zijlstra static inline int on_rt_rq(struct sched_rt_entity *rt_se) 437391e43daSPeter Zijlstra { 438ff77e468SPeter Zijlstra return rt_se->on_rq; 439391e43daSPeter Zijlstra } 440391e43daSPeter Zijlstra 441391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 442391e43daSPeter Zijlstra 443391e43daSPeter Zijlstra static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) 444391e43daSPeter Zijlstra { 445391e43daSPeter Zijlstra if (!rt_rq->tg) 446391e43daSPeter Zijlstra return RUNTIME_INF; 447391e43daSPeter Zijlstra 448391e43daSPeter Zijlstra return rt_rq->rt_runtime; 449391e43daSPeter Zijlstra } 450391e43daSPeter Zijlstra 451391e43daSPeter Zijlstra static inline u64 sched_rt_period(struct rt_rq *rt_rq) 452391e43daSPeter Zijlstra { 453391e43daSPeter Zijlstra return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); 454391e43daSPeter Zijlstra } 455391e43daSPeter Zijlstra 456391e43daSPeter Zijlstra typedef struct task_group *rt_rq_iter_t; 457391e43daSPeter Zijlstra 458391e43daSPeter Zijlstra static inline struct task_group *next_task_group(struct task_group *tg) 459391e43daSPeter Zijlstra { 460391e43daSPeter Zijlstra do { 461391e43daSPeter Zijlstra tg = list_entry_rcu(tg->list.next, 462391e43daSPeter Zijlstra typeof(struct task_group), list); 463391e43daSPeter Zijlstra } while (&tg->list != &task_groups && task_group_is_autogroup(tg)); 464391e43daSPeter Zijlstra 465391e43daSPeter Zijlstra if (&tg->list == &task_groups) 466391e43daSPeter Zijlstra tg = NULL; 467391e43daSPeter Zijlstra 468391e43daSPeter Zijlstra return tg; 469391e43daSPeter Zijlstra } 470391e43daSPeter Zijlstra 471391e43daSPeter Zijlstra #define for_each_rt_rq(rt_rq, iter, rq) \ 472391e43daSPeter Zijlstra for (iter = container_of(&task_groups, typeof(*iter), list); \ 473391e43daSPeter Zijlstra (iter = next_task_group(iter)) && \ 474391e43daSPeter Zijlstra (rt_rq = iter->rt_rq[cpu_of(rq)]);) 475391e43daSPeter Zijlstra 476391e43daSPeter Zijlstra #define for_each_sched_rt_entity(rt_se) \ 477391e43daSPeter Zijlstra for (; rt_se; rt_se = rt_se->parent) 478391e43daSPeter Zijlstra 479391e43daSPeter Zijlstra static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) 480391e43daSPeter Zijlstra { 481391e43daSPeter Zijlstra return rt_se->my_q; 482391e43daSPeter Zijlstra } 483391e43daSPeter Zijlstra 484ff77e468SPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags); 485ff77e468SPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags); 486391e43daSPeter Zijlstra 487391e43daSPeter Zijlstra static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) 488391e43daSPeter Zijlstra { 489391e43daSPeter Zijlstra struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; 4908875125eSKirill Tkhai struct rq *rq = rq_of_rt_rq(rt_rq); 491391e43daSPeter Zijlstra struct sched_rt_entity *rt_se; 492391e43daSPeter Zijlstra 4938875125eSKirill Tkhai int cpu = cpu_of(rq); 494391e43daSPeter Zijlstra 495391e43daSPeter Zijlstra rt_se = rt_rq->tg->rt_se[cpu]; 496391e43daSPeter Zijlstra 497391e43daSPeter Zijlstra if (rt_rq->rt_nr_running) { 498f4ebcbc0SKirill Tkhai if (!rt_se) 499f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(rt_rq); 500f4ebcbc0SKirill Tkhai else if (!on_rt_rq(rt_se)) 501ff77e468SPeter Zijlstra enqueue_rt_entity(rt_se, 0); 502f4ebcbc0SKirill Tkhai 503391e43daSPeter Zijlstra if (rt_rq->highest_prio.curr < curr->prio) 5048875125eSKirill Tkhai resched_curr(rq); 505391e43daSPeter Zijlstra } 506391e43daSPeter Zijlstra } 507391e43daSPeter Zijlstra 
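/*
 * Mirror of sched_rt_rq_enqueue(): a group rt_rq is represented by its
 * rt_se on the parent queue, while the top-level rt_rq is detached from
 * the runqueue's accounting directly via dequeue_top_rt_rq().
 */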
508391e43daSPeter Zijlstra static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) 509391e43daSPeter Zijlstra { 510391e43daSPeter Zijlstra struct sched_rt_entity *rt_se; 511391e43daSPeter Zijlstra int cpu = cpu_of(rq_of_rt_rq(rt_rq)); 512391e43daSPeter Zijlstra 513391e43daSPeter Zijlstra rt_se = rt_rq->tg->rt_se[cpu]; 514391e43daSPeter Zijlstra 515f4ebcbc0SKirill Tkhai if (!rt_se) 516f4ebcbc0SKirill Tkhai dequeue_top_rt_rq(rt_rq); 517f4ebcbc0SKirill Tkhai else if (on_rt_rq(rt_se)) 518ff77e468SPeter Zijlstra dequeue_rt_entity(rt_se, 0); 519391e43daSPeter Zijlstra } 520391e43daSPeter Zijlstra 52146383648SKirill Tkhai static inline int rt_rq_throttled(struct rt_rq *rt_rq) 52246383648SKirill Tkhai { 52346383648SKirill Tkhai return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; 52446383648SKirill Tkhai } 52546383648SKirill Tkhai 526391e43daSPeter Zijlstra static int rt_se_boosted(struct sched_rt_entity *rt_se) 527391e43daSPeter Zijlstra { 528391e43daSPeter Zijlstra struct rt_rq *rt_rq = group_rt_rq(rt_se); 529391e43daSPeter Zijlstra struct task_struct *p; 530391e43daSPeter Zijlstra 531391e43daSPeter Zijlstra if (rt_rq) 532391e43daSPeter Zijlstra return !!rt_rq->rt_nr_boosted; 533391e43daSPeter Zijlstra 534391e43daSPeter Zijlstra p = rt_task_of(rt_se); 535391e43daSPeter Zijlstra return p->prio != p->normal_prio; 536391e43daSPeter Zijlstra } 537391e43daSPeter Zijlstra 538391e43daSPeter Zijlstra #ifdef CONFIG_SMP 539391e43daSPeter Zijlstra static inline const struct cpumask *sched_rt_period_mask(void) 540391e43daSPeter Zijlstra { 541424c93feSNathan Zimmer return this_rq()->rd->span; 542391e43daSPeter Zijlstra } 543391e43daSPeter Zijlstra #else 544391e43daSPeter Zijlstra static inline const struct cpumask *sched_rt_period_mask(void) 545391e43daSPeter Zijlstra { 546391e43daSPeter Zijlstra return cpu_online_mask; 547391e43daSPeter Zijlstra } 548391e43daSPeter Zijlstra #endif 549391e43daSPeter Zijlstra 550391e43daSPeter Zijlstra static inline 551391e43daSPeter Zijlstra struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) 552391e43daSPeter Zijlstra { 553391e43daSPeter Zijlstra return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; 554391e43daSPeter Zijlstra } 555391e43daSPeter Zijlstra 556391e43daSPeter Zijlstra static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) 557391e43daSPeter Zijlstra { 558391e43daSPeter Zijlstra return &rt_rq->tg->rt_bandwidth; 559391e43daSPeter Zijlstra } 560391e43daSPeter Zijlstra 561391e43daSPeter Zijlstra #else /* !CONFIG_RT_GROUP_SCHED */ 562391e43daSPeter Zijlstra 563391e43daSPeter Zijlstra static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) 564391e43daSPeter Zijlstra { 565391e43daSPeter Zijlstra return rt_rq->rt_runtime; 566391e43daSPeter Zijlstra } 567391e43daSPeter Zijlstra 568391e43daSPeter Zijlstra static inline u64 sched_rt_period(struct rt_rq *rt_rq) 569391e43daSPeter Zijlstra { 570391e43daSPeter Zijlstra return ktime_to_ns(def_rt_bandwidth.rt_period); 571391e43daSPeter Zijlstra } 572391e43daSPeter Zijlstra 573391e43daSPeter Zijlstra typedef struct rt_rq *rt_rq_iter_t; 574391e43daSPeter Zijlstra 575391e43daSPeter Zijlstra #define for_each_rt_rq(rt_rq, iter, rq) \ 576391e43daSPeter Zijlstra for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) 577391e43daSPeter Zijlstra 578391e43daSPeter Zijlstra #define for_each_sched_rt_entity(rt_se) \ 579391e43daSPeter Zijlstra for (; rt_se; rt_se = NULL) 580391e43daSPeter Zijlstra 581391e43daSPeter Zijlstra static inline struct rt_rq *group_rt_rq(struct sched_rt_entity 
*rt_se) 582391e43daSPeter Zijlstra { 583391e43daSPeter Zijlstra return NULL; 584391e43daSPeter Zijlstra } 585391e43daSPeter Zijlstra 586391e43daSPeter Zijlstra static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) 587391e43daSPeter Zijlstra { 588f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_rq(rt_rq); 589f4ebcbc0SKirill Tkhai 590f4ebcbc0SKirill Tkhai if (!rt_rq->rt_nr_running) 591f4ebcbc0SKirill Tkhai return; 592f4ebcbc0SKirill Tkhai 593f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(rt_rq); 5948875125eSKirill Tkhai resched_curr(rq); 595391e43daSPeter Zijlstra } 596391e43daSPeter Zijlstra 597391e43daSPeter Zijlstra static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) 598391e43daSPeter Zijlstra { 599f4ebcbc0SKirill Tkhai dequeue_top_rt_rq(rt_rq); 600391e43daSPeter Zijlstra } 601391e43daSPeter Zijlstra 60246383648SKirill Tkhai static inline int rt_rq_throttled(struct rt_rq *rt_rq) 60346383648SKirill Tkhai { 60446383648SKirill Tkhai return rt_rq->rt_throttled; 60546383648SKirill Tkhai } 60646383648SKirill Tkhai 607391e43daSPeter Zijlstra static inline const struct cpumask *sched_rt_period_mask(void) 608391e43daSPeter Zijlstra { 609391e43daSPeter Zijlstra return cpu_online_mask; 610391e43daSPeter Zijlstra } 611391e43daSPeter Zijlstra 612391e43daSPeter Zijlstra static inline 613391e43daSPeter Zijlstra struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) 614391e43daSPeter Zijlstra { 615391e43daSPeter Zijlstra return &cpu_rq(cpu)->rt; 616391e43daSPeter Zijlstra } 617391e43daSPeter Zijlstra 618391e43daSPeter Zijlstra static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) 619391e43daSPeter Zijlstra { 620391e43daSPeter Zijlstra return &def_rt_bandwidth; 621391e43daSPeter Zijlstra } 622391e43daSPeter Zijlstra 623391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */ 624391e43daSPeter Zijlstra 625faa59937SJuri Lelli bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) 626faa59937SJuri Lelli { 627faa59937SJuri Lelli struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 628faa59937SJuri Lelli 629faa59937SJuri Lelli return (hrtimer_active(&rt_b->rt_period_timer) || 630faa59937SJuri Lelli rt_rq->rt_time < rt_b->rt_runtime); 631faa59937SJuri Lelli } 632faa59937SJuri Lelli 633391e43daSPeter Zijlstra #ifdef CONFIG_SMP 634391e43daSPeter Zijlstra /* 635391e43daSPeter Zijlstra * We ran out of runtime, see if we can borrow some from our neighbours. 
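 * Each runqueue in the root domain may donate up to 1/n of its spare
 * budget (n being the weight of rd->span), and we stop early once we
 * have collected a full period's worth.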
636391e43daSPeter Zijlstra */ 637269b26a5SJuri Lelli static void do_balance_runtime(struct rt_rq *rt_rq) 638391e43daSPeter Zijlstra { 639391e43daSPeter Zijlstra struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 640aa7f6730SShawn Bohrer struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; 641269b26a5SJuri Lelli int i, weight; 642391e43daSPeter Zijlstra u64 rt_period; 643391e43daSPeter Zijlstra 644391e43daSPeter Zijlstra weight = cpumask_weight(rd->span); 645391e43daSPeter Zijlstra 646391e43daSPeter Zijlstra raw_spin_lock(&rt_b->rt_runtime_lock); 647391e43daSPeter Zijlstra rt_period = ktime_to_ns(rt_b->rt_period); 648391e43daSPeter Zijlstra for_each_cpu(i, rd->span) { 649391e43daSPeter Zijlstra struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 650391e43daSPeter Zijlstra s64 diff; 651391e43daSPeter Zijlstra 652391e43daSPeter Zijlstra if (iter == rt_rq) 653391e43daSPeter Zijlstra continue; 654391e43daSPeter Zijlstra 655391e43daSPeter Zijlstra raw_spin_lock(&iter->rt_runtime_lock); 656391e43daSPeter Zijlstra /* 657391e43daSPeter Zijlstra * Either all rqs have inf runtime and there's nothing to steal 658391e43daSPeter Zijlstra * or __disable_runtime() below sets a specific rq to inf to 659391e43daSPeter Zijlstra * indicate it's been disabled and disallow stealing. 660391e43daSPeter Zijlstra */ 661391e43daSPeter Zijlstra if (iter->rt_runtime == RUNTIME_INF) 662391e43daSPeter Zijlstra goto next; 663391e43daSPeter Zijlstra 664391e43daSPeter Zijlstra /* 665391e43daSPeter Zijlstra * From runqueues with spare time, take 1/n part of their 666391e43daSPeter Zijlstra * spare time, but no more than our period. 667391e43daSPeter Zijlstra */ 668391e43daSPeter Zijlstra diff = iter->rt_runtime - iter->rt_time; 669391e43daSPeter Zijlstra if (diff > 0) { 670391e43daSPeter Zijlstra diff = div_u64((u64)diff, weight); 671391e43daSPeter Zijlstra if (rt_rq->rt_runtime + diff > rt_period) 672391e43daSPeter Zijlstra diff = rt_period - rt_rq->rt_runtime; 673391e43daSPeter Zijlstra iter->rt_runtime -= diff; 674391e43daSPeter Zijlstra rt_rq->rt_runtime += diff; 675391e43daSPeter Zijlstra if (rt_rq->rt_runtime == rt_period) { 676391e43daSPeter Zijlstra raw_spin_unlock(&iter->rt_runtime_lock); 677391e43daSPeter Zijlstra break; 678391e43daSPeter Zijlstra } 679391e43daSPeter Zijlstra } 680391e43daSPeter Zijlstra next: 681391e43daSPeter Zijlstra raw_spin_unlock(&iter->rt_runtime_lock); 682391e43daSPeter Zijlstra } 683391e43daSPeter Zijlstra raw_spin_unlock(&rt_b->rt_runtime_lock); 684391e43daSPeter Zijlstra } 685391e43daSPeter Zijlstra 686391e43daSPeter Zijlstra /* 687391e43daSPeter Zijlstra * Ensure this RQ takes back all the runtime it lent to its neighbours.
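 * (This runs from the rq-offline path, so no bandwidth stays parked on
 * a CPU that can no longer consume it.)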
688391e43daSPeter Zijlstra */ 689391e43daSPeter Zijlstra static void __disable_runtime(struct rq *rq) 690391e43daSPeter Zijlstra { 691391e43daSPeter Zijlstra struct root_domain *rd = rq->rd; 692391e43daSPeter Zijlstra rt_rq_iter_t iter; 693391e43daSPeter Zijlstra struct rt_rq *rt_rq; 694391e43daSPeter Zijlstra 695391e43daSPeter Zijlstra if (unlikely(!scheduler_running)) 696391e43daSPeter Zijlstra return; 697391e43daSPeter Zijlstra 698391e43daSPeter Zijlstra for_each_rt_rq(rt_rq, iter, rq) { 699391e43daSPeter Zijlstra struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 700391e43daSPeter Zijlstra s64 want; 701391e43daSPeter Zijlstra int i; 702391e43daSPeter Zijlstra 703391e43daSPeter Zijlstra raw_spin_lock(&rt_b->rt_runtime_lock); 704391e43daSPeter Zijlstra raw_spin_lock(&rt_rq->rt_runtime_lock); 705391e43daSPeter Zijlstra /* 706391e43daSPeter Zijlstra * Either we're all inf and nobody needs to borrow, or we're 707391e43daSPeter Zijlstra * already disabled and thus have nothing to do, or we have 708391e43daSPeter Zijlstra * exactly the right amount of runtime to take out. 709391e43daSPeter Zijlstra */ 710391e43daSPeter Zijlstra if (rt_rq->rt_runtime == RUNTIME_INF || 711391e43daSPeter Zijlstra rt_rq->rt_runtime == rt_b->rt_runtime) 712391e43daSPeter Zijlstra goto balanced; 713391e43daSPeter Zijlstra raw_spin_unlock(&rt_rq->rt_runtime_lock); 714391e43daSPeter Zijlstra 715391e43daSPeter Zijlstra /* 716391e43daSPeter Zijlstra * Calculate the difference between what we started out with 717391e43daSPeter Zijlstra * and what we currently have, that's the amount of runtime 718391e43daSPeter Zijlstra * we lent and now have to reclaim. 719391e43daSPeter Zijlstra */ 720391e43daSPeter Zijlstra want = rt_b->rt_runtime - rt_rq->rt_runtime; 721391e43daSPeter Zijlstra 722391e43daSPeter Zijlstra /* 723391e43daSPeter Zijlstra * Greedy reclaim, take back as much as we can. 724391e43daSPeter Zijlstra */ 725391e43daSPeter Zijlstra for_each_cpu(i, rd->span) { 726391e43daSPeter Zijlstra struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 727391e43daSPeter Zijlstra s64 diff; 728391e43daSPeter Zijlstra 729391e43daSPeter Zijlstra /* 730391e43daSPeter Zijlstra * Can't reclaim from ourselves or disabled runqueues. 731391e43daSPeter Zijlstra */ 732391e43daSPeter Zijlstra if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) 733391e43daSPeter Zijlstra continue; 734391e43daSPeter Zijlstra 735391e43daSPeter Zijlstra raw_spin_lock(&iter->rt_runtime_lock); 736391e43daSPeter Zijlstra if (want > 0) { 737391e43daSPeter Zijlstra diff = min_t(s64, iter->rt_runtime, want); 738391e43daSPeter Zijlstra iter->rt_runtime -= diff; 739391e43daSPeter Zijlstra want -= diff; 740391e43daSPeter Zijlstra } else { 741391e43daSPeter Zijlstra iter->rt_runtime -= want; 742391e43daSPeter Zijlstra want -= want; 743391e43daSPeter Zijlstra } 744391e43daSPeter Zijlstra raw_spin_unlock(&iter->rt_runtime_lock); 745391e43daSPeter Zijlstra 746391e43daSPeter Zijlstra if (!want) 747391e43daSPeter Zijlstra break; 748391e43daSPeter Zijlstra } 749391e43daSPeter Zijlstra 750391e43daSPeter Zijlstra raw_spin_lock(&rt_rq->rt_runtime_lock); 751391e43daSPeter Zijlstra /* 752391e43daSPeter Zijlstra * We cannot be left wanting - that would mean some runtime 753391e43daSPeter Zijlstra * leaked out of the system.
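 * Every nanosecond we lent out must still be parked on some runqueue
 * in the domain, hence the BUG_ON() below.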
754391e43daSPeter Zijlstra */ 755391e43daSPeter Zijlstra BUG_ON(want); 756391e43daSPeter Zijlstra balanced: 757391e43daSPeter Zijlstra /* 758391e43daSPeter Zijlstra * Disable all the borrow logic by pretending we have inf 759391e43daSPeter Zijlstra * runtime - in which case borrowing doesn't make sense. 760391e43daSPeter Zijlstra */ 761391e43daSPeter Zijlstra rt_rq->rt_runtime = RUNTIME_INF; 762a4c96ae3SPeter Boonstoppel rt_rq->rt_throttled = 0; 763391e43daSPeter Zijlstra raw_spin_unlock(&rt_rq->rt_runtime_lock); 764391e43daSPeter Zijlstra raw_spin_unlock(&rt_b->rt_runtime_lock); 76599b62567SKirill Tkhai 76699b62567SKirill Tkhai /* Make rt_rq available for pick_next_task() */ 76799b62567SKirill Tkhai sched_rt_rq_enqueue(rt_rq); 768391e43daSPeter Zijlstra } 769391e43daSPeter Zijlstra } 770391e43daSPeter Zijlstra 771391e43daSPeter Zijlstra static void __enable_runtime(struct rq *rq) 772391e43daSPeter Zijlstra { 773391e43daSPeter Zijlstra rt_rq_iter_t iter; 774391e43daSPeter Zijlstra struct rt_rq *rt_rq; 775391e43daSPeter Zijlstra 776391e43daSPeter Zijlstra if (unlikely(!scheduler_running)) 777391e43daSPeter Zijlstra return; 778391e43daSPeter Zijlstra 779391e43daSPeter Zijlstra /* 780391e43daSPeter Zijlstra * Reset each runqueue's bandwidth settings 781391e43daSPeter Zijlstra */ 782391e43daSPeter Zijlstra for_each_rt_rq(rt_rq, iter, rq) { 783391e43daSPeter Zijlstra struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 784391e43daSPeter Zijlstra 785391e43daSPeter Zijlstra raw_spin_lock(&rt_b->rt_runtime_lock); 786391e43daSPeter Zijlstra raw_spin_lock(&rt_rq->rt_runtime_lock); 787391e43daSPeter Zijlstra rt_rq->rt_runtime = rt_b->rt_runtime; 788391e43daSPeter Zijlstra rt_rq->rt_time = 0; 789391e43daSPeter Zijlstra rt_rq->rt_throttled = 0; 790391e43daSPeter Zijlstra raw_spin_unlock(&rt_rq->rt_runtime_lock); 791391e43daSPeter Zijlstra raw_spin_unlock(&rt_b->rt_runtime_lock); 792391e43daSPeter Zijlstra } 793391e43daSPeter Zijlstra } 794391e43daSPeter Zijlstra 795269b26a5SJuri Lelli static void balance_runtime(struct rt_rq *rt_rq) 796391e43daSPeter Zijlstra { 797391e43daSPeter Zijlstra if (!sched_feat(RT_RUNTIME_SHARE)) 798269b26a5SJuri Lelli return; 799391e43daSPeter Zijlstra 800391e43daSPeter Zijlstra if (rt_rq->rt_time > rt_rq->rt_runtime) { 801391e43daSPeter Zijlstra raw_spin_unlock(&rt_rq->rt_runtime_lock); 802269b26a5SJuri Lelli do_balance_runtime(rt_rq); 803391e43daSPeter Zijlstra raw_spin_lock(&rt_rq->rt_runtime_lock); 804391e43daSPeter Zijlstra } 805391e43daSPeter Zijlstra } 806391e43daSPeter Zijlstra #else /* !CONFIG_SMP */ 807269b26a5SJuri Lelli static inline void balance_runtime(struct rt_rq *rt_rq) {} 808391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 809391e43daSPeter Zijlstra 810391e43daSPeter Zijlstra static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) 811391e43daSPeter Zijlstra { 81242c62a58SPeter Zijlstra int i, idle = 1, throttled = 0; 813391e43daSPeter Zijlstra const struct cpumask *span; 814391e43daSPeter Zijlstra 815391e43daSPeter Zijlstra span = sched_rt_period_mask(); 816e221d028SMike Galbraith #ifdef CONFIG_RT_GROUP_SCHED 817e221d028SMike Galbraith /* 818e221d028SMike Galbraith * FIXME: isolated CPUs should really leave the root task group, 819e221d028SMike Galbraith * whether they are isolcpus or were isolated via cpusets, lest 820e221d028SMike Galbraith * the timer run on a CPU which does not service all runqueues, 821e221d028SMike Galbraith * potentially leaving other CPUs indefinitely throttled. 
If 822e221d028SMike Galbraith * isolation is really required, the user will turn the throttle 823e221d028SMike Galbraith * off to kill the perturbations it causes anyway. Meanwhile, 824e221d028SMike Galbraith * this maintains functionality for boot and/or troubleshooting. 825e221d028SMike Galbraith */ 826e221d028SMike Galbraith if (rt_b == &root_task_group.rt_bandwidth) 827e221d028SMike Galbraith span = cpu_online_mask; 828e221d028SMike Galbraith #endif 829391e43daSPeter Zijlstra for_each_cpu(i, span) { 830391e43daSPeter Zijlstra int enqueue = 0; 831391e43daSPeter Zijlstra struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); 832391e43daSPeter Zijlstra struct rq *rq = rq_of_rt_rq(rt_rq); 833c249f255SDave Kleikamp int skip; 834c249f255SDave Kleikamp 835c249f255SDave Kleikamp /* 836c249f255SDave Kleikamp * When span == cpu_online_mask, taking each rq->lock 837c249f255SDave Kleikamp * can be time-consuming. Try to avoid it when possible. 838c249f255SDave Kleikamp */ 839c249f255SDave Kleikamp raw_spin_lock(&rt_rq->rt_runtime_lock); 840c249f255SDave Kleikamp skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; 841c249f255SDave Kleikamp raw_spin_unlock(&rt_rq->rt_runtime_lock); 842c249f255SDave Kleikamp if (skip) 843c249f255SDave Kleikamp continue; 844391e43daSPeter Zijlstra 845391e43daSPeter Zijlstra raw_spin_lock(&rq->lock); 846391e43daSPeter Zijlstra if (rt_rq->rt_time) { 847391e43daSPeter Zijlstra u64 runtime; 848391e43daSPeter Zijlstra 849391e43daSPeter Zijlstra raw_spin_lock(&rt_rq->rt_runtime_lock); 850391e43daSPeter Zijlstra if (rt_rq->rt_throttled) 851391e43daSPeter Zijlstra balance_runtime(rt_rq); 852391e43daSPeter Zijlstra runtime = rt_rq->rt_runtime; 853391e43daSPeter Zijlstra rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); 854391e43daSPeter Zijlstra if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { 855391e43daSPeter Zijlstra rt_rq->rt_throttled = 0; 856391e43daSPeter Zijlstra enqueue = 1; 857391e43daSPeter Zijlstra 858391e43daSPeter Zijlstra /* 8599edfbfedSPeter Zijlstra * When we're idle and a woken (rt) task is 8609edfbfedSPeter Zijlstra * throttled check_preempt_curr() will set 8619edfbfedSPeter Zijlstra * skip_update and the time between the wakeup 8629edfbfedSPeter Zijlstra * and this unthrottle will get accounted as 8639edfbfedSPeter Zijlstra * 'runtime'. 
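 * Clearing the pending skip request below forces a fresh clock, so
 * that gap is not charged to the freshly unthrottled tasks.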
864391e43daSPeter Zijlstra */ 865391e43daSPeter Zijlstra if (rt_rq->rt_nr_running && rq->curr == rq->idle) 8669edfbfedSPeter Zijlstra rq_clock_skip_update(rq, false); 867391e43daSPeter Zijlstra } 868391e43daSPeter Zijlstra if (rt_rq->rt_time || rt_rq->rt_nr_running) 869391e43daSPeter Zijlstra idle = 0; 870391e43daSPeter Zijlstra raw_spin_unlock(&rt_rq->rt_runtime_lock); 871391e43daSPeter Zijlstra } else if (rt_rq->rt_nr_running) { 872391e43daSPeter Zijlstra idle = 0; 873391e43daSPeter Zijlstra if (!rt_rq_throttled(rt_rq)) 874391e43daSPeter Zijlstra enqueue = 1; 875391e43daSPeter Zijlstra } 87642c62a58SPeter Zijlstra if (rt_rq->rt_throttled) 87742c62a58SPeter Zijlstra throttled = 1; 878391e43daSPeter Zijlstra 879391e43daSPeter Zijlstra if (enqueue) 880391e43daSPeter Zijlstra sched_rt_rq_enqueue(rt_rq); 881391e43daSPeter Zijlstra raw_spin_unlock(&rq->lock); 882391e43daSPeter Zijlstra } 883391e43daSPeter Zijlstra 88442c62a58SPeter Zijlstra if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)) 88542c62a58SPeter Zijlstra return 1; 88642c62a58SPeter Zijlstra 887391e43daSPeter Zijlstra return idle; 888391e43daSPeter Zijlstra } 889391e43daSPeter Zijlstra 890391e43daSPeter Zijlstra static inline int rt_se_prio(struct sched_rt_entity *rt_se) 891391e43daSPeter Zijlstra { 892391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 893391e43daSPeter Zijlstra struct rt_rq *rt_rq = group_rt_rq(rt_se); 894391e43daSPeter Zijlstra 895391e43daSPeter Zijlstra if (rt_rq) 896391e43daSPeter Zijlstra return rt_rq->highest_prio.curr; 897391e43daSPeter Zijlstra #endif 898391e43daSPeter Zijlstra 899391e43daSPeter Zijlstra return rt_task_of(rt_se)->prio; 900391e43daSPeter Zijlstra } 901391e43daSPeter Zijlstra 902391e43daSPeter Zijlstra static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) 903391e43daSPeter Zijlstra { 904391e43daSPeter Zijlstra u64 runtime = sched_rt_runtime(rt_rq); 905391e43daSPeter Zijlstra 906391e43daSPeter Zijlstra if (rt_rq->rt_throttled) 907391e43daSPeter Zijlstra return rt_rq_throttled(rt_rq); 908391e43daSPeter Zijlstra 9095b680fd6SShan Hai if (runtime >= sched_rt_period(rt_rq)) 910391e43daSPeter Zijlstra return 0; 911391e43daSPeter Zijlstra 912391e43daSPeter Zijlstra balance_runtime(rt_rq); 913391e43daSPeter Zijlstra runtime = sched_rt_runtime(rt_rq); 914391e43daSPeter Zijlstra if (runtime == RUNTIME_INF) 915391e43daSPeter Zijlstra return 0; 916391e43daSPeter Zijlstra 917391e43daSPeter Zijlstra if (rt_rq->rt_time > runtime) { 9187abc63b1SPeter Zijlstra struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 9197abc63b1SPeter Zijlstra 9207abc63b1SPeter Zijlstra /* 9217abc63b1SPeter Zijlstra * Don't actually throttle groups that have no runtime assigned 9227abc63b1SPeter Zijlstra * but accrue some time due to boosting. 9237abc63b1SPeter Zijlstra */ 9247abc63b1SPeter Zijlstra if (likely(rt_b->rt_runtime)) { 925391e43daSPeter Zijlstra rt_rq->rt_throttled = 1; 926c224815dSJohn Stultz printk_deferred_once("sched: RT throttling activated\n"); 9277abc63b1SPeter Zijlstra } else { 9287abc63b1SPeter Zijlstra /* 9297abc63b1SPeter Zijlstra * In case we did anyway, make it go away, 9307abc63b1SPeter Zijlstra * replenishment is a joke, since it will replenish us 9317abc63b1SPeter Zijlstra * with exactly 0 ns. 
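 * Resetting rt_time instead is the cheap way out: with a zero budget,
 * a throttle taken here could never be lifted by replenishment.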
9327abc63b1SPeter Zijlstra */ 9337abc63b1SPeter Zijlstra rt_rq->rt_time = 0; 9347abc63b1SPeter Zijlstra } 9357abc63b1SPeter Zijlstra 936391e43daSPeter Zijlstra if (rt_rq_throttled(rt_rq)) { 937391e43daSPeter Zijlstra sched_rt_rq_dequeue(rt_rq); 938391e43daSPeter Zijlstra return 1; 939391e43daSPeter Zijlstra } 940391e43daSPeter Zijlstra } 941391e43daSPeter Zijlstra 942391e43daSPeter Zijlstra return 0; 943391e43daSPeter Zijlstra } 944391e43daSPeter Zijlstra 945391e43daSPeter Zijlstra /* 946391e43daSPeter Zijlstra * Update the current task's runtime statistics. Skip current tasks that 947391e43daSPeter Zijlstra * are not in our scheduling class. 948391e43daSPeter Zijlstra */ 949391e43daSPeter Zijlstra static void update_curr_rt(struct rq *rq) 950391e43daSPeter Zijlstra { 951391e43daSPeter Zijlstra struct task_struct *curr = rq->curr; 952391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &curr->rt; 953391e43daSPeter Zijlstra u64 delta_exec; 954391e43daSPeter Zijlstra 955391e43daSPeter Zijlstra if (curr->sched_class != &rt_sched_class) 956391e43daSPeter Zijlstra return; 957391e43daSPeter Zijlstra 95878becc27SFrederic Weisbecker delta_exec = rq_clock_task(rq) - curr->se.exec_start; 959fc79e240SKirill Tkhai if (unlikely((s64)delta_exec <= 0)) 960fc79e240SKirill Tkhai return; 961391e43daSPeter Zijlstra 96258919e83SRafael J. Wysocki /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ 963674e7541SViresh Kumar cpufreq_update_util(rq, SCHED_CPUFREQ_RT); 964594dd290SWanpeng Li 96542c62a58SPeter Zijlstra schedstat_set(curr->se.statistics.exec_max, 96642c62a58SPeter Zijlstra max(curr->se.statistics.exec_max, delta_exec)); 967391e43daSPeter Zijlstra 968391e43daSPeter Zijlstra curr->se.sum_exec_runtime += delta_exec; 969391e43daSPeter Zijlstra account_group_exec_runtime(curr, delta_exec); 970391e43daSPeter Zijlstra 97178becc27SFrederic Weisbecker curr->se.exec_start = rq_clock_task(rq); 972d2cc5ed6STejun Heo cgroup_account_cputime(curr, delta_exec); 973391e43daSPeter Zijlstra 974391e43daSPeter Zijlstra sched_rt_avg_update(rq, delta_exec); 975391e43daSPeter Zijlstra 976391e43daSPeter Zijlstra if (!rt_bandwidth_enabled()) 977391e43daSPeter Zijlstra return; 978391e43daSPeter Zijlstra 979391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 9800b07939cSGiedrius Rekasius struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 981391e43daSPeter Zijlstra 982391e43daSPeter Zijlstra if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { 983391e43daSPeter Zijlstra raw_spin_lock(&rt_rq->rt_runtime_lock); 984391e43daSPeter Zijlstra rt_rq->rt_time += delta_exec; 985391e43daSPeter Zijlstra if (sched_rt_runtime_exceeded(rt_rq)) 9868875125eSKirill Tkhai resched_curr(rq); 987391e43daSPeter Zijlstra raw_spin_unlock(&rt_rq->rt_runtime_lock); 988391e43daSPeter Zijlstra } 989391e43daSPeter Zijlstra } 990391e43daSPeter Zijlstra } 991391e43daSPeter Zijlstra 992f4ebcbc0SKirill Tkhai static void 993f4ebcbc0SKirill Tkhai dequeue_top_rt_rq(struct rt_rq *rt_rq) 994f4ebcbc0SKirill Tkhai { 995f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_rq(rt_rq); 996f4ebcbc0SKirill Tkhai 997f4ebcbc0SKirill Tkhai BUG_ON(&rq->rt != rt_rq); 998f4ebcbc0SKirill Tkhai 999f4ebcbc0SKirill Tkhai if (!rt_rq->rt_queued) 1000f4ebcbc0SKirill Tkhai return; 1001f4ebcbc0SKirill Tkhai 1002f4ebcbc0SKirill Tkhai BUG_ON(!rq->nr_running); 1003f4ebcbc0SKirill Tkhai 100472465447SKirill Tkhai sub_nr_running(rq, rt_rq->rt_nr_running); 1005f4ebcbc0SKirill Tkhai rt_rq->rt_queued = 0; 1006f4ebcbc0SKirill Tkhai } 1007f4ebcbc0SKirill Tkhai 1008f4ebcbc0SKirill Tkhai static void 
1009f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(struct rt_rq *rt_rq) 1010f4ebcbc0SKirill Tkhai { 1011f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_rq(rt_rq); 1012f4ebcbc0SKirill Tkhai 1013f4ebcbc0SKirill Tkhai BUG_ON(&rq->rt != rt_rq); 1014f4ebcbc0SKirill Tkhai 1015f4ebcbc0SKirill Tkhai if (rt_rq->rt_queued) 1016f4ebcbc0SKirill Tkhai return; 1017f4ebcbc0SKirill Tkhai if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running) 1018f4ebcbc0SKirill Tkhai return; 1019f4ebcbc0SKirill Tkhai 102072465447SKirill Tkhai add_nr_running(rq, rt_rq->rt_nr_running); 1021f4ebcbc0SKirill Tkhai rt_rq->rt_queued = 1; 1022f4ebcbc0SKirill Tkhai } 1023f4ebcbc0SKirill Tkhai 1024391e43daSPeter Zijlstra #if defined CONFIG_SMP 1025391e43daSPeter Zijlstra 1026391e43daSPeter Zijlstra static void 1027391e43daSPeter Zijlstra inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) 1028391e43daSPeter Zijlstra { 1029391e43daSPeter Zijlstra struct rq *rq = rq_of_rt_rq(rt_rq); 1030391e43daSPeter Zijlstra 1031757dfcaaSKirill Tkhai #ifdef CONFIG_RT_GROUP_SCHED 1032757dfcaaSKirill Tkhai /* 1033757dfcaaSKirill Tkhai * Change rq's cpupri only if rt_rq is the top queue. 1034757dfcaaSKirill Tkhai */ 1035757dfcaaSKirill Tkhai if (&rq->rt != rt_rq) 1036757dfcaaSKirill Tkhai return; 1037757dfcaaSKirill Tkhai #endif 1038391e43daSPeter Zijlstra if (rq->online && prio < prev_prio) 1039391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, prio); 1040391e43daSPeter Zijlstra } 1041391e43daSPeter Zijlstra 1042391e43daSPeter Zijlstra static void 1043391e43daSPeter Zijlstra dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) 1044391e43daSPeter Zijlstra { 1045391e43daSPeter Zijlstra struct rq *rq = rq_of_rt_rq(rt_rq); 1046391e43daSPeter Zijlstra 1047757dfcaaSKirill Tkhai #ifdef CONFIG_RT_GROUP_SCHED 1048757dfcaaSKirill Tkhai /* 1049757dfcaaSKirill Tkhai * Change rq's cpupri only if rt_rq is the top queue. 
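 * A child rt_rq only feeds into this CPU's own hierarchy, while cpupri
 * tracks the highest priority the CPU as a whole is running.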
1050757dfcaaSKirill Tkhai */ 1051757dfcaaSKirill Tkhai if (&rq->rt != rt_rq) 1052757dfcaaSKirill Tkhai return; 1053757dfcaaSKirill Tkhai #endif 1054391e43daSPeter Zijlstra if (rq->online && rt_rq->highest_prio.curr != prev_prio) 1055391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); 1056391e43daSPeter Zijlstra } 1057391e43daSPeter Zijlstra 1058391e43daSPeter Zijlstra #else /* CONFIG_SMP */ 1059391e43daSPeter Zijlstra 1060391e43daSPeter Zijlstra static inline 1061391e43daSPeter Zijlstra void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} 1062391e43daSPeter Zijlstra static inline 1063391e43daSPeter Zijlstra void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} 1064391e43daSPeter Zijlstra 1065391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 1066391e43daSPeter Zijlstra 1067391e43daSPeter Zijlstra #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 1068391e43daSPeter Zijlstra static void 1069391e43daSPeter Zijlstra inc_rt_prio(struct rt_rq *rt_rq, int prio) 1070391e43daSPeter Zijlstra { 1071391e43daSPeter Zijlstra int prev_prio = rt_rq->highest_prio.curr; 1072391e43daSPeter Zijlstra 1073391e43daSPeter Zijlstra if (prio < prev_prio) 1074391e43daSPeter Zijlstra rt_rq->highest_prio.curr = prio; 1075391e43daSPeter Zijlstra 1076391e43daSPeter Zijlstra inc_rt_prio_smp(rt_rq, prio, prev_prio); 1077391e43daSPeter Zijlstra } 1078391e43daSPeter Zijlstra 1079391e43daSPeter Zijlstra static void 1080391e43daSPeter Zijlstra dec_rt_prio(struct rt_rq *rt_rq, int prio) 1081391e43daSPeter Zijlstra { 1082391e43daSPeter Zijlstra int prev_prio = rt_rq->highest_prio.curr; 1083391e43daSPeter Zijlstra 1084391e43daSPeter Zijlstra if (rt_rq->rt_nr_running) { 1085391e43daSPeter Zijlstra 1086391e43daSPeter Zijlstra WARN_ON(prio < prev_prio); 1087391e43daSPeter Zijlstra 1088391e43daSPeter Zijlstra /* 1089391e43daSPeter Zijlstra * This may have been our highest task, and therefore 1090391e43daSPeter Zijlstra * we may have some recomputation to do 1091391e43daSPeter Zijlstra */ 1092391e43daSPeter Zijlstra if (prio == prev_prio) { 1093391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1094391e43daSPeter Zijlstra 1095391e43daSPeter Zijlstra rt_rq->highest_prio.curr = 1096391e43daSPeter Zijlstra sched_find_first_bit(array->bitmap); 1097391e43daSPeter Zijlstra } 1098391e43daSPeter Zijlstra 1099391e43daSPeter Zijlstra } else 1100391e43daSPeter Zijlstra rt_rq->highest_prio.curr = MAX_RT_PRIO; 1101391e43daSPeter Zijlstra 1102391e43daSPeter Zijlstra dec_rt_prio_smp(rt_rq, prio, prev_prio); 1103391e43daSPeter Zijlstra } 1104391e43daSPeter Zijlstra 1105391e43daSPeter Zijlstra #else 1106391e43daSPeter Zijlstra 1107391e43daSPeter Zijlstra static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} 1108391e43daSPeter Zijlstra static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} 1109391e43daSPeter Zijlstra 1110391e43daSPeter Zijlstra #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */ 1111391e43daSPeter Zijlstra 1112391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 1113391e43daSPeter Zijlstra 1114391e43daSPeter Zijlstra static void 1115391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1116391e43daSPeter Zijlstra { 1117391e43daSPeter Zijlstra if (rt_se_boosted(rt_se)) 1118391e43daSPeter Zijlstra rt_rq->rt_nr_boosted++; 1119391e43daSPeter Zijlstra 1120391e43daSPeter Zijlstra if (rt_rq->tg) 1121391e43daSPeter Zijlstra start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); 1122391e43daSPeter Zijlstra } 
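/*
 * Mirror of inc_rt_group(): only the boosted count is dropped here; the
 * bandwidth timer stops itself once its handler reports idle.
 */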
1123391e43daSPeter Zijlstra 1124391e43daSPeter Zijlstra static void 1125391e43daSPeter Zijlstra dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1126391e43daSPeter Zijlstra { 1127391e43daSPeter Zijlstra if (rt_se_boosted(rt_se)) 1128391e43daSPeter Zijlstra rt_rq->rt_nr_boosted--; 1129391e43daSPeter Zijlstra 1130391e43daSPeter Zijlstra WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); 1131391e43daSPeter Zijlstra } 1132391e43daSPeter Zijlstra 1133391e43daSPeter Zijlstra #else /* CONFIG_RT_GROUP_SCHED */ 1134391e43daSPeter Zijlstra 1135391e43daSPeter Zijlstra static void 1136391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1137391e43daSPeter Zijlstra { 1138391e43daSPeter Zijlstra start_rt_bandwidth(&def_rt_bandwidth); 1139391e43daSPeter Zijlstra } 1140391e43daSPeter Zijlstra 1141391e43daSPeter Zijlstra static inline 1142391e43daSPeter Zijlstra void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} 1143391e43daSPeter Zijlstra 1144391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */ 1145391e43daSPeter Zijlstra 1146391e43daSPeter Zijlstra static inline 114722abdef3SKirill Tkhai unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se) 114822abdef3SKirill Tkhai { 114922abdef3SKirill Tkhai struct rt_rq *group_rq = group_rt_rq(rt_se); 115022abdef3SKirill Tkhai 115122abdef3SKirill Tkhai if (group_rq) 115222abdef3SKirill Tkhai return group_rq->rt_nr_running; 115322abdef3SKirill Tkhai else 115422abdef3SKirill Tkhai return 1; 115522abdef3SKirill Tkhai } 115622abdef3SKirill Tkhai 115722abdef3SKirill Tkhai static inline 115801d36d0aSFrederic Weisbecker unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se) 115901d36d0aSFrederic Weisbecker { 116001d36d0aSFrederic Weisbecker struct rt_rq *group_rq = group_rt_rq(rt_se); 116101d36d0aSFrederic Weisbecker struct task_struct *tsk; 116201d36d0aSFrederic Weisbecker 116301d36d0aSFrederic Weisbecker if (group_rq) 116401d36d0aSFrederic Weisbecker return group_rq->rr_nr_running; 116501d36d0aSFrederic Weisbecker 116601d36d0aSFrederic Weisbecker tsk = rt_task_of(rt_se); 116701d36d0aSFrederic Weisbecker 116801d36d0aSFrederic Weisbecker return (tsk->policy == SCHED_RR) ? 
1 : 0; 116901d36d0aSFrederic Weisbecker } 117001d36d0aSFrederic Weisbecker 117101d36d0aSFrederic Weisbecker static inline 1172391e43daSPeter Zijlstra void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1173391e43daSPeter Zijlstra { 1174391e43daSPeter Zijlstra int prio = rt_se_prio(rt_se); 1175391e43daSPeter Zijlstra 1176391e43daSPeter Zijlstra WARN_ON(!rt_prio(prio)); 117722abdef3SKirill Tkhai rt_rq->rt_nr_running += rt_se_nr_running(rt_se); 117801d36d0aSFrederic Weisbecker rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se); 1179391e43daSPeter Zijlstra 1180391e43daSPeter Zijlstra inc_rt_prio(rt_rq, prio); 1181391e43daSPeter Zijlstra inc_rt_migration(rt_se, rt_rq); 1182391e43daSPeter Zijlstra inc_rt_group(rt_se, rt_rq); 1183391e43daSPeter Zijlstra } 1184391e43daSPeter Zijlstra 1185391e43daSPeter Zijlstra static inline 1186391e43daSPeter Zijlstra void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1187391e43daSPeter Zijlstra { 1188391e43daSPeter Zijlstra WARN_ON(!rt_prio(rt_se_prio(rt_se))); 1189391e43daSPeter Zijlstra WARN_ON(!rt_rq->rt_nr_running); 119022abdef3SKirill Tkhai rt_rq->rt_nr_running -= rt_se_nr_running(rt_se); 119101d36d0aSFrederic Weisbecker rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se); 1192391e43daSPeter Zijlstra 1193391e43daSPeter Zijlstra dec_rt_prio(rt_rq, rt_se_prio(rt_se)); 1194391e43daSPeter Zijlstra dec_rt_migration(rt_se, rt_rq); 1195391e43daSPeter Zijlstra dec_rt_group(rt_se, rt_rq); 1196391e43daSPeter Zijlstra } 1197391e43daSPeter Zijlstra 1198ff77e468SPeter Zijlstra /* 1199ff77e468SPeter Zijlstra * Change rt_se->run_list location unless SAVE && !MOVE 1200ff77e468SPeter Zijlstra * 1201ff77e468SPeter Zijlstra * assumes ENQUEUE/DEQUEUE flags match 1202ff77e468SPeter Zijlstra */ 1203ff77e468SPeter Zijlstra static inline bool move_entity(unsigned int flags) 1204ff77e468SPeter Zijlstra { 1205ff77e468SPeter Zijlstra if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE) 1206ff77e468SPeter Zijlstra return false; 1207ff77e468SPeter Zijlstra 1208ff77e468SPeter Zijlstra return true; 1209ff77e468SPeter Zijlstra } 1210ff77e468SPeter Zijlstra 1211ff77e468SPeter Zijlstra static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array) 1212ff77e468SPeter Zijlstra { 1213ff77e468SPeter Zijlstra list_del_init(&rt_se->run_list); 1214ff77e468SPeter Zijlstra 1215ff77e468SPeter Zijlstra if (list_empty(array->queue + rt_se_prio(rt_se))) 1216ff77e468SPeter Zijlstra __clear_bit(rt_se_prio(rt_se), array->bitmap); 1217ff77e468SPeter Zijlstra 1218ff77e468SPeter Zijlstra rt_se->on_list = 0; 1219ff77e468SPeter Zijlstra } 1220ff77e468SPeter Zijlstra 1221ff77e468SPeter Zijlstra static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1222391e43daSPeter Zijlstra { 1223391e43daSPeter Zijlstra struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 1224391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1225391e43daSPeter Zijlstra struct rt_rq *group_rq = group_rt_rq(rt_se); 1226391e43daSPeter Zijlstra struct list_head *queue = array->queue + rt_se_prio(rt_se); 1227391e43daSPeter Zijlstra 1228391e43daSPeter Zijlstra /* 1229391e43daSPeter Zijlstra * Don't enqueue the group if it's throttled, or when empty. 1230391e43daSPeter Zijlstra * The latter is a consequence of the former when a child group 1231391e43daSPeter Zijlstra * gets throttled and the current group doesn't have any other 1232391e43daSPeter Zijlstra * active members.
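 * * For example, with groups A/B: if B's rt_rq has exhausted its runtime * and is throttled, enqueueing a task into B must not put B's entity * on A's priority array; the entity is re-enqueued once * do_sched_rt_period_timer() replenishes B's runtime and unthrottles it.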
1233391e43daSPeter Zijlstra */ 1234ff77e468SPeter Zijlstra if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) { 1235ff77e468SPeter Zijlstra if (rt_se->on_list) 1236ff77e468SPeter Zijlstra __delist_rt_entity(rt_se, array); 1237391e43daSPeter Zijlstra return; 1238ff77e468SPeter Zijlstra } 1239391e43daSPeter Zijlstra 1240ff77e468SPeter Zijlstra if (move_entity(flags)) { 1241ff77e468SPeter Zijlstra WARN_ON_ONCE(rt_se->on_list); 1242ff77e468SPeter Zijlstra if (flags & ENQUEUE_HEAD) 1243391e43daSPeter Zijlstra list_add(&rt_se->run_list, queue); 1244391e43daSPeter Zijlstra else 1245391e43daSPeter Zijlstra list_add_tail(&rt_se->run_list, queue); 1246ff77e468SPeter Zijlstra 1247391e43daSPeter Zijlstra __set_bit(rt_se_prio(rt_se), array->bitmap); 1248ff77e468SPeter Zijlstra rt_se->on_list = 1; 1249ff77e468SPeter Zijlstra } 1250ff77e468SPeter Zijlstra rt_se->on_rq = 1; 1251391e43daSPeter Zijlstra 1252391e43daSPeter Zijlstra inc_rt_tasks(rt_se, rt_rq); 1253391e43daSPeter Zijlstra } 1254391e43daSPeter Zijlstra 1255ff77e468SPeter Zijlstra static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1256391e43daSPeter Zijlstra { 1257391e43daSPeter Zijlstra struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 1258391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1259391e43daSPeter Zijlstra 1260ff77e468SPeter Zijlstra if (move_entity(flags)) { 1261ff77e468SPeter Zijlstra WARN_ON_ONCE(!rt_se->on_list); 1262ff77e468SPeter Zijlstra __delist_rt_entity(rt_se, array); 1263ff77e468SPeter Zijlstra } 1264ff77e468SPeter Zijlstra rt_se->on_rq = 0; 1265391e43daSPeter Zijlstra 1266391e43daSPeter Zijlstra dec_rt_tasks(rt_se, rt_rq); 1267391e43daSPeter Zijlstra } 1268391e43daSPeter Zijlstra 1269391e43daSPeter Zijlstra /* 1270391e43daSPeter Zijlstra * Because the prio of an upper entry depends on the lower 1271391e43daSPeter Zijlstra * entries, we must remove entries top-down.
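 * * e.g. for a task T in group A/B the entity chain is T -> B -> A; we * dequeue A's entity from the root rq first, then B's from A, then T's * from B, since the prio at which A was queued is derived from B and T.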
1272391e43daSPeter Zijlstra */ 1273ff77e468SPeter Zijlstra static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags) 1274391e43daSPeter Zijlstra { 1275391e43daSPeter Zijlstra struct sched_rt_entity *back = NULL; 1276391e43daSPeter Zijlstra 1277391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 1278391e43daSPeter Zijlstra rt_se->back = back; 1279391e43daSPeter Zijlstra back = rt_se; 1280391e43daSPeter Zijlstra } 1281391e43daSPeter Zijlstra 1282f4ebcbc0SKirill Tkhai dequeue_top_rt_rq(rt_rq_of_se(back)); 1283f4ebcbc0SKirill Tkhai 1284391e43daSPeter Zijlstra for (rt_se = back; rt_se; rt_se = rt_se->back) { 1285391e43daSPeter Zijlstra if (on_rt_rq(rt_se)) 1286ff77e468SPeter Zijlstra __dequeue_rt_entity(rt_se, flags); 1287391e43daSPeter Zijlstra } 1288391e43daSPeter Zijlstra } 1289391e43daSPeter Zijlstra 1290ff77e468SPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1291391e43daSPeter Zijlstra { 1292f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_se(rt_se); 1293f4ebcbc0SKirill Tkhai 1294ff77e468SPeter Zijlstra dequeue_rt_stack(rt_se, flags); 1295391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) 1296ff77e468SPeter Zijlstra __enqueue_rt_entity(rt_se, flags); 1297f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(&rq->rt); 1298391e43daSPeter Zijlstra } 1299391e43daSPeter Zijlstra 1300ff77e468SPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1301391e43daSPeter Zijlstra { 1302f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_se(rt_se); 1303f4ebcbc0SKirill Tkhai 1304ff77e468SPeter Zijlstra dequeue_rt_stack(rt_se, flags); 1305391e43daSPeter Zijlstra 1306391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 1307391e43daSPeter Zijlstra struct rt_rq *rt_rq = group_rt_rq(rt_se); 1308391e43daSPeter Zijlstra 1309391e43daSPeter Zijlstra if (rt_rq && rt_rq->rt_nr_running) 1310ff77e468SPeter Zijlstra __enqueue_rt_entity(rt_se, flags); 1311391e43daSPeter Zijlstra } 1312f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(&rq->rt); 1313391e43daSPeter Zijlstra } 1314391e43daSPeter Zijlstra 1315391e43daSPeter Zijlstra /* 1316391e43daSPeter Zijlstra * Adding/removing a task to/from a priority array: 1317391e43daSPeter Zijlstra */ 1318391e43daSPeter Zijlstra static void 1319391e43daSPeter Zijlstra enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) 1320391e43daSPeter Zijlstra { 1321391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt; 1322391e43daSPeter Zijlstra 1323391e43daSPeter Zijlstra if (flags & ENQUEUE_WAKEUP) 1324391e43daSPeter Zijlstra rt_se->timeout = 0; 1325391e43daSPeter Zijlstra 1326ff77e468SPeter Zijlstra enqueue_rt_entity(rt_se, flags); 1327391e43daSPeter Zijlstra 13284b53a341SIngo Molnar if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 1329391e43daSPeter Zijlstra enqueue_pushable_task(rq, p); 1330391e43daSPeter Zijlstra } 1331391e43daSPeter Zijlstra 1332391e43daSPeter Zijlstra static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) 1333391e43daSPeter Zijlstra { 1334391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt; 1335391e43daSPeter Zijlstra 1336391e43daSPeter Zijlstra update_curr_rt(rq); 1337ff77e468SPeter Zijlstra dequeue_rt_entity(rt_se, flags); 1338391e43daSPeter Zijlstra 1339391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 1340391e43daSPeter Zijlstra } 1341391e43daSPeter Zijlstra 1342391e43daSPeter Zijlstra /* 1343391e43daSPeter Zijlstra * Put task to the head or the end of the run list without the overhead of 1344391e43daSPeter 
Zijlstra * dequeue followed by enqueue. 1345391e43daSPeter Zijlstra */ 1346391e43daSPeter Zijlstra static void 1347391e43daSPeter Zijlstra requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) 1348391e43daSPeter Zijlstra { 1349391e43daSPeter Zijlstra if (on_rt_rq(rt_se)) { 1350391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1351391e43daSPeter Zijlstra struct list_head *queue = array->queue + rt_se_prio(rt_se); 1352391e43daSPeter Zijlstra 1353391e43daSPeter Zijlstra if (head) 1354391e43daSPeter Zijlstra list_move(&rt_se->run_list, queue); 1355391e43daSPeter Zijlstra else 1356391e43daSPeter Zijlstra list_move_tail(&rt_se->run_list, queue); 1357391e43daSPeter Zijlstra } 1358391e43daSPeter Zijlstra } 1359391e43daSPeter Zijlstra 1360391e43daSPeter Zijlstra static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) 1361391e43daSPeter Zijlstra { 1362391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt; 1363391e43daSPeter Zijlstra struct rt_rq *rt_rq; 1364391e43daSPeter Zijlstra 1365391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 1366391e43daSPeter Zijlstra rt_rq = rt_rq_of_se(rt_se); 1367391e43daSPeter Zijlstra requeue_rt_entity(rt_rq, rt_se, head); 1368391e43daSPeter Zijlstra } 1369391e43daSPeter Zijlstra } 1370391e43daSPeter Zijlstra 1371391e43daSPeter Zijlstra static void yield_task_rt(struct rq *rq) 1372391e43daSPeter Zijlstra { 1373391e43daSPeter Zijlstra requeue_task_rt(rq, rq->curr, 0); 1374391e43daSPeter Zijlstra } 1375391e43daSPeter Zijlstra 1376391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1377391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task); 1378391e43daSPeter Zijlstra 1379391e43daSPeter Zijlstra static int 1380ac66f547SPeter Zijlstra select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags) 1381391e43daSPeter Zijlstra { 1382391e43daSPeter Zijlstra struct task_struct *curr; 1383391e43daSPeter Zijlstra struct rq *rq; 1384391e43daSPeter Zijlstra 1385391e43daSPeter Zijlstra /* For anything but wakeups, just return the task_cpu */ 1386391e43daSPeter Zijlstra if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) 1387391e43daSPeter Zijlstra goto out; 1388391e43daSPeter Zijlstra 1389391e43daSPeter Zijlstra rq = cpu_rq(cpu); 1390391e43daSPeter Zijlstra 1391391e43daSPeter Zijlstra rcu_read_lock(); 1392316c1608SJason Low curr = READ_ONCE(rq->curr); /* unlocked access */ 1393391e43daSPeter Zijlstra 1394391e43daSPeter Zijlstra /* 1395391e43daSPeter Zijlstra * If the current task on @p's runqueue is an RT task, then 1396391e43daSPeter Zijlstra * try to see if we can wake this RT task up on another 1397391e43daSPeter Zijlstra * runqueue. Otherwise simply start this RT task 1398391e43daSPeter Zijlstra * on its current runqueue. 1399391e43daSPeter Zijlstra * 1400391e43daSPeter Zijlstra * We want to avoid overloading runqueues. If the woken 1401391e43daSPeter Zijlstra * task is of higher priority, then it will stay on this CPU 1402391e43daSPeter Zijlstra * and the lower prio task should be moved to another CPU. 1403391e43daSPeter Zijlstra * Even though this will probably make the lower prio task 1404391e43daSPeter Zijlstra * lose its cache, we do not want to bounce a higher-priority task 1405391e43daSPeter Zijlstra * around just because it gave up its CPU, perhaps for a 1406391e43daSPeter Zijlstra * lock? 1407391e43daSPeter Zijlstra * 1408391e43daSPeter Zijlstra * For equal prio tasks, we just let the scheduler sort it out.
1409391e43daSPeter Zijlstra * 1410391e43daSPeter Zijlstra * Otherwise, just let it ride on the affined RQ and the 1411391e43daSPeter Zijlstra * post-schedule router will push the preempted task away 1412391e43daSPeter Zijlstra * 1413391e43daSPeter Zijlstra * This test is optimistic; if we get it wrong the load-balancer 1414391e43daSPeter Zijlstra * will have to sort it out. 1415391e43daSPeter Zijlstra */ 1416391e43daSPeter Zijlstra if (curr && unlikely(rt_task(curr)) && 14174b53a341SIngo Molnar (curr->nr_cpus_allowed < 2 || 14186bfa687cSShawn Bohrer curr->prio <= p->prio)) { 1419391e43daSPeter Zijlstra int target = find_lowest_rq(p); 1420391e43daSPeter Zijlstra 142180e3d87bSTim Chen /* 142280e3d87bSTim Chen * Don't bother moving it if the destination CPU is 142380e3d87bSTim Chen * not running a lower priority task. 142480e3d87bSTim Chen */ 142580e3d87bSTim Chen if (target != -1 && 142680e3d87bSTim Chen p->prio < cpu_rq(target)->rt.highest_prio.curr) 1427391e43daSPeter Zijlstra cpu = target; 1428391e43daSPeter Zijlstra } 1429391e43daSPeter Zijlstra rcu_read_unlock(); 1430391e43daSPeter Zijlstra 1431391e43daSPeter Zijlstra out: 1432391e43daSPeter Zijlstra return cpu; 1433391e43daSPeter Zijlstra } 1434391e43daSPeter Zijlstra 1435391e43daSPeter Zijlstra static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) 1436391e43daSPeter Zijlstra { 1437308a623aSWanpeng Li /* 1438308a623aSWanpeng Li * Current can't be migrated, useless to reschedule, 1439308a623aSWanpeng Li * let's hope p can move out. 1440308a623aSWanpeng Li */ 14414b53a341SIngo Molnar if (rq->curr->nr_cpus_allowed == 1 || 1442308a623aSWanpeng Li !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) 1443391e43daSPeter Zijlstra return; 1444391e43daSPeter Zijlstra 1445308a623aSWanpeng Li /* 1446308a623aSWanpeng Li * p is migratable, so let's not schedule it and 1447308a623aSWanpeng Li * see if it is pushed or pulled somewhere else.
1448308a623aSWanpeng Li */ 14494b53a341SIngo Molnar if (p->nr_cpus_allowed != 1 1450391e43daSPeter Zijlstra && cpupri_find(&rq->rd->cpupri, p, NULL)) 1451391e43daSPeter Zijlstra return; 1452391e43daSPeter Zijlstra 1453391e43daSPeter Zijlstra /* 1454391e43daSPeter Zijlstra * There appear to be other CPUs that can accept 1455391e43daSPeter Zijlstra * current and none to run 'p', so let's reschedule 1456391e43daSPeter Zijlstra * to try and push current away: 1457391e43daSPeter Zijlstra */ 1458391e43daSPeter Zijlstra requeue_task_rt(rq, p, 1); 14598875125eSKirill Tkhai resched_curr(rq); 1460391e43daSPeter Zijlstra } 1461391e43daSPeter Zijlstra 1462391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 1463391e43daSPeter Zijlstra 1464391e43daSPeter Zijlstra /* 1465391e43daSPeter Zijlstra * Preempt the current task with a newly woken task if needed: 1466391e43daSPeter Zijlstra */ 1467391e43daSPeter Zijlstra static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) 1468391e43daSPeter Zijlstra { 1469391e43daSPeter Zijlstra if (p->prio < rq->curr->prio) { 14708875125eSKirill Tkhai resched_curr(rq); 1471391e43daSPeter Zijlstra return; 1472391e43daSPeter Zijlstra } 1473391e43daSPeter Zijlstra 1474391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1475391e43daSPeter Zijlstra /* 1476391e43daSPeter Zijlstra * If: 1477391e43daSPeter Zijlstra * 1478391e43daSPeter Zijlstra * - the newly woken task is of equal priority to the current task 1479391e43daSPeter Zijlstra * - the newly woken task is non-migratable while current is migratable 1480391e43daSPeter Zijlstra * - current will be preempted on the next reschedule 1481391e43daSPeter Zijlstra * 1482391e43daSPeter Zijlstra * we should check to see if current can readily move to a different 1483391e43daSPeter Zijlstra * cpu. If so, we will reschedule to allow the push logic to try 1484391e43daSPeter Zijlstra * to move current somewhere else, making room for our non-migratable 1485391e43daSPeter Zijlstra * task.
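 * * Example: p wakes at the same ->prio as current, p->nr_cpus_allowed * is 1, and current can run elsewhere; rescheduling here lets the push * logic move current away so the pinned task p is not left waiting * behind an equal-priority peer.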
1486391e43daSPeter Zijlstra */ 1487391e43daSPeter Zijlstra if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) 1488391e43daSPeter Zijlstra check_preempt_equal_prio(rq, p); 1489391e43daSPeter Zijlstra #endif 1490391e43daSPeter Zijlstra } 1491391e43daSPeter Zijlstra 1492391e43daSPeter Zijlstra static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, 1493391e43daSPeter Zijlstra struct rt_rq *rt_rq) 1494391e43daSPeter Zijlstra { 1495391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1496391e43daSPeter Zijlstra struct sched_rt_entity *next = NULL; 1497391e43daSPeter Zijlstra struct list_head *queue; 1498391e43daSPeter Zijlstra int idx; 1499391e43daSPeter Zijlstra 1500391e43daSPeter Zijlstra idx = sched_find_first_bit(array->bitmap); 1501391e43daSPeter Zijlstra BUG_ON(idx >= MAX_RT_PRIO); 1502391e43daSPeter Zijlstra 1503391e43daSPeter Zijlstra queue = array->queue + idx; 1504391e43daSPeter Zijlstra next = list_entry(queue->next, struct sched_rt_entity, run_list); 1505391e43daSPeter Zijlstra 1506391e43daSPeter Zijlstra return next; 1507391e43daSPeter Zijlstra } 1508391e43daSPeter Zijlstra 1509391e43daSPeter Zijlstra static struct task_struct *_pick_next_task_rt(struct rq *rq) 1510391e43daSPeter Zijlstra { 1511391e43daSPeter Zijlstra struct sched_rt_entity *rt_se; 1512391e43daSPeter Zijlstra struct task_struct *p; 1513606dba2eSPeter Zijlstra struct rt_rq *rt_rq = &rq->rt; 1514391e43daSPeter Zijlstra 1515391e43daSPeter Zijlstra do { 1516391e43daSPeter Zijlstra rt_se = pick_next_rt_entity(rq, rt_rq); 1517391e43daSPeter Zijlstra BUG_ON(!rt_se); 1518391e43daSPeter Zijlstra rt_rq = group_rt_rq(rt_se); 1519391e43daSPeter Zijlstra } while (rt_rq); 1520391e43daSPeter Zijlstra 1521391e43daSPeter Zijlstra p = rt_task_of(rt_se); 152278becc27SFrederic Weisbecker p->se.exec_start = rq_clock_task(rq); 1523391e43daSPeter Zijlstra 1524391e43daSPeter Zijlstra return p; 1525391e43daSPeter Zijlstra } 1526391e43daSPeter Zijlstra 1527606dba2eSPeter Zijlstra static struct task_struct * 1528d8ac8971SMatt Fleming pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 1529391e43daSPeter Zijlstra { 1530606dba2eSPeter Zijlstra struct task_struct *p; 1531606dba2eSPeter Zijlstra struct rt_rq *rt_rq = &rq->rt; 1532606dba2eSPeter Zijlstra 153337e117c0SPeter Zijlstra if (need_pull_rt_task(rq, prev)) { 1534cbce1a68SPeter Zijlstra /* 1535cbce1a68SPeter Zijlstra * This is OK, because current is on_cpu, which avoids it being 1536cbce1a68SPeter Zijlstra * picked for load-balance and preemption/IRQs are still 1537cbce1a68SPeter Zijlstra * disabled avoiding further scheduler activity on it and we're 1538cbce1a68SPeter Zijlstra * being very careful to re-start the picking loop. 1539cbce1a68SPeter Zijlstra */ 1540d8ac8971SMatt Fleming rq_unpin_lock(rq, rf); 154138033c37SPeter Zijlstra pull_rt_task(rq); 1542d8ac8971SMatt Fleming rq_repin_lock(rq, rf); 154337e117c0SPeter Zijlstra /* 154437e117c0SPeter Zijlstra * pull_rt_task() can drop (and re-acquire) rq->lock; this 1545a1d9a323SKirill Tkhai * means a dl or stop task can slip in, in which case we need 1546a1d9a323SKirill Tkhai * to re-start task selection. 
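 * (RETRY_TASK makes the core pick_next_task() loop start over from the * highest sched_class, so a dl or stop task that slipped in is picked * ahead of us.)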
154737e117c0SPeter Zijlstra */ 1548da0c1e65SKirill Tkhai if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) || 1549a1d9a323SKirill Tkhai rq->dl.dl_nr_running)) 155037e117c0SPeter Zijlstra return RETRY_TASK; 155137e117c0SPeter Zijlstra } 155238033c37SPeter Zijlstra 1553734ff2a7SKirill Tkhai /* 1554734ff2a7SKirill Tkhai * We may dequeue prev's rt_rq in put_prev_task(). 1555734ff2a7SKirill Tkhai * So, we update time before the rt_nr_running check. 1556734ff2a7SKirill Tkhai */ 1557734ff2a7SKirill Tkhai if (prev->sched_class == &rt_sched_class) 1558734ff2a7SKirill Tkhai update_curr_rt(rq); 1559734ff2a7SKirill Tkhai 1560f4ebcbc0SKirill Tkhai if (!rt_rq->rt_queued) 1561606dba2eSPeter Zijlstra return NULL; 1562606dba2eSPeter Zijlstra 15633f1d2a31SPeter Zijlstra put_prev_task(rq, prev); 1564606dba2eSPeter Zijlstra 1565606dba2eSPeter Zijlstra p = _pick_next_task_rt(rq); 1566391e43daSPeter Zijlstra 1567391e43daSPeter Zijlstra /* The running task is never eligible for pushing */ 1568391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 1569391e43daSPeter Zijlstra 1570e3fca9e7SPeter Zijlstra queue_push_tasks(rq); 1571391e43daSPeter Zijlstra 1572391e43daSPeter Zijlstra return p; 1573391e43daSPeter Zijlstra } 1574391e43daSPeter Zijlstra 1575391e43daSPeter Zijlstra static void put_prev_task_rt(struct rq *rq, struct task_struct *p) 1576391e43daSPeter Zijlstra { 1577391e43daSPeter Zijlstra update_curr_rt(rq); 1578391e43daSPeter Zijlstra 1579391e43daSPeter Zijlstra /* 1580391e43daSPeter Zijlstra * The previous task needs to be made eligible for pushing 1581391e43daSPeter Zijlstra * if it is still active 1582391e43daSPeter Zijlstra */ 15834b53a341SIngo Molnar if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) 1584391e43daSPeter Zijlstra enqueue_pushable_task(rq, p); 1585391e43daSPeter Zijlstra } 1586391e43daSPeter Zijlstra 1587391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1588391e43daSPeter Zijlstra 1589391e43daSPeter Zijlstra /* Only try algorithms three times */ 1590391e43daSPeter Zijlstra #define RT_MAX_TRIES 3 1591391e43daSPeter Zijlstra 1592391e43daSPeter Zijlstra static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) 1593391e43daSPeter Zijlstra { 1594391e43daSPeter Zijlstra if (!task_running(rq, p) && 15950c98d344SIngo Molnar cpumask_test_cpu(cpu, &p->cpus_allowed)) 1596391e43daSPeter Zijlstra return 1; 1597391e43daSPeter Zijlstra return 0; 1598391e43daSPeter Zijlstra } 1599391e43daSPeter Zijlstra 1600e23ee747SKirill Tkhai /* 1601e23ee747SKirill Tkhai * Return the rq's highest pushable task that is suitable to be executed 1602e23ee747SKirill Tkhai * on the cpu, or NULL otherwise 1603e23ee747SKirill Tkhai */ 1604e23ee747SKirill Tkhai static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu) 1605391e43daSPeter Zijlstra { 1606e23ee747SKirill Tkhai struct plist_head *head = &rq->rt.pushable_tasks; 1607391e43daSPeter Zijlstra struct task_struct *p; 1608391e43daSPeter Zijlstra 1609e23ee747SKirill Tkhai if (!has_pushable_tasks(rq)) 1610e23ee747SKirill Tkhai return NULL; 1611391e43daSPeter Zijlstra 1612e23ee747SKirill Tkhai plist_for_each_entry(p, head, pushable_tasks) { 1613e23ee747SKirill Tkhai if (pick_rt_task(rq, p, cpu)) 1614e23ee747SKirill Tkhai return p; 1615391e43daSPeter Zijlstra } 1616391e43daSPeter Zijlstra 1617e23ee747SKirill Tkhai return NULL; 1618391e43daSPeter Zijlstra } 1619391e43daSPeter Zijlstra 1620391e43daSPeter Zijlstra static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); 1621391e43daSPeter Zijlstra 1622391e43daSPeter Zijlstra static int find_lowest_rq(struct
task_struct *task) 1623391e43daSPeter Zijlstra { 1624391e43daSPeter Zijlstra struct sched_domain *sd; 16254ba29684SChristoph Lameter struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask); 1626391e43daSPeter Zijlstra int this_cpu = smp_processor_id(); 1627391e43daSPeter Zijlstra int cpu = task_cpu(task); 1628391e43daSPeter Zijlstra 1629391e43daSPeter Zijlstra /* Make sure the mask is initialized first */ 1630391e43daSPeter Zijlstra if (unlikely(!lowest_mask)) 1631391e43daSPeter Zijlstra return -1; 1632391e43daSPeter Zijlstra 16334b53a341SIngo Molnar if (task->nr_cpus_allowed == 1) 1634391e43daSPeter Zijlstra return -1; /* No other targets possible */ 1635391e43daSPeter Zijlstra 1636391e43daSPeter Zijlstra if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) 1637391e43daSPeter Zijlstra return -1; /* No targets found */ 1638391e43daSPeter Zijlstra 1639391e43daSPeter Zijlstra /* 1640391e43daSPeter Zijlstra * At this point we have built a mask of cpus representing the 1641391e43daSPeter Zijlstra * lowest priority tasks in the system. Now we want to elect 1642391e43daSPeter Zijlstra * the best one based on our affinity and topology. 1643391e43daSPeter Zijlstra * 1644391e43daSPeter Zijlstra * We prioritize the last cpu that the task executed on since 1645391e43daSPeter Zijlstra * it is most likely cache-hot in that location. 1646391e43daSPeter Zijlstra */ 1647391e43daSPeter Zijlstra if (cpumask_test_cpu(cpu, lowest_mask)) 1648391e43daSPeter Zijlstra return cpu; 1649391e43daSPeter Zijlstra 1650391e43daSPeter Zijlstra /* 1651391e43daSPeter Zijlstra * Otherwise, we consult the sched_domains span maps to figure 1652391e43daSPeter Zijlstra * out which cpu is logically closest to our hot cache data. 1653391e43daSPeter Zijlstra */ 1654391e43daSPeter Zijlstra if (!cpumask_test_cpu(this_cpu, lowest_mask)) 1655391e43daSPeter Zijlstra this_cpu = -1; /* Skip this_cpu opt if not among lowest */ 1656391e43daSPeter Zijlstra 1657391e43daSPeter Zijlstra rcu_read_lock(); 1658391e43daSPeter Zijlstra for_each_domain(cpu, sd) { 1659391e43daSPeter Zijlstra if (sd->flags & SD_WAKE_AFFINE) { 1660391e43daSPeter Zijlstra int best_cpu; 1661391e43daSPeter Zijlstra 1662391e43daSPeter Zijlstra /* 1663391e43daSPeter Zijlstra * "this_cpu" is cheaper to preempt than a 1664391e43daSPeter Zijlstra * remote processor. 1665391e43daSPeter Zijlstra */ 1666391e43daSPeter Zijlstra if (this_cpu != -1 && 1667391e43daSPeter Zijlstra cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { 1668391e43daSPeter Zijlstra rcu_read_unlock(); 1669391e43daSPeter Zijlstra return this_cpu; 1670391e43daSPeter Zijlstra } 1671391e43daSPeter Zijlstra 1672391e43daSPeter Zijlstra best_cpu = cpumask_first_and(lowest_mask, 1673391e43daSPeter Zijlstra sched_domain_span(sd)); 1674391e43daSPeter Zijlstra if (best_cpu < nr_cpu_ids) { 1675391e43daSPeter Zijlstra rcu_read_unlock(); 1676391e43daSPeter Zijlstra return best_cpu; 1677391e43daSPeter Zijlstra } 1678391e43daSPeter Zijlstra } 1679391e43daSPeter Zijlstra } 1680391e43daSPeter Zijlstra rcu_read_unlock(); 1681391e43daSPeter Zijlstra 1682391e43daSPeter Zijlstra /* 1683391e43daSPeter Zijlstra * And finally, if there were no matches within the domains 1684391e43daSPeter Zijlstra * just give the caller *something* to work with from the compatible 1685391e43daSPeter Zijlstra * locations. 
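 * * The resulting preference order is: the task's previous CPU, then * this_cpu when it shares a SD_WAKE_AFFINE domain with that CPU, then * the first lowest_mask CPU in each domain span, then this_cpu, and * finally any CPU left in lowest_mask.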
1686391e43daSPeter Zijlstra */ 1687391e43daSPeter Zijlstra if (this_cpu != -1) 1688391e43daSPeter Zijlstra return this_cpu; 1689391e43daSPeter Zijlstra 1690391e43daSPeter Zijlstra cpu = cpumask_any(lowest_mask); 1691391e43daSPeter Zijlstra if (cpu < nr_cpu_ids) 1692391e43daSPeter Zijlstra return cpu; 1693391e43daSPeter Zijlstra return -1; 1694391e43daSPeter Zijlstra } 1695391e43daSPeter Zijlstra 1696391e43daSPeter Zijlstra /* Will lock the rq it finds */ 1697391e43daSPeter Zijlstra static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) 1698391e43daSPeter Zijlstra { 1699391e43daSPeter Zijlstra struct rq *lowest_rq = NULL; 1700391e43daSPeter Zijlstra int tries; 1701391e43daSPeter Zijlstra int cpu; 1702391e43daSPeter Zijlstra 1703391e43daSPeter Zijlstra for (tries = 0; tries < RT_MAX_TRIES; tries++) { 1704391e43daSPeter Zijlstra cpu = find_lowest_rq(task); 1705391e43daSPeter Zijlstra 1706391e43daSPeter Zijlstra if ((cpu == -1) || (cpu == rq->cpu)) 1707391e43daSPeter Zijlstra break; 1708391e43daSPeter Zijlstra 1709391e43daSPeter Zijlstra lowest_rq = cpu_rq(cpu); 1710391e43daSPeter Zijlstra 171180e3d87bSTim Chen if (lowest_rq->rt.highest_prio.curr <= task->prio) { 171280e3d87bSTim Chen /* 171380e3d87bSTim Chen * Target rq has tasks of equal or higher priority, 171480e3d87bSTim Chen * retrying does not release any lock and is unlikely 171580e3d87bSTim Chen * to yield a different result. 171680e3d87bSTim Chen */ 171780e3d87bSTim Chen lowest_rq = NULL; 171880e3d87bSTim Chen break; 171980e3d87bSTim Chen } 172080e3d87bSTim Chen 1721391e43daSPeter Zijlstra /* if the prio of this runqueue changed, try again */ 1722391e43daSPeter Zijlstra if (double_lock_balance(rq, lowest_rq)) { 1723391e43daSPeter Zijlstra /* 1724391e43daSPeter Zijlstra * We had to unlock the run queue. In 1725391e43daSPeter Zijlstra * the meantime, the task could have 1726391e43daSPeter Zijlstra * migrated already or had its affinity changed. 1727391e43daSPeter Zijlstra * Also make sure that it wasn't scheduled on its rq. 1728391e43daSPeter Zijlstra */ 1729391e43daSPeter Zijlstra if (unlikely(task_rq(task) != rq || 17300c98d344SIngo Molnar !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) || 1731391e43daSPeter Zijlstra task_running(rq, task) || 173213b5ab02SXunlei Pang !rt_task(task) || 1733da0c1e65SKirill Tkhai !task_on_rq_queued(task))) { 1734391e43daSPeter Zijlstra 17357f1b4393SPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1736391e43daSPeter Zijlstra lowest_rq = NULL; 1737391e43daSPeter Zijlstra break; 1738391e43daSPeter Zijlstra } 1739391e43daSPeter Zijlstra } 1740391e43daSPeter Zijlstra 1741391e43daSPeter Zijlstra /* If this rq is still suitable, use it.
*/ 1742391e43daSPeter Zijlstra if (lowest_rq->rt.highest_prio.curr > task->prio) 1743391e43daSPeter Zijlstra break; 1744391e43daSPeter Zijlstra 1745391e43daSPeter Zijlstra /* try again */ 1746391e43daSPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1747391e43daSPeter Zijlstra lowest_rq = NULL; 1748391e43daSPeter Zijlstra } 1749391e43daSPeter Zijlstra 1750391e43daSPeter Zijlstra return lowest_rq; 1751391e43daSPeter Zijlstra } 1752391e43daSPeter Zijlstra 1753391e43daSPeter Zijlstra static struct task_struct *pick_next_pushable_task(struct rq *rq) 1754391e43daSPeter Zijlstra { 1755391e43daSPeter Zijlstra struct task_struct *p; 1756391e43daSPeter Zijlstra 1757391e43daSPeter Zijlstra if (!has_pushable_tasks(rq)) 1758391e43daSPeter Zijlstra return NULL; 1759391e43daSPeter Zijlstra 1760391e43daSPeter Zijlstra p = plist_first_entry(&rq->rt.pushable_tasks, 1761391e43daSPeter Zijlstra struct task_struct, pushable_tasks); 1762391e43daSPeter Zijlstra 1763391e43daSPeter Zijlstra BUG_ON(rq->cpu != task_cpu(p)); 1764391e43daSPeter Zijlstra BUG_ON(task_current(rq, p)); 17654b53a341SIngo Molnar BUG_ON(p->nr_cpus_allowed <= 1); 1766391e43daSPeter Zijlstra 1767da0c1e65SKirill Tkhai BUG_ON(!task_on_rq_queued(p)); 1768391e43daSPeter Zijlstra BUG_ON(!rt_task(p)); 1769391e43daSPeter Zijlstra 1770391e43daSPeter Zijlstra return p; 1771391e43daSPeter Zijlstra } 1772391e43daSPeter Zijlstra 1773391e43daSPeter Zijlstra /* 1774391e43daSPeter Zijlstra * If the current CPU has more than one RT task, see if the non-running 1775391e43daSPeter Zijlstra * task can migrate over to a CPU that is running a task 1776391e43daSPeter Zijlstra * of lesser priority. 1777391e43daSPeter Zijlstra */ 1778391e43daSPeter Zijlstra static int push_rt_task(struct rq *rq) 1779391e43daSPeter Zijlstra { 1780391e43daSPeter Zijlstra struct task_struct *next_task; 1781391e43daSPeter Zijlstra struct rq *lowest_rq; 1782391e43daSPeter Zijlstra int ret = 0; 1783391e43daSPeter Zijlstra 1784391e43daSPeter Zijlstra if (!rq->rt.overloaded) 1785391e43daSPeter Zijlstra return 0; 1786391e43daSPeter Zijlstra 1787391e43daSPeter Zijlstra next_task = pick_next_pushable_task(rq); 1788391e43daSPeter Zijlstra if (!next_task) 1789391e43daSPeter Zijlstra return 0; 1790391e43daSPeter Zijlstra 1791391e43daSPeter Zijlstra retry: 1792391e43daSPeter Zijlstra if (unlikely(next_task == rq->curr)) { 1793391e43daSPeter Zijlstra WARN_ON(1); 1794391e43daSPeter Zijlstra return 0; 1795391e43daSPeter Zijlstra } 1796391e43daSPeter Zijlstra 1797391e43daSPeter Zijlstra /* 1798391e43daSPeter Zijlstra * It's possible that next_task slipped in with a 1799391e43daSPeter Zijlstra * higher priority than current. If that's the case 1800391e43daSPeter Zijlstra * just reschedule current.
1801391e43daSPeter Zijlstra */ 1802391e43daSPeter Zijlstra if (unlikely(next_task->prio < rq->curr->prio)) { 18038875125eSKirill Tkhai resched_curr(rq); 1804391e43daSPeter Zijlstra return 0; 1805391e43daSPeter Zijlstra } 1806391e43daSPeter Zijlstra 1807391e43daSPeter Zijlstra /* We might release rq lock */ 1808391e43daSPeter Zijlstra get_task_struct(next_task); 1809391e43daSPeter Zijlstra 1810391e43daSPeter Zijlstra /* find_lock_lowest_rq locks the rq if found */ 1811391e43daSPeter Zijlstra lowest_rq = find_lock_lowest_rq(next_task, rq); 1812391e43daSPeter Zijlstra if (!lowest_rq) { 1813391e43daSPeter Zijlstra struct task_struct *task; 1814391e43daSPeter Zijlstra /* 1815391e43daSPeter Zijlstra * find_lock_lowest_rq releases rq->lock 1816391e43daSPeter Zijlstra * so it is possible that next_task has migrated. 1817391e43daSPeter Zijlstra * 1818391e43daSPeter Zijlstra * We need to make sure that the task is still on the same 1819391e43daSPeter Zijlstra * run-queue and is also still the next task eligible for 1820391e43daSPeter Zijlstra * pushing. 1821391e43daSPeter Zijlstra */ 1822391e43daSPeter Zijlstra task = pick_next_pushable_task(rq); 1823de16b91eSByungchul Park if (task == next_task) { 1824391e43daSPeter Zijlstra /* 1825391e43daSPeter Zijlstra * The task hasn't migrated, and is still the next 1826391e43daSPeter Zijlstra * eligible task, but we failed to find a run-queue 1827391e43daSPeter Zijlstra * to push it to. Do not retry in this case, since 1828391e43daSPeter Zijlstra * other cpus will pull from us when ready. 1829391e43daSPeter Zijlstra */ 1830391e43daSPeter Zijlstra goto out; 1831391e43daSPeter Zijlstra } 1832391e43daSPeter Zijlstra 1833391e43daSPeter Zijlstra if (!task) 1834391e43daSPeter Zijlstra /* No more tasks, just exit */ 1835391e43daSPeter Zijlstra goto out; 1836391e43daSPeter Zijlstra 1837391e43daSPeter Zijlstra /* 1838391e43daSPeter Zijlstra * Something has shifted, try again. 
1839391e43daSPeter Zijlstra */ 1840391e43daSPeter Zijlstra put_task_struct(next_task); 1841391e43daSPeter Zijlstra next_task = task; 1842391e43daSPeter Zijlstra goto retry; 1843391e43daSPeter Zijlstra } 1844391e43daSPeter Zijlstra 1845391e43daSPeter Zijlstra deactivate_task(rq, next_task, 0); 1846391e43daSPeter Zijlstra set_task_cpu(next_task, lowest_rq->cpu); 1847391e43daSPeter Zijlstra activate_task(lowest_rq, next_task, 0); 1848391e43daSPeter Zijlstra ret = 1; 1849391e43daSPeter Zijlstra 18508875125eSKirill Tkhai resched_curr(lowest_rq); 1851391e43daSPeter Zijlstra 1852391e43daSPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1853391e43daSPeter Zijlstra 1854391e43daSPeter Zijlstra out: 1855391e43daSPeter Zijlstra put_task_struct(next_task); 1856391e43daSPeter Zijlstra 1857391e43daSPeter Zijlstra return ret; 1858391e43daSPeter Zijlstra } 1859391e43daSPeter Zijlstra 1860391e43daSPeter Zijlstra static void push_rt_tasks(struct rq *rq) 1861391e43daSPeter Zijlstra { 1862391e43daSPeter Zijlstra /* push_rt_task will return true if it moved an RT */ 1863391e43daSPeter Zijlstra while (push_rt_task(rq)) 1864391e43daSPeter Zijlstra ; 1865391e43daSPeter Zijlstra } 1866391e43daSPeter Zijlstra 1867b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI 1868b6366f04SSteven Rostedt 18693e777f99SSteven Rostedt (VMware) /* 18703e777f99SSteven Rostedt (VMware) * When a high priority task schedules out from a CPU and a lower priority 18713e777f99SSteven Rostedt (VMware) * task is scheduled in, a check is made to see if there's any RT tasks 18723e777f99SSteven Rostedt (VMware) * on other CPUs that are waiting to run because a higher priority RT task 18733e777f99SSteven Rostedt (VMware) * is currently running on its CPU. In this case, the CPU with multiple RT 18743e777f99SSteven Rostedt (VMware) * tasks queued on it (overloaded) needs to be notified that a CPU has opened 18753e777f99SSteven Rostedt (VMware) * up that may be able to run one of its non-running queued RT tasks. 18763e777f99SSteven Rostedt (VMware) * 18774bdced5cSSteven Rostedt (Red Hat) * All CPUs with overloaded RT tasks need to be notified as there is currently 18784bdced5cSSteven Rostedt (Red Hat) * no way to know which of these CPUs have the highest priority task waiting 18794bdced5cSSteven Rostedt (Red Hat) * to run. Instead of trying to take a spinlock on each of these CPUs, 18804bdced5cSSteven Rostedt (Red Hat) * which has been shown to cause large latency when done on machines with many 18814bdced5cSSteven Rostedt (Red Hat) * CPUs, an IPI is sent to the CPUs to have them push off the overloaded 18824bdced5cSSteven Rostedt (Red Hat) * RT tasks waiting to run. 18833e777f99SSteven Rostedt (VMware) * 18844bdced5cSSteven Rostedt (Red Hat) * Just sending an IPI to each of the CPUs is also an issue, as on large 18854bdced5cSSteven Rostedt (Red Hat) * count CPU machines, this can cause an IPI storm on a CPU, especially 18864bdced5cSSteven Rostedt (Red Hat) * if it's the only CPU with multiple RT tasks queued, and a large number 18874bdced5cSSteven Rostedt (Red Hat) * of CPUs scheduling a lower priority task at the same time. 18883e777f99SSteven Rostedt (VMware) * 18894bdced5cSSteven Rostedt (Red Hat) * Each root domain has its own irq work function that can iterate over 18904bdced5cSSteven Rostedt (Red Hat) * all CPUs with RT overloaded tasks. 
Since all CPUs with overloaded RT 18914bdced5cSSteven Rostedt (Red Hat) * tasks must be checked whether one or many CPUs are lowering 18924bdced5cSSteven Rostedt (Red Hat) * their priority, there's a single irq work iterator that will try to 18934bdced5cSSteven Rostedt (Red Hat) * push off RT tasks that are waiting to run. 18943e777f99SSteven Rostedt (VMware) * 18954bdced5cSSteven Rostedt (Red Hat) * When a CPU schedules a lower priority task, it will kick off the 18964bdced5cSSteven Rostedt (Red Hat) * irq work iterator that will jump to each CPU with overloaded RT tasks. 18974bdced5cSSteven Rostedt (Red Hat) * As it only takes the first CPU that schedules a lower priority task 18984bdced5cSSteven Rostedt (Red Hat) * to start the process, the rto_loop_start variable is claimed atomically, 18994bdced5cSSteven Rostedt (Red Hat) * and only the CPU that finds it was previously zero will try to take the rto_lock. 19004bdced5cSSteven Rostedt (Red Hat) * This prevents high contention on the lock as the process handles all 19014bdced5cSSteven Rostedt (Red Hat) * CPUs scheduling lower priority tasks. 19023e777f99SSteven Rostedt (VMware) * 19034bdced5cSSteven Rostedt (Red Hat) * All CPUs that are scheduling a lower priority task will increment the 19044bdced5cSSteven Rostedt (Red Hat) * rto_loop_next variable. This will make sure that the irq work iterator 19054bdced5cSSteven Rostedt (Red Hat) * checks all RT overloaded CPUs whenever a CPU schedules a new lower 19064bdced5cSSteven Rostedt (Red Hat) * priority task, even if the iterator is in the middle of a scan. Incrementing 19074bdced5cSSteven Rostedt (Red Hat) * the rto_loop_next will cause the iterator to perform another scan. 19083e777f99SSteven Rostedt (VMware) * 19093e777f99SSteven Rostedt (VMware) */ 19104bdced5cSSteven Rostedt (Red Hat) static int rto_next_cpu(struct rq *rq) 1911b6366f04SSteven Rostedt { 19124bdced5cSSteven Rostedt (Red Hat) struct root_domain *rd = rq->rd; 19134bdced5cSSteven Rostedt (Red Hat) int next; 1914b6366f04SSteven Rostedt int cpu; 1915b6366f04SSteven Rostedt 1916b6366f04SSteven Rostedt /* 19174bdced5cSSteven Rostedt (Red Hat) * When starting the IPI RT pushing, the rto_cpu is set to -1, so 19184bdced5cSSteven Rostedt (Red Hat) * rto_next_cpu() will simply return the first CPU found in 19194bdced5cSSteven Rostedt (Red Hat) * the rto_mask. 19204bdced5cSSteven Rostedt (Red Hat) * 19214bdced5cSSteven Rostedt (Red Hat) * If rto_next_cpu() is called while rto_cpu is a valid CPU, it 19224bdced5cSSteven Rostedt (Red Hat) * will return the next CPU found in the rto_mask. 19234bdced5cSSteven Rostedt (Red Hat) * 19244bdced5cSSteven Rostedt (Red Hat) * If there are no more CPUs left in the rto_mask, then a check is made 19254bdced5cSSteven Rostedt (Red Hat) * against rto_loop and rto_loop_next. rto_loop is only updated with 19264bdced5cSSteven Rostedt (Red Hat) * the rto_lock held, but any CPU may increment the rto_loop_next 19274bdced5cSSteven Rostedt (Red Hat) * without any locking.
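 * * Illustrative walk, assuming CPUs 1 and 3 are set in rto_mask: with * rto_cpu at -1 the first call returns 1, the next returns 3, and the * one after finds no further bit; it then compares rto_loop against * rto_loop_next, restarting the scan if another CPU bumped * rto_loop_next in the meantime, otherwise returning -1 to stop the * IPI chain.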
1928b6366f04SSteven Rostedt */ 19294bdced5cSSteven Rostedt (Red Hat) for (;;) { 19304bdced5cSSteven Rostedt (Red Hat) 19314bdced5cSSteven Rostedt (Red Hat) /* When rto_cpu is -1 this acts like cpumask_first() */ 19324bdced5cSSteven Rostedt (Red Hat) cpu = cpumask_next(rd->rto_cpu, rd->rto_mask); 19334bdced5cSSteven Rostedt (Red Hat) 19344bdced5cSSteven Rostedt (Red Hat) rd->rto_cpu = cpu; 19354bdced5cSSteven Rostedt (Red Hat) 19364bdced5cSSteven Rostedt (Red Hat) if (cpu < nr_cpu_ids) 19374bdced5cSSteven Rostedt (Red Hat) return cpu; 19384bdced5cSSteven Rostedt (Red Hat) 19394bdced5cSSteven Rostedt (Red Hat) rd->rto_cpu = -1; 19404bdced5cSSteven Rostedt (Red Hat) 19414bdced5cSSteven Rostedt (Red Hat) /* 19424bdced5cSSteven Rostedt (Red Hat) * ACQUIRE ensures we see the @rto_mask changes 19434bdced5cSSteven Rostedt (Red Hat) * made prior to the @next value observed. 19444bdced5cSSteven Rostedt (Red Hat) * 19454bdced5cSSteven Rostedt (Red Hat) * Matches WMB in rt_set_overload(). 19464bdced5cSSteven Rostedt (Red Hat) */ 19474bdced5cSSteven Rostedt (Red Hat) next = atomic_read_acquire(&rd->rto_loop_next); 19484bdced5cSSteven Rostedt (Red Hat) 19494bdced5cSSteven Rostedt (Red Hat) if (rd->rto_loop == next) 19504bdced5cSSteven Rostedt (Red Hat) break; 19514bdced5cSSteven Rostedt (Red Hat) 19524bdced5cSSteven Rostedt (Red Hat) rd->rto_loop = next; 1953b6366f04SSteven Rostedt } 1954b6366f04SSteven Rostedt 19554bdced5cSSteven Rostedt (Red Hat) return -1; 19564bdced5cSSteven Rostedt (Red Hat) } 1957b6366f04SSteven Rostedt 19584bdced5cSSteven Rostedt (Red Hat) static inline bool rto_start_trylock(atomic_t *v) 19594bdced5cSSteven Rostedt (Red Hat) { 19604bdced5cSSteven Rostedt (Red Hat) return !atomic_cmpxchg_acquire(v, 0, 1); 19614bdced5cSSteven Rostedt (Red Hat) } 19624bdced5cSSteven Rostedt (Red Hat) 19634bdced5cSSteven Rostedt (Red Hat) static inline void rto_start_unlock(atomic_t *v) 19644bdced5cSSteven Rostedt (Red Hat) { 19654bdced5cSSteven Rostedt (Red Hat) atomic_set_release(v, 0); 19664bdced5cSSteven Rostedt (Red Hat) } 19674bdced5cSSteven Rostedt (Red Hat) 19684bdced5cSSteven Rostedt (Red Hat) static void tell_cpu_to_push(struct rq *rq) 19694bdced5cSSteven Rostedt (Red Hat) { 19704bdced5cSSteven Rostedt (Red Hat) int cpu = -1; 19714bdced5cSSteven Rostedt (Red Hat) 19724bdced5cSSteven Rostedt (Red Hat) /* Keep the loop going if the IPI is currently active */ 19734bdced5cSSteven Rostedt (Red Hat) atomic_inc(&rq->rd->rto_loop_next); 19744bdced5cSSteven Rostedt (Red Hat) 19754bdced5cSSteven Rostedt (Red Hat) /* Only one CPU can initiate a loop at a time */ 19764bdced5cSSteven Rostedt (Red Hat) if (!rto_start_trylock(&rq->rd->rto_loop_start)) 1977b6366f04SSteven Rostedt return; 1978b6366f04SSteven Rostedt 19794bdced5cSSteven Rostedt (Red Hat) raw_spin_lock(&rq->rd->rto_lock); 1980b6366f04SSteven Rostedt 19814bdced5cSSteven Rostedt (Red Hat) /* 19824bdced5cSSteven Rostedt (Red Hat) * The rto_cpu is updated under the lock, if it has a valid cpu 19834bdced5cSSteven Rostedt (Red Hat) * then the IPI is still running and will continue due to the 19844bdced5cSSteven Rostedt (Red Hat) * update to loop_next, and nothing needs to be done here. 19854bdced5cSSteven Rostedt (Red Hat) * Otherwise it is finishing up and an ipi needs to be sent. 
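 * * e.g. if this CPU is mid-loop when another CPU also schedules a lower * priority task, that CPU's increment of rto_loop_next above makes the * running iterator rescan rather than starting a second IPI chain.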
19864bdced5cSSteven Rostedt (Red Hat) */ 19874bdced5cSSteven Rostedt (Red Hat) if (rq->rd->rto_cpu < 0) 19884bdced5cSSteven Rostedt (Red Hat) cpu = rto_next_cpu(rq); 19894bdced5cSSteven Rostedt (Red Hat) 19904bdced5cSSteven Rostedt (Red Hat) raw_spin_unlock(&rq->rd->rto_lock); 19914bdced5cSSteven Rostedt (Red Hat) 19924bdced5cSSteven Rostedt (Red Hat) rto_start_unlock(&rq->rd->rto_loop_start); 19934bdced5cSSteven Rostedt (Red Hat) 19944bdced5cSSteven Rostedt (Red Hat) if (cpu >= 0) 19954bdced5cSSteven Rostedt (Red Hat) irq_work_queue_on(&rq->rd->rto_push_work, cpu); 1996b6366f04SSteven Rostedt } 1997b6366f04SSteven Rostedt 1998b6366f04SSteven Rostedt /* Called from hardirq context */ 19994bdced5cSSteven Rostedt (Red Hat) void rto_push_irq_work_func(struct irq_work *work) 2000b6366f04SSteven Rostedt { 20014bdced5cSSteven Rostedt (Red Hat) struct rq *rq; 2002b6366f04SSteven Rostedt int cpu; 2003b6366f04SSteven Rostedt 20044bdced5cSSteven Rostedt (Red Hat) rq = this_rq(); 2005b6366f04SSteven Rostedt 20064bdced5cSSteven Rostedt (Red Hat) /* 20074bdced5cSSteven Rostedt (Red Hat) * We do not need to grab the lock to check for has_pushable_tasks. 20084bdced5cSSteven Rostedt (Red Hat) * When it gets updated, a check is made whether a push is possible. 20094bdced5cSSteven Rostedt (Red Hat) */ 2010b6366f04SSteven Rostedt if (has_pushable_tasks(rq)) { 2011b6366f04SSteven Rostedt raw_spin_lock(&rq->lock); 20124bdced5cSSteven Rostedt (Red Hat) push_rt_tasks(rq); 2013b6366f04SSteven Rostedt raw_spin_unlock(&rq->lock); 2014b6366f04SSteven Rostedt } 2015b6366f04SSteven Rostedt 20164bdced5cSSteven Rostedt (Red Hat) raw_spin_lock(&rq->rd->rto_lock); 20174bdced5cSSteven Rostedt (Red Hat) 2018b6366f04SSteven Rostedt /* Pass the IPI to the next rt overloaded queue */ 20194bdced5cSSteven Rostedt (Red Hat) cpu = rto_next_cpu(rq); 2020b6366f04SSteven Rostedt 20214bdced5cSSteven Rostedt (Red Hat) raw_spin_unlock(&rq->rd->rto_lock); 2022b6366f04SSteven Rostedt 20234bdced5cSSteven Rostedt (Red Hat) if (cpu < 0) 2024b6366f04SSteven Rostedt return; 2025b6366f04SSteven Rostedt 2026b6366f04SSteven Rostedt /* Try the next RT overloaded CPU */ 20274bdced5cSSteven Rostedt (Red Hat) irq_work_queue_on(&rq->rd->rto_push_work, cpu); 2028b6366f04SSteven Rostedt } 2029b6366f04SSteven Rostedt #endif /* HAVE_RT_PUSH_IPI */ 2030b6366f04SSteven Rostedt 20318046d680SPeter Zijlstra static void pull_rt_task(struct rq *this_rq) 2032391e43daSPeter Zijlstra { 20338046d680SPeter Zijlstra int this_cpu = this_rq->cpu, cpu; 20348046d680SPeter Zijlstra bool resched = false; 2035391e43daSPeter Zijlstra struct task_struct *p; 2036391e43daSPeter Zijlstra struct rq *src_rq; 2037391e43daSPeter Zijlstra 2038391e43daSPeter Zijlstra if (likely(!rt_overloaded(this_rq))) 20398046d680SPeter Zijlstra return; 2040391e43daSPeter Zijlstra 20417c3f2ab7SPeter Zijlstra /* 20427c3f2ab7SPeter Zijlstra * Match the barrier from rt_set_overload(); this guarantees that if we 20437c3f2ab7SPeter Zijlstra * see overloaded we must also see the rto_mask bit.
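 * * (rt_set_overload() sets the rto_mask bit and then, after a wmb, * increments rto_count; rt_overloaded() above read rto_count, so this * rmb ensures the rto_mask reads below see the corresponding bits.)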
20447c3f2ab7SPeter Zijlstra */ 20457c3f2ab7SPeter Zijlstra smp_rmb(); 20467c3f2ab7SPeter Zijlstra 2047b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI 2048b6366f04SSteven Rostedt if (sched_feat(RT_PUSH_IPI)) { 2049b6366f04SSteven Rostedt tell_cpu_to_push(this_rq); 20508046d680SPeter Zijlstra return; 2051b6366f04SSteven Rostedt } 2052b6366f04SSteven Rostedt #endif 2053b6366f04SSteven Rostedt 2054391e43daSPeter Zijlstra for_each_cpu(cpu, this_rq->rd->rto_mask) { 2055391e43daSPeter Zijlstra if (this_cpu == cpu) 2056391e43daSPeter Zijlstra continue; 2057391e43daSPeter Zijlstra 2058391e43daSPeter Zijlstra src_rq = cpu_rq(cpu); 2059391e43daSPeter Zijlstra 2060391e43daSPeter Zijlstra /* 2061391e43daSPeter Zijlstra * Don't bother taking the src_rq->lock if the next highest 2062391e43daSPeter Zijlstra * task is known to be lower-priority than our current task. 2063391e43daSPeter Zijlstra * This may look racy, but if this value is about to go 2064391e43daSPeter Zijlstra * logically higher, the src_rq will push this task away. 2065391e43daSPeter Zijlstra * And if it's going logically lower, we do not care 2066391e43daSPeter Zijlstra */ 2067391e43daSPeter Zijlstra if (src_rq->rt.highest_prio.next >= 2068391e43daSPeter Zijlstra this_rq->rt.highest_prio.curr) 2069391e43daSPeter Zijlstra continue; 2070391e43daSPeter Zijlstra 2071391e43daSPeter Zijlstra /* 2072391e43daSPeter Zijlstra * We can potentially drop this_rq's lock in 2073391e43daSPeter Zijlstra * double_lock_balance, and another CPU could 2074391e43daSPeter Zijlstra * alter this_rq 2075391e43daSPeter Zijlstra */ 2076391e43daSPeter Zijlstra double_lock_balance(this_rq, src_rq); 2077391e43daSPeter Zijlstra 2078391e43daSPeter Zijlstra /* 2079e23ee747SKirill Tkhai * We can pull only a task that is pushable 2080e23ee747SKirill Tkhai * on its rq, and no others. 2081391e43daSPeter Zijlstra */ 2082e23ee747SKirill Tkhai p = pick_highest_pushable_task(src_rq, this_cpu); 2083391e43daSPeter Zijlstra 2084391e43daSPeter Zijlstra /* 2085391e43daSPeter Zijlstra * Do we have an RT task that preempts 2086391e43daSPeter Zijlstra * the to-be-scheduled task? 2087391e43daSPeter Zijlstra */ 2088391e43daSPeter Zijlstra if (p && (p->prio < this_rq->rt.highest_prio.curr)) { 2089391e43daSPeter Zijlstra WARN_ON(p == src_rq->curr); 2090da0c1e65SKirill Tkhai WARN_ON(!task_on_rq_queued(p)); 2091391e43daSPeter Zijlstra 2092391e43daSPeter Zijlstra /* 2093391e43daSPeter Zijlstra * There's a chance that p is higher in priority 2094391e43daSPeter Zijlstra * than what's currently running on its cpu. 2095391e43daSPeter Zijlstra * This is just that p is waking up and hasn't 2096391e43daSPeter Zijlstra * had a chance to schedule. We only pull 2097391e43daSPeter Zijlstra * p if it is lower in priority than the 2098391e43daSPeter Zijlstra * current task on the run queue 2099391e43daSPeter Zijlstra */ 2100391e43daSPeter Zijlstra if (p->prio < src_rq->curr->prio) 2101391e43daSPeter Zijlstra goto skip; 2102391e43daSPeter Zijlstra 21038046d680SPeter Zijlstra resched = true; 2104391e43daSPeter Zijlstra 2105391e43daSPeter Zijlstra deactivate_task(src_rq, p, 0); 2106391e43daSPeter Zijlstra set_task_cpu(p, this_cpu); 2107391e43daSPeter Zijlstra activate_task(this_rq, p, 0); 2108391e43daSPeter Zijlstra /* 2109391e43daSPeter Zijlstra * We continue with the search, just in 2110391e43daSPeter Zijlstra * case there's an even higher prio task 2111391e43daSPeter Zijlstra * in another runqueue.
(low likelihood 2112391e43daSPeter Zijlstra * but possible) 2113391e43daSPeter Zijlstra */ 2114391e43daSPeter Zijlstra } 2115391e43daSPeter Zijlstra skip: 2116391e43daSPeter Zijlstra double_unlock_balance(this_rq, src_rq); 2117391e43daSPeter Zijlstra } 2118391e43daSPeter Zijlstra 21198046d680SPeter Zijlstra if (resched) 21208046d680SPeter Zijlstra resched_curr(this_rq); 2121391e43daSPeter Zijlstra } 2122391e43daSPeter Zijlstra 2123391e43daSPeter Zijlstra /* 2124391e43daSPeter Zijlstra * If we are not running and we are not going to reschedule soon, we should 2125391e43daSPeter Zijlstra * try to push tasks away now 2126391e43daSPeter Zijlstra */ 2127391e43daSPeter Zijlstra static void task_woken_rt(struct rq *rq, struct task_struct *p) 2128391e43daSPeter Zijlstra { 2129391e43daSPeter Zijlstra if (!task_running(rq, p) && 2130391e43daSPeter Zijlstra !test_tsk_need_resched(rq->curr) && 21314b53a341SIngo Molnar p->nr_cpus_allowed > 1 && 21321baca4ceSJuri Lelli (dl_task(rq->curr) || rt_task(rq->curr)) && 21334b53a341SIngo Molnar (rq->curr->nr_cpus_allowed < 2 || 2134391e43daSPeter Zijlstra rq->curr->prio <= p->prio)) 2135391e43daSPeter Zijlstra push_rt_tasks(rq); 2136391e43daSPeter Zijlstra } 2137391e43daSPeter Zijlstra 2138391e43daSPeter Zijlstra /* Assumes rq->lock is held */ 2139391e43daSPeter Zijlstra static void rq_online_rt(struct rq *rq) 2140391e43daSPeter Zijlstra { 2141391e43daSPeter Zijlstra if (rq->rt.overloaded) 2142391e43daSPeter Zijlstra rt_set_overload(rq); 2143391e43daSPeter Zijlstra 2144391e43daSPeter Zijlstra __enable_runtime(rq); 2145391e43daSPeter Zijlstra 2146391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); 2147391e43daSPeter Zijlstra } 2148391e43daSPeter Zijlstra 2149391e43daSPeter Zijlstra /* Assumes rq->lock is held */ 2150391e43daSPeter Zijlstra static void rq_offline_rt(struct rq *rq) 2151391e43daSPeter Zijlstra { 2152391e43daSPeter Zijlstra if (rq->rt.overloaded) 2153391e43daSPeter Zijlstra rt_clear_overload(rq); 2154391e43daSPeter Zijlstra 2155391e43daSPeter Zijlstra __disable_runtime(rq); 2156391e43daSPeter Zijlstra 2157391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); 2158391e43daSPeter Zijlstra } 2159391e43daSPeter Zijlstra 2160391e43daSPeter Zijlstra /* 2161391e43daSPeter Zijlstra * When switching from the rt queue, we bring ourselves to a position 2162391e43daSPeter Zijlstra * that we might want to pull RT tasks from other runqueues. 2163391e43daSPeter Zijlstra */ 2164391e43daSPeter Zijlstra static void switched_from_rt(struct rq *rq, struct task_struct *p) 2165391e43daSPeter Zijlstra { 2166391e43daSPeter Zijlstra /* 2167391e43daSPeter Zijlstra * If there are other RT tasks then we will reschedule 2168391e43daSPeter Zijlstra * and the scheduling of the other RT tasks will handle 2169391e43daSPeter Zijlstra * the balancing. But if we are the last RT task 2170391e43daSPeter Zijlstra * we may need to handle the pulling of RT tasks 2171391e43daSPeter Zijlstra * now.
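 * * e.g. when the last queued RT task on this rq is moved to * SCHED_NORMAL via sched_setscheduler(), queue_pull_task() arranges * for pull_rt_task() to run once the rq lock is released.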
2172391e43daSPeter Zijlstra */ 2173da0c1e65SKirill Tkhai if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) 21741158ddb5SKirill Tkhai return; 21751158ddb5SKirill Tkhai 2176fd7a4bedSPeter Zijlstra queue_pull_task(rq); 2177391e43daSPeter Zijlstra } 2178391e43daSPeter Zijlstra 217911c785b7SLi Zefan void __init init_sched_rt_class(void) 2180391e43daSPeter Zijlstra { 2181391e43daSPeter Zijlstra unsigned int i; 2182391e43daSPeter Zijlstra 2183391e43daSPeter Zijlstra for_each_possible_cpu(i) { 2184391e43daSPeter Zijlstra zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 2185391e43daSPeter Zijlstra GFP_KERNEL, cpu_to_node(i)); 2186391e43daSPeter Zijlstra } 2187391e43daSPeter Zijlstra } 2188391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 2189391e43daSPeter Zijlstra 2190391e43daSPeter Zijlstra /* 2191391e43daSPeter Zijlstra * When switching a task to RT, we may overload the runqueue 2192391e43daSPeter Zijlstra * with RT tasks. In this case we try to push them off to 2193391e43daSPeter Zijlstra * other runqueues. 2194391e43daSPeter Zijlstra */ 2195391e43daSPeter Zijlstra static void switched_to_rt(struct rq *rq, struct task_struct *p) 2196391e43daSPeter Zijlstra { 2197391e43daSPeter Zijlstra /* 2198391e43daSPeter Zijlstra * If we are already running, then there's nothing 2199391e43daSPeter Zijlstra * that needs to be done. But if we are not running 2200391e43daSPeter Zijlstra * we may need to preempt the current running task. 2201391e43daSPeter Zijlstra * If that current running task is also an RT task 2202391e43daSPeter Zijlstra * then see if we can move to another run queue. 2203391e43daSPeter Zijlstra */ 2204da0c1e65SKirill Tkhai if (task_on_rq_queued(p) && rq->curr != p) { 2205391e43daSPeter Zijlstra #ifdef CONFIG_SMP 22064b53a341SIngo Molnar if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) 2207fd7a4bedSPeter Zijlstra queue_push_tasks(rq); 2208619bd4a7SSebastian Andrzej Siewior #endif /* CONFIG_SMP */ 2209*2fe25826SPaul E. McKenney if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) 22108875125eSKirill Tkhai resched_curr(rq); 2211391e43daSPeter Zijlstra } 2212391e43daSPeter Zijlstra } 2213391e43daSPeter Zijlstra 2214391e43daSPeter Zijlstra /* 2215391e43daSPeter Zijlstra * Priority of the task has changed. This may cause 2216391e43daSPeter Zijlstra * us to initiate a push or pull. 2217391e43daSPeter Zijlstra */ 2218391e43daSPeter Zijlstra static void 2219391e43daSPeter Zijlstra prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) 2220391e43daSPeter Zijlstra { 2221da0c1e65SKirill Tkhai if (!task_on_rq_queued(p)) 2222391e43daSPeter Zijlstra return; 2223391e43daSPeter Zijlstra 2224391e43daSPeter Zijlstra if (rq->curr == p) { 2225391e43daSPeter Zijlstra #ifdef CONFIG_SMP 2226391e43daSPeter Zijlstra /* 2227391e43daSPeter Zijlstra * If our priority decreases while running, we 2228391e43daSPeter Zijlstra * may need to pull tasks to this runqueue. 2229391e43daSPeter Zijlstra */ 2230391e43daSPeter Zijlstra if (oldprio < p->prio) 2231fd7a4bedSPeter Zijlstra queue_pull_task(rq); 2232fd7a4bedSPeter Zijlstra 2233391e43daSPeter Zijlstra /* 2234391e43daSPeter Zijlstra * If there's a higher priority task waiting to run 2235fd7a4bedSPeter Zijlstra * then reschedule. 
2236391e43daSPeter Zijlstra */ 2237fd7a4bedSPeter Zijlstra if (p->prio > rq->rt.highest_prio.curr) 22388875125eSKirill Tkhai resched_curr(rq); 2239391e43daSPeter Zijlstra #else 2240391e43daSPeter Zijlstra /* For UP simply resched on drop of prio */ 2241391e43daSPeter Zijlstra if (oldprio < p->prio) 22428875125eSKirill Tkhai resched_curr(rq); 2243391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 2244391e43daSPeter Zijlstra } else { 2245391e43daSPeter Zijlstra /* 2246391e43daSPeter Zijlstra * This task is not running, but if it is 2247391e43daSPeter Zijlstra * greater than the current running task 2248391e43daSPeter Zijlstra * then reschedule. 2249391e43daSPeter Zijlstra */ 2250391e43daSPeter Zijlstra if (p->prio < rq->curr->prio) 22518875125eSKirill Tkhai resched_curr(rq); 2252391e43daSPeter Zijlstra } 2253391e43daSPeter Zijlstra } 2254391e43daSPeter Zijlstra 2255b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 2256391e43daSPeter Zijlstra static void watchdog(struct rq *rq, struct task_struct *p) 2257391e43daSPeter Zijlstra { 2258391e43daSPeter Zijlstra unsigned long soft, hard; 2259391e43daSPeter Zijlstra 2260391e43daSPeter Zijlstra /* max may change after cur was read, this will be fixed next tick */ 2261391e43daSPeter Zijlstra soft = task_rlimit(p, RLIMIT_RTTIME); 2262391e43daSPeter Zijlstra hard = task_rlimit_max(p, RLIMIT_RTTIME); 2263391e43daSPeter Zijlstra 2264391e43daSPeter Zijlstra if (soft != RLIM_INFINITY) { 2265391e43daSPeter Zijlstra unsigned long next; 2266391e43daSPeter Zijlstra 226757d2aa00SYing Xue if (p->rt.watchdog_stamp != jiffies) { 2268391e43daSPeter Zijlstra p->rt.timeout++; 226957d2aa00SYing Xue p->rt.watchdog_stamp = jiffies; 227057d2aa00SYing Xue } 227157d2aa00SYing Xue 2272391e43daSPeter Zijlstra next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); 2273391e43daSPeter Zijlstra if (p->rt.timeout > next) 2274391e43daSPeter Zijlstra p->cputime_expires.sched_exp = p->se.sum_exec_runtime; 2275391e43daSPeter Zijlstra } 2276391e43daSPeter Zijlstra } 2277b18b6a9cSNicolas Pitre #else 2278b18b6a9cSNicolas Pitre static inline void watchdog(struct rq *rq, struct task_struct *p) { } 2279b18b6a9cSNicolas Pitre #endif 2280391e43daSPeter Zijlstra 2281391e43daSPeter Zijlstra static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) 2282391e43daSPeter Zijlstra { 2283454c7999SColin Cross struct sched_rt_entity *rt_se = &p->rt; 2284454c7999SColin Cross 2285391e43daSPeter Zijlstra update_curr_rt(rq); 2286391e43daSPeter Zijlstra 2287391e43daSPeter Zijlstra watchdog(rq, p); 2288391e43daSPeter Zijlstra 2289391e43daSPeter Zijlstra /* 2290391e43daSPeter Zijlstra * RR tasks need a special form of timeslice management. 2291391e43daSPeter Zijlstra * FIFO tasks have no timeslices. 
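 * (An RR task's slice is sched_rr_timeslice ticks, RR_TIMESLICE by * default, i.e. nominally 100ms; it can be tuned via the * sched_rr_timeslice_ms sysctl, and a task can query it with * sched_rr_get_interval().)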
2292391e43daSPeter Zijlstra */ 2293391e43daSPeter Zijlstra if (p->policy != SCHED_RR) 2294391e43daSPeter Zijlstra return; 2295391e43daSPeter Zijlstra 2296391e43daSPeter Zijlstra if (--p->rt.time_slice) 2297391e43daSPeter Zijlstra return; 2298391e43daSPeter Zijlstra 2299ce0dbbbbSClark Williams p->rt.time_slice = sched_rr_timeslice; 2300391e43daSPeter Zijlstra 2301391e43daSPeter Zijlstra /* 2302e9aa39bbSLi Bin * Requeue to the end of the queue if we (and all of our ancestors) are 2303e9aa39bbSLi Bin * not the only element on the queue. 2304391e43daSPeter Zijlstra */ 2305454c7999SColin Cross for_each_sched_rt_entity(rt_se) { 2306454c7999SColin Cross if (rt_se->run_list.prev != rt_se->run_list.next) { 2307391e43daSPeter Zijlstra requeue_task_rt(rq, p, 0); 23088aa6f0ebSKirill Tkhai resched_curr(rq); 2309454c7999SColin Cross return; 2310454c7999SColin Cross } 2311391e43daSPeter Zijlstra } 2312391e43daSPeter Zijlstra } 2313391e43daSPeter Zijlstra 2314391e43daSPeter Zijlstra static void set_curr_task_rt(struct rq *rq) 2315391e43daSPeter Zijlstra { 2316391e43daSPeter Zijlstra struct task_struct *p = rq->curr; 2317391e43daSPeter Zijlstra 231878becc27SFrederic Weisbecker p->se.exec_start = rq_clock_task(rq); 2319391e43daSPeter Zijlstra 2320391e43daSPeter Zijlstra /* The running task is never eligible for pushing */ 2321391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 2322391e43daSPeter Zijlstra } 2323391e43daSPeter Zijlstra 2324391e43daSPeter Zijlstra static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) 2325391e43daSPeter Zijlstra { 2326391e43daSPeter Zijlstra /* 2327391e43daSPeter Zijlstra * Time slice is 0 for SCHED_FIFO tasks 2328391e43daSPeter Zijlstra */ 2329391e43daSPeter Zijlstra if (task->policy == SCHED_RR) 2330ce0dbbbbSClark Williams return sched_rr_timeslice; 2331391e43daSPeter Zijlstra else 2332391e43daSPeter Zijlstra return 0; 2333391e43daSPeter Zijlstra } 2334391e43daSPeter Zijlstra 2335391e43daSPeter Zijlstra const struct sched_class rt_sched_class = { 2336391e43daSPeter Zijlstra .next = &fair_sched_class, 2337391e43daSPeter Zijlstra .enqueue_task = enqueue_task_rt, 2338391e43daSPeter Zijlstra .dequeue_task = dequeue_task_rt, 2339391e43daSPeter Zijlstra .yield_task = yield_task_rt, 2340391e43daSPeter Zijlstra 2341391e43daSPeter Zijlstra .check_preempt_curr = check_preempt_curr_rt, 2342391e43daSPeter Zijlstra 2343391e43daSPeter Zijlstra .pick_next_task = pick_next_task_rt, 2344391e43daSPeter Zijlstra .put_prev_task = put_prev_task_rt, 2345391e43daSPeter Zijlstra 2346391e43daSPeter Zijlstra #ifdef CONFIG_SMP 2347391e43daSPeter Zijlstra .select_task_rq = select_task_rq_rt, 2348391e43daSPeter Zijlstra 23496c37067eSPeter Zijlstra .set_cpus_allowed = set_cpus_allowed_common, 2350391e43daSPeter Zijlstra .rq_online = rq_online_rt, 2351391e43daSPeter Zijlstra .rq_offline = rq_offline_rt, 2352391e43daSPeter Zijlstra .task_woken = task_woken_rt, 2353391e43daSPeter Zijlstra .switched_from = switched_from_rt, 2354391e43daSPeter Zijlstra #endif 2355391e43daSPeter Zijlstra 2356391e43daSPeter Zijlstra .set_curr_task = set_curr_task_rt, 2357391e43daSPeter Zijlstra .task_tick = task_tick_rt, 2358391e43daSPeter Zijlstra 2359391e43daSPeter Zijlstra .get_rr_interval = get_rr_interval_rt, 2360391e43daSPeter Zijlstra 2361391e43daSPeter Zijlstra .prio_changed = prio_changed_rt, 2362391e43daSPeter Zijlstra .switched_to = switched_to_rt, 23636e998916SStanislaw Gruszka 23646e998916SStanislaw Gruszka .update_curr = update_curr_rt, 2365391e43daSPeter Zijlstra }; 2366391e43daSPeter
Zijlstra 23678887cd99SNicolas Pitre #ifdef CONFIG_RT_GROUP_SCHED 23688887cd99SNicolas Pitre /* 23698887cd99SNicolas Pitre * Ensure that the real time constraints are schedulable. 23708887cd99SNicolas Pitre */ 23718887cd99SNicolas Pitre static DEFINE_MUTEX(rt_constraints_mutex); 23728887cd99SNicolas Pitre 23738887cd99SNicolas Pitre /* Must be called with tasklist_lock held */ 23748887cd99SNicolas Pitre static inline int tg_has_rt_tasks(struct task_group *tg) 23758887cd99SNicolas Pitre { 23768887cd99SNicolas Pitre struct task_struct *g, *p; 23778887cd99SNicolas Pitre 23788887cd99SNicolas Pitre /* 23798887cd99SNicolas Pitre * Autogroups do not have RT tasks; see autogroup_create(). 23808887cd99SNicolas Pitre */ 23818887cd99SNicolas Pitre if (task_group_is_autogroup(tg)) 23828887cd99SNicolas Pitre return 0; 23838887cd99SNicolas Pitre 23848887cd99SNicolas Pitre for_each_process_thread(g, p) { 23858887cd99SNicolas Pitre if (rt_task(p) && task_group(p) == tg) 23868887cd99SNicolas Pitre return 1; 23878887cd99SNicolas Pitre } 23888887cd99SNicolas Pitre 23898887cd99SNicolas Pitre return 0; 23908887cd99SNicolas Pitre } 23918887cd99SNicolas Pitre 23928887cd99SNicolas Pitre struct rt_schedulable_data { 23938887cd99SNicolas Pitre struct task_group *tg; 23948887cd99SNicolas Pitre u64 rt_period; 23958887cd99SNicolas Pitre u64 rt_runtime; 23968887cd99SNicolas Pitre }; 23978887cd99SNicolas Pitre 23988887cd99SNicolas Pitre static int tg_rt_schedulable(struct task_group *tg, void *data) 23998887cd99SNicolas Pitre { 24008887cd99SNicolas Pitre struct rt_schedulable_data *d = data; 24018887cd99SNicolas Pitre struct task_group *child; 24028887cd99SNicolas Pitre unsigned long total, sum = 0; 24038887cd99SNicolas Pitre u64 period, runtime; 24048887cd99SNicolas Pitre 24058887cd99SNicolas Pitre period = ktime_to_ns(tg->rt_bandwidth.rt_period); 24068887cd99SNicolas Pitre runtime = tg->rt_bandwidth.rt_runtime; 24078887cd99SNicolas Pitre 24088887cd99SNicolas Pitre if (tg == d->tg) { 24098887cd99SNicolas Pitre period = d->rt_period; 24108887cd99SNicolas Pitre runtime = d->rt_runtime; 24118887cd99SNicolas Pitre } 24128887cd99SNicolas Pitre 24138887cd99SNicolas Pitre /* 24148887cd99SNicolas Pitre * Cannot have more runtime than the period. 24158887cd99SNicolas Pitre */ 24168887cd99SNicolas Pitre if (runtime > period && runtime != RUNTIME_INF) 24178887cd99SNicolas Pitre return -EINVAL; 24188887cd99SNicolas Pitre 24198887cd99SNicolas Pitre /* 24208887cd99SNicolas Pitre * Ensure we don't starve existing RT tasks. 24218887cd99SNicolas Pitre */ 24228887cd99SNicolas Pitre if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) 24238887cd99SNicolas Pitre return -EBUSY; 24248887cd99SNicolas Pitre 24258887cd99SNicolas Pitre total = to_ratio(period, runtime); 24268887cd99SNicolas Pitre 24278887cd99SNicolas Pitre /* 24288887cd99SNicolas Pitre * Nobody can have more than the global setting allows. 24298887cd99SNicolas Pitre */ 24308887cd99SNicolas Pitre if (total > to_ratio(global_rt_period(), global_rt_runtime())) 24318887cd99SNicolas Pitre return -EINVAL; 24328887cd99SNicolas Pitre 24338887cd99SNicolas Pitre /* 24348887cd99SNicolas Pitre * The sum of our children's runtime should not exceed our own. 
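 *
 * Worked example with illustrative numbers only: at rt_period = 1s and
 * rt_runtime = 950ms this group's ratio is 0.95.  Two children at
 * 400ms over the same period contribute 0.4 + 0.4 = 0.8 <= 0.95 and
 * are accepted; a third such child would push the sum to 1.2 > 0.95
 * and the check below would fail with -EINVAL.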
24358887cd99SNicolas Pitre */ 24368887cd99SNicolas Pitre list_for_each_entry_rcu(child, &tg->children, siblings) { 24378887cd99SNicolas Pitre period = ktime_to_ns(child->rt_bandwidth.rt_period); 24388887cd99SNicolas Pitre runtime = child->rt_bandwidth.rt_runtime; 24398887cd99SNicolas Pitre 24408887cd99SNicolas Pitre if (child == d->tg) { 24418887cd99SNicolas Pitre period = d->rt_period; 24428887cd99SNicolas Pitre runtime = d->rt_runtime; 24438887cd99SNicolas Pitre } 24448887cd99SNicolas Pitre 24458887cd99SNicolas Pitre sum += to_ratio(period, runtime); 24468887cd99SNicolas Pitre } 24478887cd99SNicolas Pitre 24488887cd99SNicolas Pitre if (sum > total) 24498887cd99SNicolas Pitre return -EINVAL; 24508887cd99SNicolas Pitre 24518887cd99SNicolas Pitre return 0; 24528887cd99SNicolas Pitre } 24538887cd99SNicolas Pitre 24548887cd99SNicolas Pitre static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 24558887cd99SNicolas Pitre { 24568887cd99SNicolas Pitre int ret; 24578887cd99SNicolas Pitre 24588887cd99SNicolas Pitre struct rt_schedulable_data data = { 24598887cd99SNicolas Pitre .tg = tg, 24608887cd99SNicolas Pitre .rt_period = period, 24618887cd99SNicolas Pitre .rt_runtime = runtime, 24628887cd99SNicolas Pitre }; 24638887cd99SNicolas Pitre 24648887cd99SNicolas Pitre rcu_read_lock(); 24658887cd99SNicolas Pitre ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); 24668887cd99SNicolas Pitre rcu_read_unlock(); 24678887cd99SNicolas Pitre 24688887cd99SNicolas Pitre return ret; 24698887cd99SNicolas Pitre } 24708887cd99SNicolas Pitre 24718887cd99SNicolas Pitre static int tg_set_rt_bandwidth(struct task_group *tg, 24728887cd99SNicolas Pitre u64 rt_period, u64 rt_runtime) 24738887cd99SNicolas Pitre { 24748887cd99SNicolas Pitre int i, err = 0; 24758887cd99SNicolas Pitre 24768887cd99SNicolas Pitre /* 24778887cd99SNicolas Pitre * Disallowing the root group RT runtime is BAD; it would disallow the 24788887cd99SNicolas Pitre * kernel creating (and/or operating) RT threads. 24798887cd99SNicolas Pitre */ 24808887cd99SNicolas Pitre if (tg == &root_task_group && rt_runtime == 0) 24818887cd99SNicolas Pitre return -EINVAL; 24828887cd99SNicolas Pitre 24838887cd99SNicolas Pitre /* A period of zero doesn't make any sense.
*/ 24848887cd99SNicolas Pitre if (rt_period == 0) 24858887cd99SNicolas Pitre return -EINVAL; 24868887cd99SNicolas Pitre 24878887cd99SNicolas Pitre mutex_lock(&rt_constraints_mutex); 24888887cd99SNicolas Pitre read_lock(&tasklist_lock); 24898887cd99SNicolas Pitre err = __rt_schedulable(tg, rt_period, rt_runtime); 24908887cd99SNicolas Pitre if (err) 24918887cd99SNicolas Pitre goto unlock; 24928887cd99SNicolas Pitre 24938887cd99SNicolas Pitre raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 24948887cd99SNicolas Pitre tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 24958887cd99SNicolas Pitre tg->rt_bandwidth.rt_runtime = rt_runtime; 24968887cd99SNicolas Pitre 24978887cd99SNicolas Pitre for_each_possible_cpu(i) { 24988887cd99SNicolas Pitre struct rt_rq *rt_rq = tg->rt_rq[i]; 24998887cd99SNicolas Pitre 25008887cd99SNicolas Pitre raw_spin_lock(&rt_rq->rt_runtime_lock); 25018887cd99SNicolas Pitre rt_rq->rt_runtime = rt_runtime; 25028887cd99SNicolas Pitre raw_spin_unlock(&rt_rq->rt_runtime_lock); 25038887cd99SNicolas Pitre } 25048887cd99SNicolas Pitre raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 25058887cd99SNicolas Pitre unlock: 25068887cd99SNicolas Pitre read_unlock(&tasklist_lock); 25078887cd99SNicolas Pitre mutex_unlock(&rt_constraints_mutex); 25088887cd99SNicolas Pitre 25098887cd99SNicolas Pitre return err; 25108887cd99SNicolas Pitre } 25118887cd99SNicolas Pitre 25128887cd99SNicolas Pitre int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) 25138887cd99SNicolas Pitre { 25148887cd99SNicolas Pitre u64 rt_runtime, rt_period; 25158887cd99SNicolas Pitre 25168887cd99SNicolas Pitre rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); 25178887cd99SNicolas Pitre rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; 25188887cd99SNicolas Pitre if (rt_runtime_us < 0) 25198887cd99SNicolas Pitre rt_runtime = RUNTIME_INF; 25208887cd99SNicolas Pitre 25218887cd99SNicolas Pitre return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 25228887cd99SNicolas Pitre } 25238887cd99SNicolas Pitre 25248887cd99SNicolas Pitre long sched_group_rt_runtime(struct task_group *tg) 25258887cd99SNicolas Pitre { 25268887cd99SNicolas Pitre u64 rt_runtime_us; 25278887cd99SNicolas Pitre 25288887cd99SNicolas Pitre if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) 25298887cd99SNicolas Pitre return -1; 25308887cd99SNicolas Pitre 25318887cd99SNicolas Pitre rt_runtime_us = tg->rt_bandwidth.rt_runtime; 25328887cd99SNicolas Pitre do_div(rt_runtime_us, NSEC_PER_USEC); 25338887cd99SNicolas Pitre return rt_runtime_us; 25348887cd99SNicolas Pitre } 25358887cd99SNicolas Pitre 25368887cd99SNicolas Pitre int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) 25378887cd99SNicolas Pitre { 25388887cd99SNicolas Pitre u64 rt_runtime, rt_period; 25398887cd99SNicolas Pitre 25408887cd99SNicolas Pitre rt_period = rt_period_us * NSEC_PER_USEC; 25418887cd99SNicolas Pitre rt_runtime = tg->rt_bandwidth.rt_runtime; 25428887cd99SNicolas Pitre 25438887cd99SNicolas Pitre return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 25448887cd99SNicolas Pitre } 25458887cd99SNicolas Pitre 25468887cd99SNicolas Pitre long sched_group_rt_period(struct task_group *tg) 25478887cd99SNicolas Pitre { 25488887cd99SNicolas Pitre u64 rt_period_us; 25498887cd99SNicolas Pitre 25508887cd99SNicolas Pitre rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); 25518887cd99SNicolas Pitre do_div(rt_period_us, NSEC_PER_USEC); 25528887cd99SNicolas Pitre return rt_period_us; 25538887cd99SNicolas Pitre } 25548887cd99SNicolas Pitre 
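/*
 * Hedged usage sketch (userspace C, never compiled here): the setters
 * above are normally driven through the cgroup-v1 cpu controller.  The
 * mount point and the group name "rtgroup" are assumptions; values are
 * written in microseconds, matching the *_us units above.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static int write_us(const char *path, long us)
 *	{
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		dprintf(fd, "%ld", us);
 *		return close(fd);
 *	}
 *
 *	write_us("/sys/fs/cgroup/cpu/rtgroup/cpu.rt_runtime_us", 950000);
 *	write_us("/sys/fs/cgroup/cpu/rtgroup/cpu.rt_period_us", 1000000);
 *
 * A value that fails __rt_schedulable() above is rejected, so the
 * write is seen to fail in userspace (EINVAL or EBUSY).
 */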
25558887cd99SNicolas Pitre static int sched_rt_global_constraints(void) 25568887cd99SNicolas Pitre { 25578887cd99SNicolas Pitre int ret = 0; 25588887cd99SNicolas Pitre 25598887cd99SNicolas Pitre mutex_lock(&rt_constraints_mutex); 25608887cd99SNicolas Pitre read_lock(&tasklist_lock); 25618887cd99SNicolas Pitre ret = __rt_schedulable(NULL, 0, 0); 25628887cd99SNicolas Pitre read_unlock(&tasklist_lock); 25638887cd99SNicolas Pitre mutex_unlock(&rt_constraints_mutex); 25648887cd99SNicolas Pitre 25658887cd99SNicolas Pitre return ret; 25668887cd99SNicolas Pitre } 25678887cd99SNicolas Pitre 25688887cd99SNicolas Pitre int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) 25698887cd99SNicolas Pitre { 25708887cd99SNicolas Pitre /* Don't accept realtime tasks when there is no way for them to run */ 25718887cd99SNicolas Pitre if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) 25728887cd99SNicolas Pitre return 0; 25738887cd99SNicolas Pitre 25748887cd99SNicolas Pitre return 1; 25758887cd99SNicolas Pitre } 25768887cd99SNicolas Pitre 25778887cd99SNicolas Pitre #else /* !CONFIG_RT_GROUP_SCHED */ 25788887cd99SNicolas Pitre static int sched_rt_global_constraints(void) 25798887cd99SNicolas Pitre { 25808887cd99SNicolas Pitre unsigned long flags; 25818887cd99SNicolas Pitre int i; 25828887cd99SNicolas Pitre 25838887cd99SNicolas Pitre raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 25848887cd99SNicolas Pitre for_each_possible_cpu(i) { 25858887cd99SNicolas Pitre struct rt_rq *rt_rq = &cpu_rq(i)->rt; 25868887cd99SNicolas Pitre 25878887cd99SNicolas Pitre raw_spin_lock(&rt_rq->rt_runtime_lock); 25888887cd99SNicolas Pitre rt_rq->rt_runtime = global_rt_runtime(); 25898887cd99SNicolas Pitre raw_spin_unlock(&rt_rq->rt_runtime_lock); 25908887cd99SNicolas Pitre } 25918887cd99SNicolas Pitre raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 25928887cd99SNicolas Pitre 25938887cd99SNicolas Pitre return 0; 25948887cd99SNicolas Pitre } 25958887cd99SNicolas Pitre #endif /* CONFIG_RT_GROUP_SCHED */ 25968887cd99SNicolas Pitre 25978887cd99SNicolas Pitre static int sched_rt_global_validate(void) 25988887cd99SNicolas Pitre { 25998887cd99SNicolas Pitre if (sysctl_sched_rt_period <= 0) 26008887cd99SNicolas Pitre return -EINVAL; 26018887cd99SNicolas Pitre 26028887cd99SNicolas Pitre if ((sysctl_sched_rt_runtime != RUNTIME_INF) && 26038887cd99SNicolas Pitre (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) 26048887cd99SNicolas Pitre return -EINVAL; 26058887cd99SNicolas Pitre 26068887cd99SNicolas Pitre return 0; 26078887cd99SNicolas Pitre } 26088887cd99SNicolas Pitre 26098887cd99SNicolas Pitre static void sched_rt_do_global(void) 26108887cd99SNicolas Pitre { 26118887cd99SNicolas Pitre def_rt_bandwidth.rt_runtime = global_rt_runtime(); 26128887cd99SNicolas Pitre def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); 26138887cd99SNicolas Pitre } 26148887cd99SNicolas Pitre 26158887cd99SNicolas Pitre int sched_rt_handler(struct ctl_table *table, int write, 26168887cd99SNicolas Pitre void __user *buffer, size_t *lenp, 26178887cd99SNicolas Pitre loff_t *ppos) 26188887cd99SNicolas Pitre { 26198887cd99SNicolas Pitre int old_period, old_runtime; 26208887cd99SNicolas Pitre static DEFINE_MUTEX(mutex); 26218887cd99SNicolas Pitre int ret; 26228887cd99SNicolas Pitre 26238887cd99SNicolas Pitre mutex_lock(&mutex); 26248887cd99SNicolas Pitre old_period = sysctl_sched_rt_period; 26258887cd99SNicolas Pitre old_runtime = sysctl_sched_rt_runtime; 26268887cd99SNicolas Pitre 26278887cd99SNicolas 
Pitre ret = proc_dointvec(table, write, buffer, lenp, ppos); 26288887cd99SNicolas Pitre 26298887cd99SNicolas Pitre if (!ret && write) { 26308887cd99SNicolas Pitre ret = sched_rt_global_validate(); 26318887cd99SNicolas Pitre if (ret) 26328887cd99SNicolas Pitre goto undo; 26338887cd99SNicolas Pitre 26348887cd99SNicolas Pitre ret = sched_dl_global_validate(); 26358887cd99SNicolas Pitre if (ret) 26368887cd99SNicolas Pitre goto undo; 26378887cd99SNicolas Pitre 26388887cd99SNicolas Pitre ret = sched_rt_global_constraints(); 26398887cd99SNicolas Pitre if (ret) 26408887cd99SNicolas Pitre goto undo; 26418887cd99SNicolas Pitre 26428887cd99SNicolas Pitre sched_rt_do_global(); 26438887cd99SNicolas Pitre sched_dl_do_global(); 26448887cd99SNicolas Pitre } 26458887cd99SNicolas Pitre if (0) { 26468887cd99SNicolas Pitre undo: 26478887cd99SNicolas Pitre sysctl_sched_rt_period = old_period; 26488887cd99SNicolas Pitre sysctl_sched_rt_runtime = old_runtime; 26498887cd99SNicolas Pitre } 26508887cd99SNicolas Pitre mutex_unlock(&mutex); 26518887cd99SNicolas Pitre 26528887cd99SNicolas Pitre return ret; 26538887cd99SNicolas Pitre } 26548887cd99SNicolas Pitre 26558887cd99SNicolas Pitre int sched_rr_handler(struct ctl_table *table, int write, 26568887cd99SNicolas Pitre void __user *buffer, size_t *lenp, 26578887cd99SNicolas Pitre loff_t *ppos) 26588887cd99SNicolas Pitre { 26598887cd99SNicolas Pitre int ret; 26608887cd99SNicolas Pitre static DEFINE_MUTEX(mutex); 26618887cd99SNicolas Pitre 26628887cd99SNicolas Pitre mutex_lock(&mutex); 26638887cd99SNicolas Pitre ret = proc_dointvec(table, write, buffer, lenp, ppos); 26648887cd99SNicolas Pitre /* 26658887cd99SNicolas Pitre * Make sure that internally we keep jiffies. 26668887cd99SNicolas Pitre * Also, writing zero resets the timeslice to default: 26678887cd99SNicolas Pitre */ 26688887cd99SNicolas Pitre if (!ret && write) { 26698887cd99SNicolas Pitre sched_rr_timeslice = 26708887cd99SNicolas Pitre sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE : 26718887cd99SNicolas Pitre msecs_to_jiffies(sysctl_sched_rr_timeslice); 26728887cd99SNicolas Pitre } 26738887cd99SNicolas Pitre mutex_unlock(&mutex); 26748887cd99SNicolas Pitre return ret; 26758887cd99SNicolas Pitre } 26768887cd99SNicolas Pitre 2677391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 2678391e43daSPeter Zijlstra extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2679391e43daSPeter Zijlstra 2680391e43daSPeter Zijlstra void print_rt_stats(struct seq_file *m, int cpu) 2681391e43daSPeter Zijlstra { 2682391e43daSPeter Zijlstra rt_rq_iter_t iter; 2683391e43daSPeter Zijlstra struct rt_rq *rt_rq; 2684391e43daSPeter Zijlstra 2685391e43daSPeter Zijlstra rcu_read_lock(); 2686391e43daSPeter Zijlstra for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) 2687391e43daSPeter Zijlstra print_rt_rq(m, cpu, rt_rq); 2688391e43daSPeter Zijlstra rcu_read_unlock(); 2689391e43daSPeter Zijlstra } 2690391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_DEBUG */ 2691
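/*
 * Hedged usage sketch (userspace C, never compiled here): the
 * sched_rt_handler() and sched_rr_handler() entry points above are
 * reached through the standard procfs sysctls; the values below are
 * examples only.
 *
 *	#include <stdio.h>
 *
 *	FILE *f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "w");
 *
 *	if (f) {
 *		fprintf(f, "900000");
 *		fclose(f);
 *	}
 *
 * Writing -1 selects RUNTIME_INF; a runtime larger than
 * /proc/sys/kernel/sched_rt_period_us is rejected by
 * sched_rt_global_validate() with -EINVAL.  The RR slice is set in
 * milliseconds via /proc/sys/kernel/sched_rr_timeslice_ms, where a
 * write of 0 resets it to the default, as sched_rr_handler() shows.
 */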