// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_HARD);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}
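/*
 * Illustrative sketch, not part of this file: with the default sysctls
 * (sched_rt_period_us = 1000000, sched_rt_runtime_us = 950000), the core
 * scheduler sets up def_rt_bandwidth roughly as (assumption: values taken
 * from the usual /proc/sys/kernel defaults):
 *
 *	init_rt_bandwidth(&def_rt_bandwidth,
 *			  1000000ULL * NSEC_PER_USEC,	(1s period)
 *			  950000ULL * NSEC_PER_USEC);	(0.95s runtime)
 *
 * i.e. RT tasks may consume at most 950ms of every 1s window before
 * sched_rt_runtime_exceeded() throttles them for the rest of the period.
 */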
static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/*
		 * SCHED_DEADLINE updates the bandwidth, as a runaway
		 * RT task with a DL task could hog a CPU. But DL does
		 * not reset the period. If a deadline task was running
		 * without an RT task running, it can cause RT tasks to
		 * throttle when they start up. Kick the timer right away
		 * to update the period.
		 */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	do_start_rt_bandwidth(rt_b);
}

void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	rt_rq->highest_prio.next = MAX_RT_PRIO-1;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}
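/*
 * Illustrative sketch, not part of this file: under CONFIG_RT_GROUP_SCHED
 * a group's sched_rt_entity sits on its parent's rt_rq while owning an
 * rt_rq of its own:
 *
 *	rt_se->rt_rq	the runqueue this entity is queued on
 *	rt_se->my_q	the runqueue this entity represents (NULL for tasks)
 *
 * so rt_entity_is_task() simply tests my_q, and walking rt_se->parent
 * (see for_each_sched_rt_entity() below) climbs from a task up to the
 * root rt_rq.
 */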
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

void unregister_rt_sched_group(struct task_group *tg)
{
	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		      struct sched_rt_entity *rt_se, int cpu,
		      struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void unregister_rt_sched_group(struct task_group *tg) { }

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->online && rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}
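/*
 * Illustrative sketch, not part of this file: the write barrier above
 * pairs with a read barrier on the observing side, which must read
 * rto_count before it trusts rto_mask. Roughly:
 *
 *	writer (rt_set_overload)	reader (pull_rt_task)
 *	------------------------	---------------------
 *	set bit in rd->rto_mask		if (!rt_overloaded(this_rq))
 *	smp_wmb();				return;
 *	atomic_inc(&rd->rto_count);	smp_rmb();
 *					scan rd->rto_mask
 *
 * Without the barriers the reader could see a non-zero count while the
 * mask update is still invisible to it.
 */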
static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}
static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void rt_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else {
		rq->rt.highest_prio.next = MAX_RT_PRIO-1;
	}
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
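/*
 * Illustrative sketch, not part of this file: push/pull work cannot run
 * while rq->lock is held, so it is queued as a balance callback and
 * invoked when the lock is released (assumption: this describes the
 * generic queue_balance_callback() machinery):
 *
 *	rq_lock(rq, &rf);
 *	...			pick_next_task_rt() etc. decide a push or
 *				pull is needed and call
 *				rt_queue_push_tasks()/rt_queue_pull_task()
 *	rq_unlock(rq, &rf);	queued callbacks such as push_rt_tasks()
 *				run from here, lock-safe
 *
 * One callback_head per CPU and per direction suffices because queueing
 * an already-queued head is a no-op.
 */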
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where the uclamp_min
 * value is higher than the capacity of a @cpu. For non-heterogeneous systems
 * this function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned int min_cap;
	unsigned int max_cap;
	unsigned int cpu_cap;

	/* Only heterogeneous systems can benefit from this check */
	if (!static_branch_unlikely(&sched_asym_cpucapacity))
		return true;

	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
	max_cap = uclamp_eff_value(p, UCLAMP_MAX);

	cpu_cap = capacity_orig_of(cpu);

	return cpu_cap >= min(min_cap, max_cap);
}
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	return true;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}
#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se) {
		dequeue_top_rt_rq(rt_rq);
		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
	} else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}
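/*
 * Illustrative sketch, not part of this file: these two helpers take a
 * whole group runqueue on and off its parent. When
 * sched_rt_runtime_exceeded() throttles a group, the flow is roughly:
 *
 *	rt_rq->rt_throttled = 1;
 *	sched_rt_rq_dequeue(rt_rq);	group disappears from the tree
 *	...				period timer replenishes runtime
 *	rt_rq->rt_throttled = 0;
 *	sched_rt_rq_enqueue(rt_rq);	group becomes runnable again,
 *					preempting a lower-prio current
 */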
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}
static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}
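/*
 * Illustrative sketch, not part of this file: callers (assumption: the
 * deadline class, which charges DL-boosted time against the RT budget)
 * should account rt_time only while enforcement can still act on it,
 * i.e. while the period timer is live or budget remains:
 *
 *	rt_time = 40ms, rt_runtime = 95ms	-> account (budget left)
 *	rt_time = 95ms, timer still active	-> account (replenish coming)
 *	rt_time >= rt_runtime, timer dead	-> don't bother
 */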
#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}
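/*
 * Illustrative sketch, not part of this file: on a 4-CPU root domain
 * (weight = 4) with period = 1000ms, suppose this rt_rq exhausted its
 * 950ms runtime while a neighbour has used only 150ms of its 950ms:
 *
 *	neighbour spare:  950ms - 150ms = 800ms
 *	1/n share:        800ms / 4     = 200ms
 *	capped:           period - our runtime = 1000ms - 950ms = 50ms
 *
 * so we take only 50ms, reach the full period (1000ms) and stop early;
 * the neighbour keeps 900ms.
 */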
/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}
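/*
 * Illustrative sketch, not part of this file: the disable/enable pair
 * brackets a CPU going through hotplug (assumption: reached via the
 * rq_offline_rt()/rq_online_rt() callbacks):
 *
 *	CPU goes offline  -> __disable_runtime(rq)
 *			     reclaim lent runtime, mark rqs RUNTIME_INF
 *	CPU comes online  -> __enable_runtime(rq)
 *			     reset runtime/time/throttled from rt_bandwidth
 *
 * Marking RUNTIME_INF in between keeps do_balance_runtime() from lending
 * to, or stealing from, a dead runqueue.
 */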
static void balance_runtime(struct rt_rq *rt_rq)
{
	if (!sched_feat(RT_RUNTIME_SHARE))
		return;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled. If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway. Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);
		struct rq_flags rf;
		int skip;

		/*
		 * When span == cpu_online_mask, taking each rq->lock
		 * can be time-consuming. Try to avoid it when possible.
		 */
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
			rt_rq->rt_runtime = rt_b->rt_runtime;
		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		if (skip)
			continue;

		rq_lock(rq, &rf);
		update_rq_clock(rq);

		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled, check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_cancel_skipupdate(rq);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		rq_unlock(rq, &rf);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}
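/*
 * Illustrative sketch, not part of this file: replenishment above is
 * rt_time -= min(rt_time, overrun * runtime). With runtime = 950ms and
 * a single expired period (overrun = 1):
 *
 *	rt_time = 960ms -> 960 - min(960, 950) = 10ms; 10ms < 950ms, so
 *			   the group is unthrottled and re-enqueued
 *	rt_time =  40ms -> 40 - min(40, 950) = 0ms (was never throttled)
 *
 * If several periods were missed (overrun > 1), one call digests them
 * all instead of replaying the timer once per period.
 */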
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
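/*
 * Illustrative sketch, not part of this file: with the default
 * 950ms/1000ms budget, a tight RT loop is handled as:
 *
 *	rt_time = 951ms > runtime = 950ms
 *	  -> rt_throttled = 1, "sched: RT throttling activated" printed
 *	     once, sched_rt_rq_dequeue(), caller reschedules the CPU
 *
 * whereas a boosted group with rt_runtime = 0 merely gets rt_time reset,
 * since throttling it would replenish 0 ns forever. A runtime >= period
 * configuration disables the check entirely (never exceeded).
 */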
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	u64 delta_exec;
	u64 now;

	if (curr->sched_class != &rt_sched_class)
		return;

	now = rq_clock_task(rq);
	delta_exec = now - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->stats.exec_max,
		      max(curr->stats.exec_max, delta_exec));

	trace_sched_stat_runtime(curr, delta_exec, 0);

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = now;
	cgroup_account_cputime(curr, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
		int exceeded;

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			exceeded = sched_rt_runtime_exceeded(rt_rq);
			if (exceeded)
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
			if (exceeded)
				do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
		}
	}
}

static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;

	if (rt_rq_throttled(rt_rq))
		return;

	if (rt_rq->rt_nr_running) {
		add_nr_running(rq, rt_rq->rt_nr_running);
		rt_rq->rt_queued = 1;
	}

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, 0);
}
#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else {
		rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	}

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}
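/*
 * Illustrative sketch, not part of this file: highest_prio.curr is a
 * cache over the priority bitmap, so only removing a task at the cached
 * priority forces a rescan. E.g. with queued tasks at prio 10 and 40
 * (lower value = higher priority):
 *
 *	dec_rt_prio(rt_rq, 40)	-> 40 != 10, cache untouched
 *	dec_rt_prio(rt_rq, 10)	-> rescan: sched_find_first_bit() finds 40
 *	last task leaves	-> cache reset to MAX_RT_PRIO-1
 */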
#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq)
		return group_rq->rt_nr_running;
	else
		return 1;
}
122001d36d0aSFrederic Weisbecker if (group_rq) 122101d36d0aSFrederic Weisbecker return group_rq->rr_nr_running; 122201d36d0aSFrederic Weisbecker 122301d36d0aSFrederic Weisbecker tsk = rt_task_of(rt_se); 122401d36d0aSFrederic Weisbecker 122501d36d0aSFrederic Weisbecker return (tsk->policy == SCHED_RR) ? 1 : 0; 122601d36d0aSFrederic Weisbecker } 122701d36d0aSFrederic Weisbecker 122801d36d0aSFrederic Weisbecker static inline 1229391e43daSPeter Zijlstra void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1230391e43daSPeter Zijlstra { 1231391e43daSPeter Zijlstra int prio = rt_se_prio(rt_se); 1232391e43daSPeter Zijlstra 1233391e43daSPeter Zijlstra WARN_ON(!rt_prio(prio)); 123422abdef3SKirill Tkhai rt_rq->rt_nr_running += rt_se_nr_running(rt_se); 123501d36d0aSFrederic Weisbecker rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se); 1236391e43daSPeter Zijlstra 1237391e43daSPeter Zijlstra inc_rt_prio(rt_rq, prio); 1238391e43daSPeter Zijlstra inc_rt_migration(rt_se, rt_rq); 1239391e43daSPeter Zijlstra inc_rt_group(rt_se, rt_rq); 1240391e43daSPeter Zijlstra } 1241391e43daSPeter Zijlstra 1242391e43daSPeter Zijlstra static inline 1243391e43daSPeter Zijlstra void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1244391e43daSPeter Zijlstra { 1245391e43daSPeter Zijlstra WARN_ON(!rt_prio(rt_se_prio(rt_se))); 1246391e43daSPeter Zijlstra WARN_ON(!rt_rq->rt_nr_running); 124722abdef3SKirill Tkhai rt_rq->rt_nr_running -= rt_se_nr_running(rt_se); 124801d36d0aSFrederic Weisbecker rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se); 1249391e43daSPeter Zijlstra 1250391e43daSPeter Zijlstra dec_rt_prio(rt_rq, rt_se_prio(rt_se)); 1251391e43daSPeter Zijlstra dec_rt_migration(rt_se, rt_rq); 1252391e43daSPeter Zijlstra dec_rt_group(rt_se, rt_rq); 1253391e43daSPeter Zijlstra } 1254391e43daSPeter Zijlstra 1255ff77e468SPeter Zijlstra /* 1256ff77e468SPeter Zijlstra * Change rt_se->run_list location unless SAVE && !MOVE 1257ff77e468SPeter Zijlstra * 1258ff77e468SPeter Zijlstra * Assumes ENQUEUE/DEQUEUE flags match. 1259ff77e468SPeter Zijlstra */ 1260ff77e468SPeter Zijlstra static inline bool move_entity(unsigned int flags) 1261ff77e468SPeter Zijlstra { 1262ff77e468SPeter Zijlstra if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE) 1263ff77e468SPeter Zijlstra return false; 1264ff77e468SPeter Zijlstra 1265ff77e468SPeter Zijlstra return true; 1266ff77e468SPeter Zijlstra } 1267ff77e468SPeter Zijlstra 1268ff77e468SPeter Zijlstra static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array) 1269ff77e468SPeter Zijlstra { 1270ff77e468SPeter Zijlstra list_del_init(&rt_se->run_list); 1271ff77e468SPeter Zijlstra 1272ff77e468SPeter Zijlstra if (list_empty(array->queue + rt_se_prio(rt_se))) 1273ff77e468SPeter Zijlstra __clear_bit(rt_se_prio(rt_se), array->bitmap); 1274ff77e468SPeter Zijlstra 1275ff77e468SPeter Zijlstra rt_se->on_list = 0; 1276ff77e468SPeter Zijlstra } 1277ff77e468SPeter Zijlstra 127857a5c2daSYafang Shao static inline struct sched_statistics * 127957a5c2daSYafang Shao __schedstats_from_rt_se(struct sched_rt_entity *rt_se) 128057a5c2daSYafang Shao { 128157a5c2daSYafang Shao #ifdef CONFIG_RT_GROUP_SCHED 128257a5c2daSYafang Shao /* schedstats is not supported for RT groups. 
*/ 128357a5c2daSYafang Shao if (!rt_entity_is_task(rt_se)) 128457a5c2daSYafang Shao return NULL; 128557a5c2daSYafang Shao #endif 128657a5c2daSYafang Shao 128757a5c2daSYafang Shao return &rt_task_of(rt_se)->stats; 128857a5c2daSYafang Shao } 128957a5c2daSYafang Shao 129057a5c2daSYafang Shao static inline void 129157a5c2daSYafang Shao update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) 129257a5c2daSYafang Shao { 129357a5c2daSYafang Shao struct sched_statistics *stats; 129457a5c2daSYafang Shao struct task_struct *p = NULL; 129557a5c2daSYafang Shao 129657a5c2daSYafang Shao if (!schedstat_enabled()) 129757a5c2daSYafang Shao return; 129857a5c2daSYafang Shao 129957a5c2daSYafang Shao if (rt_entity_is_task(rt_se)) 130057a5c2daSYafang Shao p = rt_task_of(rt_se); 130157a5c2daSYafang Shao 130257a5c2daSYafang Shao stats = __schedstats_from_rt_se(rt_se); 130357a5c2daSYafang Shao if (!stats) 130457a5c2daSYafang Shao return; 130557a5c2daSYafang Shao 130657a5c2daSYafang Shao __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats); 130757a5c2daSYafang Shao } 130857a5c2daSYafang Shao 130957a5c2daSYafang Shao static inline void 131057a5c2daSYafang Shao update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) 131157a5c2daSYafang Shao { 131257a5c2daSYafang Shao struct sched_statistics *stats; 131357a5c2daSYafang Shao struct task_struct *p = NULL; 131457a5c2daSYafang Shao 131557a5c2daSYafang Shao if (!schedstat_enabled()) 131657a5c2daSYafang Shao return; 131757a5c2daSYafang Shao 131857a5c2daSYafang Shao if (rt_entity_is_task(rt_se)) 131957a5c2daSYafang Shao p = rt_task_of(rt_se); 132057a5c2daSYafang Shao 132157a5c2daSYafang Shao stats = __schedstats_from_rt_se(rt_se); 132257a5c2daSYafang Shao if (!stats) 132357a5c2daSYafang Shao return; 132457a5c2daSYafang Shao 132557a5c2daSYafang Shao __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats); 132657a5c2daSYafang Shao } 132757a5c2daSYafang Shao 132857a5c2daSYafang Shao static inline void 132957a5c2daSYafang Shao update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, 133057a5c2daSYafang Shao int flags) 133157a5c2daSYafang Shao { 133257a5c2daSYafang Shao if (!schedstat_enabled()) 133357a5c2daSYafang Shao return; 133457a5c2daSYafang Shao 133557a5c2daSYafang Shao if (flags & ENQUEUE_WAKEUP) 133657a5c2daSYafang Shao update_stats_enqueue_sleeper_rt(rt_rq, rt_se); 133757a5c2daSYafang Shao } 133857a5c2daSYafang Shao 133957a5c2daSYafang Shao static inline void 134057a5c2daSYafang Shao update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) 134157a5c2daSYafang Shao { 134257a5c2daSYafang Shao struct sched_statistics *stats; 134357a5c2daSYafang Shao struct task_struct *p = NULL; 134457a5c2daSYafang Shao 134557a5c2daSYafang Shao if (!schedstat_enabled()) 134657a5c2daSYafang Shao return; 134757a5c2daSYafang Shao 134857a5c2daSYafang Shao if (rt_entity_is_task(rt_se)) 134957a5c2daSYafang Shao p = rt_task_of(rt_se); 135057a5c2daSYafang Shao 135157a5c2daSYafang Shao stats = __schedstats_from_rt_se(rt_se); 135257a5c2daSYafang Shao if (!stats) 135357a5c2daSYafang Shao return; 135457a5c2daSYafang Shao 135557a5c2daSYafang Shao __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats); 135657a5c2daSYafang Shao } 135757a5c2daSYafang Shao 135857a5c2daSYafang Shao static inline void 135957a5c2daSYafang Shao update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, 136057a5c2daSYafang Shao int flags) 136157a5c2daSYafang Shao { 136257a5c2daSYafang Shao struct task_struct *p = 
NULL; 136357a5c2daSYafang Shao 136457a5c2daSYafang Shao if (!schedstat_enabled()) 136557a5c2daSYafang Shao return; 136657a5c2daSYafang Shao 136757a5c2daSYafang Shao if (rt_entity_is_task(rt_se)) 136857a5c2daSYafang Shao p = rt_task_of(rt_se); 136957a5c2daSYafang Shao 137057a5c2daSYafang Shao if ((flags & DEQUEUE_SLEEP) && p) { 137157a5c2daSYafang Shao unsigned int state; 137257a5c2daSYafang Shao 137357a5c2daSYafang Shao state = READ_ONCE(p->__state); 137457a5c2daSYafang Shao if (state & TASK_INTERRUPTIBLE) 137557a5c2daSYafang Shao __schedstat_set(p->stats.sleep_start, 137657a5c2daSYafang Shao rq_clock(rq_of_rt_rq(rt_rq))); 137757a5c2daSYafang Shao 137857a5c2daSYafang Shao if (state & TASK_UNINTERRUPTIBLE) 137957a5c2daSYafang Shao __schedstat_set(p->stats.block_start, 138057a5c2daSYafang Shao rq_clock(rq_of_rt_rq(rt_rq))); 138157a5c2daSYafang Shao } 138257a5c2daSYafang Shao } 138357a5c2daSYafang Shao 1384ff77e468SPeter Zijlstra static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1385391e43daSPeter Zijlstra { 1386391e43daSPeter Zijlstra struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 1387391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1388391e43daSPeter Zijlstra struct rt_rq *group_rq = group_rt_rq(rt_se); 1389391e43daSPeter Zijlstra struct list_head *queue = array->queue + rt_se_prio(rt_se); 1390391e43daSPeter Zijlstra 1391391e43daSPeter Zijlstra /* 1392391e43daSPeter Zijlstra * Don't enqueue the group if it's throttled, or when empty. 1393391e43daSPeter Zijlstra * The latter is a consequence of the former when a child group 1394391e43daSPeter Zijlstra * gets throttled and the current group doesn't have any other 1395391e43daSPeter Zijlstra * active members. 1396391e43daSPeter Zijlstra */ 1397ff77e468SPeter Zijlstra if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) { 1398ff77e468SPeter Zijlstra if (rt_se->on_list) 1399ff77e468SPeter Zijlstra __delist_rt_entity(rt_se, array); 1400391e43daSPeter Zijlstra return; 1401ff77e468SPeter Zijlstra } 1402391e43daSPeter Zijlstra 1403ff77e468SPeter Zijlstra if (move_entity(flags)) { 1404ff77e468SPeter Zijlstra WARN_ON_ONCE(rt_se->on_list); 1405ff77e468SPeter Zijlstra if (flags & ENQUEUE_HEAD) 1406391e43daSPeter Zijlstra list_add(&rt_se->run_list, queue); 1407391e43daSPeter Zijlstra else 1408391e43daSPeter Zijlstra list_add_tail(&rt_se->run_list, queue); 1409ff77e468SPeter Zijlstra 1410391e43daSPeter Zijlstra __set_bit(rt_se_prio(rt_se), array->bitmap); 1411ff77e468SPeter Zijlstra rt_se->on_list = 1; 1412ff77e468SPeter Zijlstra } 1413ff77e468SPeter Zijlstra rt_se->on_rq = 1; 1414391e43daSPeter Zijlstra 1415391e43daSPeter Zijlstra inc_rt_tasks(rt_se, rt_rq); 1416391e43daSPeter Zijlstra } 1417391e43daSPeter Zijlstra 1418ff77e468SPeter Zijlstra static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1419391e43daSPeter Zijlstra { 1420391e43daSPeter Zijlstra struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 1421391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1422391e43daSPeter Zijlstra 1423ff77e468SPeter Zijlstra if (move_entity(flags)) { 1424ff77e468SPeter Zijlstra WARN_ON_ONCE(!rt_se->on_list); 1425ff77e468SPeter Zijlstra __delist_rt_entity(rt_se, array); 1426ff77e468SPeter Zijlstra } 1427ff77e468SPeter Zijlstra rt_se->on_rq = 0; 1428391e43daSPeter Zijlstra 1429391e43daSPeter Zijlstra dec_rt_tasks(rt_se, rt_rq); 1430391e43daSPeter Zijlstra } 1431391e43daSPeter Zijlstra
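/*
 * Editor's note: a minimal, self-contained sketch of the move_entity()
 * rule used by the two helpers above, for illustration only. The flag
 * values are hypothetical stand-ins, not the kernel's definitions; only
 * the bit test mirrors move_entity(): the run_list position is kept
 * solely for a SAVE-without-MOVE (de)queue.
 */
#if 0
#include <assert.h>
#include <stdbool.h>

#define SAVE 0x1	/* stand-in for DEQUEUE_SAVE */
#define MOVE 0x2	/* stand-in for DEQUEUE_MOVE */

static bool moves(unsigned int flags)
{
	/* Only SAVE without MOVE preserves the run_list position. */
	return (flags & (SAVE | MOVE)) != SAVE;
}

int main(void)
{
	assert(moves(0));		/* plain dequeue: unlink */
	assert(moves(SAVE | MOVE));	/* save + move: position changes */
	assert(!moves(SAVE));		/* save only: keep position */
	return 0;
}
#endif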
1432391e43daSPeter Zijlstra /* 1433391e43daSPeter Zijlstra * Because the prio of an upper entry depends on the lower 1434391e43daSPeter Zijlstra * entries, we must remove entries top-down. 1435391e43daSPeter Zijlstra */ 1436ff77e468SPeter Zijlstra static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags) 1437391e43daSPeter Zijlstra { 1438391e43daSPeter Zijlstra struct sched_rt_entity *back = NULL; 1439391e43daSPeter Zijlstra 1440391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 1441391e43daSPeter Zijlstra rt_se->back = back; 1442391e43daSPeter Zijlstra back = rt_se; 1443391e43daSPeter Zijlstra } 1444391e43daSPeter Zijlstra 1445f4ebcbc0SKirill Tkhai dequeue_top_rt_rq(rt_rq_of_se(back)); 1446f4ebcbc0SKirill Tkhai 1447391e43daSPeter Zijlstra for (rt_se = back; rt_se; rt_se = rt_se->back) { 1448391e43daSPeter Zijlstra if (on_rt_rq(rt_se)) 1449ff77e468SPeter Zijlstra __dequeue_rt_entity(rt_se, flags); 1450391e43daSPeter Zijlstra } 1451391e43daSPeter Zijlstra } 1452391e43daSPeter Zijlstra 1453ff77e468SPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1454391e43daSPeter Zijlstra { 1455f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_se(rt_se); 1456f4ebcbc0SKirill Tkhai 145757a5c2daSYafang Shao update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags); 145857a5c2daSYafang Shao 1459ff77e468SPeter Zijlstra dequeue_rt_stack(rt_se, flags); 1460391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) 1461ff77e468SPeter Zijlstra __enqueue_rt_entity(rt_se, flags); 1462f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(&rq->rt); 1463391e43daSPeter Zijlstra } 1464391e43daSPeter Zijlstra 1465ff77e468SPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1466391e43daSPeter Zijlstra { 1467f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_se(rt_se); 1468f4ebcbc0SKirill Tkhai 146957a5c2daSYafang Shao update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags); 147057a5c2daSYafang Shao 1471ff77e468SPeter Zijlstra dequeue_rt_stack(rt_se, flags); 1472391e43daSPeter Zijlstra 1473391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 1474391e43daSPeter Zijlstra struct rt_rq *rt_rq = group_rt_rq(rt_se); 1475391e43daSPeter Zijlstra 1476391e43daSPeter Zijlstra if (rt_rq && rt_rq->rt_nr_running) 1477ff77e468SPeter Zijlstra __enqueue_rt_entity(rt_se, flags); 1478391e43daSPeter Zijlstra } 1479f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(&rq->rt); 1480391e43daSPeter Zijlstra } 1481391e43daSPeter Zijlstra 1482391e43daSPeter Zijlstra /* 1483391e43daSPeter Zijlstra * Adding/removing a task to/from a priority array: 1484391e43daSPeter Zijlstra */ 1485391e43daSPeter Zijlstra static void 1486391e43daSPeter Zijlstra enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) 1487391e43daSPeter Zijlstra { 1488391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt; 1489391e43daSPeter Zijlstra 1490391e43daSPeter Zijlstra if (flags & ENQUEUE_WAKEUP) 1491391e43daSPeter Zijlstra rt_se->timeout = 0; 1492391e43daSPeter Zijlstra 149357a5c2daSYafang Shao check_schedstat_required(); 149457a5c2daSYafang Shao update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se); 149557a5c2daSYafang Shao 1496ff77e468SPeter Zijlstra enqueue_rt_entity(rt_se, flags); 1497391e43daSPeter Zijlstra 14984b53a341SIngo Molnar if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 1499391e43daSPeter Zijlstra enqueue_pushable_task(rq, p); 1500391e43daSPeter Zijlstra } 1501391e43daSPeter Zijlstra 1502391e43daSPeter Zijlstra static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) 1503391e43daSPeter Zijlstra { 
1504391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt; 1505391e43daSPeter Zijlstra 1506391e43daSPeter Zijlstra update_curr_rt(rq); 1507ff77e468SPeter Zijlstra dequeue_rt_entity(rt_se, flags); 1508391e43daSPeter Zijlstra 1509391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 1510391e43daSPeter Zijlstra } 1511391e43daSPeter Zijlstra 1512391e43daSPeter Zijlstra /* 1513391e43daSPeter Zijlstra * Put a task at the head or the tail of the run list without the overhead of 1514391e43daSPeter Zijlstra * a dequeue followed by an enqueue. 1515391e43daSPeter Zijlstra */ 1516391e43daSPeter Zijlstra static void 1517391e43daSPeter Zijlstra requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) 1518391e43daSPeter Zijlstra { 1519391e43daSPeter Zijlstra if (on_rt_rq(rt_se)) { 1520391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1521391e43daSPeter Zijlstra struct list_head *queue = array->queue + rt_se_prio(rt_se); 1522391e43daSPeter Zijlstra 1523391e43daSPeter Zijlstra if (head) 1524391e43daSPeter Zijlstra list_move(&rt_se->run_list, queue); 1525391e43daSPeter Zijlstra else 1526391e43daSPeter Zijlstra list_move_tail(&rt_se->run_list, queue); 1527391e43daSPeter Zijlstra } 1528391e43daSPeter Zijlstra } 1529391e43daSPeter Zijlstra 1530391e43daSPeter Zijlstra static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) 1531391e43daSPeter Zijlstra { 1532391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt; 1533391e43daSPeter Zijlstra struct rt_rq *rt_rq; 1534391e43daSPeter Zijlstra 1535391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) { 1536391e43daSPeter Zijlstra rt_rq = rt_rq_of_se(rt_se); 1537391e43daSPeter Zijlstra requeue_rt_entity(rt_rq, rt_se, head); 1538391e43daSPeter Zijlstra } 1539391e43daSPeter Zijlstra } 1540391e43daSPeter Zijlstra 1541391e43daSPeter Zijlstra static void yield_task_rt(struct rq *rq) 1542391e43daSPeter Zijlstra { 1543391e43daSPeter Zijlstra requeue_task_rt(rq, rq->curr, 0); 1544391e43daSPeter Zijlstra } 1545391e43daSPeter Zijlstra 1546391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1547391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task); 1548391e43daSPeter Zijlstra 1549391e43daSPeter Zijlstra static int 15503aef1551SValentin Schneider select_task_rq_rt(struct task_struct *p, int cpu, int flags) 1551391e43daSPeter Zijlstra { 1552391e43daSPeter Zijlstra struct task_struct *curr; 1553391e43daSPeter Zijlstra struct rq *rq; 1554804d402fSQais Yousef bool test; 1555391e43daSPeter Zijlstra 1556391e43daSPeter Zijlstra /* For anything but wakeups, just return the task_cpu */ 15573aef1551SValentin Schneider if (!(flags & (WF_TTWU | WF_FORK))) 1558391e43daSPeter Zijlstra goto out; 1559391e43daSPeter Zijlstra 1560391e43daSPeter Zijlstra rq = cpu_rq(cpu); 1561391e43daSPeter Zijlstra 1562391e43daSPeter Zijlstra rcu_read_lock(); 1563316c1608SJason Low curr = READ_ONCE(rq->curr); /* unlocked access */ 1564391e43daSPeter Zijlstra 1565391e43daSPeter Zijlstra /* 1566391e43daSPeter Zijlstra * If the current task on @p's runqueue is an RT task, then 1567391e43daSPeter Zijlstra * try to see if we can wake this RT task up on another 1568391e43daSPeter Zijlstra * runqueue. Otherwise simply start this RT task 1569391e43daSPeter Zijlstra * on its current runqueue. 1570391e43daSPeter Zijlstra * 1571391e43daSPeter Zijlstra * We want to avoid overloading runqueues. 
If the woken 1572391e43daSPeter Zijlstra * task is of higher priority, then it will stay on this CPU 1573391e43daSPeter Zijlstra * and the lower prio task should be moved to another CPU. 1574391e43daSPeter Zijlstra * Even though this will probably make the lower prio task 1575391e43daSPeter Zijlstra * lose its cache, we do not want to bounce a higher-priority task 1576391e43daSPeter Zijlstra * around just because it gave up its CPU, perhaps for a 1577391e43daSPeter Zijlstra * lock? 1578391e43daSPeter Zijlstra * 1579391e43daSPeter Zijlstra * For equal prio tasks, we just let the scheduler sort it out. 1580391e43daSPeter Zijlstra * 1581391e43daSPeter Zijlstra * Otherwise, just let it ride on the affined RQ and the 1582391e43daSPeter Zijlstra * post-schedule router will push the preempted task away 1583391e43daSPeter Zijlstra * 1584391e43daSPeter Zijlstra * This test is optimistic; if we get it wrong, the load-balancer 1585391e43daSPeter Zijlstra * will have to sort it out. 1586804d402fSQais Yousef * 1587804d402fSQais Yousef * We take into account the capacity of the CPU to ensure it fits the 1588804d402fSQais Yousef * requirement of the task - which is only important on heterogeneous 1589804d402fSQais Yousef * systems like big.LITTLE. 1590391e43daSPeter Zijlstra */ 1591804d402fSQais Yousef test = curr && 1592804d402fSQais Yousef unlikely(rt_task(curr)) && 1593804d402fSQais Yousef (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio); 1594804d402fSQais Yousef 1595804d402fSQais Yousef if (test || !rt_task_fits_capacity(p, cpu)) { 1596391e43daSPeter Zijlstra int target = find_lowest_rq(p); 1597391e43daSPeter Zijlstra 159880e3d87bSTim Chen /* 1599b28bc1e0SQais Yousef * Bail out if we were forcing a migration to find a better 1600b28bc1e0SQais Yousef * fitting CPU but our search failed. 1601b28bc1e0SQais Yousef */ 1602b28bc1e0SQais Yousef if (!test && target != -1 && !rt_task_fits_capacity(p, target)) 1603b28bc1e0SQais Yousef goto out_unlock; 1604b28bc1e0SQais Yousef 1605b28bc1e0SQais Yousef /* 160680e3d87bSTim Chen * Don't bother moving it if the destination CPU is 160780e3d87bSTim Chen * not running a lower priority task. 160880e3d87bSTim Chen */ 160980e3d87bSTim Chen if (target != -1 && 161080e3d87bSTim Chen p->prio < cpu_rq(target)->rt.highest_prio.curr) 1611391e43daSPeter Zijlstra cpu = target; 1612391e43daSPeter Zijlstra } 1613b28bc1e0SQais Yousef 1614b28bc1e0SQais Yousef out_unlock: 1615391e43daSPeter Zijlstra rcu_read_unlock(); 1616391e43daSPeter Zijlstra 1617391e43daSPeter Zijlstra out: 1618391e43daSPeter Zijlstra return cpu; 1619391e43daSPeter Zijlstra }
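/*
 * Editor's note: a hedged sketch of the wakeup decision taken above
 * (hypothetical types and names; a lower ->prio value means higher
 * priority, as in the kernel). Another CPU is considered only when the
 * current task would get in the woken task's way, or when the woken
 * task does not fit this CPU's capacity.
 */
#if 0
#include <stdbool.h>

struct tsk { bool is_rt; int nr_cpus_allowed; int prio; };

static bool should_search_elsewhere(const struct tsk *curr,
				    const struct tsk *p, bool fits_cpu)
{
	bool curr_blocks = curr && curr->is_rt &&
			   (curr->nr_cpus_allowed < 2 ||
			    curr->prio <= p->prio);

	return curr_blocks || !fits_cpu;	/* mirrors the 'test' above */
}
#endif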
1620391e43daSPeter Zijlstra 1621391e43daSPeter Zijlstra static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) 1622391e43daSPeter Zijlstra { 1623308a623aSWanpeng Li /* 1624308a623aSWanpeng Li * Current can't be migrated, useless to reschedule, 1625308a623aSWanpeng Li * let's hope p can move out. 1626308a623aSWanpeng Li */ 16274b53a341SIngo Molnar if (rq->curr->nr_cpus_allowed == 1 || 1628a1bd02e1SQais Yousef !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) 1629391e43daSPeter Zijlstra return; 1630391e43daSPeter Zijlstra 1631308a623aSWanpeng Li /* 1632308a623aSWanpeng Li * p is migratable, so let's not schedule it and 1633308a623aSWanpeng Li * see if it is pushed or pulled somewhere else. 1634308a623aSWanpeng Li */ 1635804d402fSQais Yousef if (p->nr_cpus_allowed != 1 && 1636a1bd02e1SQais Yousef cpupri_find(&rq->rd->cpupri, p, NULL)) 1637391e43daSPeter Zijlstra return; 1638391e43daSPeter Zijlstra 1639391e43daSPeter Zijlstra /* 164097fb7a0aSIngo Molnar * There appear to be other CPUs that can accept 164197fb7a0aSIngo Molnar * the current task but none can run 'p', so let's reschedule 164297fb7a0aSIngo Molnar * to try to push the current task away: 1643391e43daSPeter Zijlstra */ 1644391e43daSPeter Zijlstra requeue_task_rt(rq, p, 1); 16458875125eSKirill Tkhai resched_curr(rq); 1646391e43daSPeter Zijlstra } 1647391e43daSPeter Zijlstra 16486e2df058SPeter Zijlstra static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 16496e2df058SPeter Zijlstra { 16506e2df058SPeter Zijlstra if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) { 16516e2df058SPeter Zijlstra /* 16526e2df058SPeter Zijlstra * This is OK because current is on_cpu, which avoids it being 16536e2df058SPeter Zijlstra * picked for load-balance; preemption/IRQs are still 16546e2df058SPeter Zijlstra * disabled, avoiding further scheduler activity on it, and we've 16556e2df058SPeter Zijlstra * not yet started the picking loop. 16566e2df058SPeter Zijlstra */ 16576e2df058SPeter Zijlstra rq_unpin_lock(rq, rf); 16586e2df058SPeter Zijlstra pull_rt_task(rq); 16596e2df058SPeter Zijlstra rq_repin_lock(rq, rf); 16606e2df058SPeter Zijlstra } 16616e2df058SPeter Zijlstra 16626e2df058SPeter Zijlstra return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq); 16636e2df058SPeter Zijlstra } 1664391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 1665391e43daSPeter Zijlstra 1666391e43daSPeter Zijlstra /* 1667391e43daSPeter Zijlstra * Preempt the current task with a newly woken task if needed: 1668391e43daSPeter Zijlstra */ 1669391e43daSPeter Zijlstra static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) 1670391e43daSPeter Zijlstra { 1671391e43daSPeter Zijlstra if (p->prio < rq->curr->prio) { 16728875125eSKirill Tkhai resched_curr(rq); 1673391e43daSPeter Zijlstra return; 1674391e43daSPeter Zijlstra } 1675391e43daSPeter Zijlstra 1676391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1677391e43daSPeter Zijlstra /* 1678391e43daSPeter Zijlstra * If: 1679391e43daSPeter Zijlstra * 1680391e43daSPeter Zijlstra * - the newly woken task is of equal priority to the current task 1681391e43daSPeter Zijlstra * - the newly woken task is non-migratable while current is migratable 1682391e43daSPeter Zijlstra * - current will be preempted on the next reschedule 1683391e43daSPeter Zijlstra * 1684391e43daSPeter Zijlstra * we should check to see if current can readily move to a different 1685391e43daSPeter Zijlstra * cpu. If so, we will reschedule to allow the push logic to try 1686391e43daSPeter Zijlstra * to move current somewhere else, making room for our non-migratable 1687391e43daSPeter Zijlstra * task. 
1688391e43daSPeter Zijlstra */ 1689391e43daSPeter Zijlstra if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) 1690391e43daSPeter Zijlstra check_preempt_equal_prio(rq, p); 1691391e43daSPeter Zijlstra #endif 1692391e43daSPeter Zijlstra } 1693391e43daSPeter Zijlstra 1694a0e813f2SPeter Zijlstra static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first) 1695ff1cdc94SMuchun Song { 169657a5c2daSYafang Shao struct sched_rt_entity *rt_se = &p->rt; 169757a5c2daSYafang Shao struct rt_rq *rt_rq = &rq->rt; 169857a5c2daSYafang Shao 1699ff1cdc94SMuchun Song p->se.exec_start = rq_clock_task(rq); 170057a5c2daSYafang Shao if (on_rt_rq(&p->rt)) 170157a5c2daSYafang Shao update_stats_wait_end_rt(rt_rq, rt_se); 1702ff1cdc94SMuchun Song 1703ff1cdc94SMuchun Song /* The running task is never eligible for pushing */ 1704ff1cdc94SMuchun Song dequeue_pushable_task(rq, p); 1705f95d4eaeSPeter Zijlstra 1706a0e813f2SPeter Zijlstra if (!first) 1707a0e813f2SPeter Zijlstra return; 1708a0e813f2SPeter Zijlstra 1709f95d4eaeSPeter Zijlstra /* 1710f95d4eaeSPeter Zijlstra * If the previous task was RT, put_prev_task() has already updated the 1711f95d4eaeSPeter Zijlstra * utilization. We only care about the case where we start to schedule 1712f95d4eaeSPeter Zijlstra * an RT task. 1713f95d4eaeSPeter Zijlstra */ 1714f95d4eaeSPeter Zijlstra if (rq->curr->sched_class != &rt_sched_class) 1715f95d4eaeSPeter Zijlstra update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0); 1716f95d4eaeSPeter Zijlstra 1717f95d4eaeSPeter Zijlstra rt_queue_push_tasks(rq); 1718ff1cdc94SMuchun Song } 1719ff1cdc94SMuchun Song 1720821aecd0SDietmar Eggemann static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq) 1721391e43daSPeter Zijlstra { 1722391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1723391e43daSPeter Zijlstra struct sched_rt_entity *next = NULL; 1724391e43daSPeter Zijlstra struct list_head *queue; 1725391e43daSPeter Zijlstra int idx; 1726391e43daSPeter Zijlstra 1727391e43daSPeter Zijlstra idx = sched_find_first_bit(array->bitmap); 1728391e43daSPeter Zijlstra BUG_ON(idx >= MAX_RT_PRIO); 1729391e43daSPeter Zijlstra 1730391e43daSPeter Zijlstra queue = array->queue + idx; 1731391e43daSPeter Zijlstra next = list_entry(queue->next, struct sched_rt_entity, run_list); 1732391e43daSPeter Zijlstra 1733391e43daSPeter Zijlstra return next; 1734391e43daSPeter Zijlstra } 1735391e43daSPeter Zijlstra 1736391e43daSPeter Zijlstra static struct task_struct *_pick_next_task_rt(struct rq *rq) 1737391e43daSPeter Zijlstra { 1738391e43daSPeter Zijlstra struct sched_rt_entity *rt_se; 1739606dba2eSPeter Zijlstra struct rt_rq *rt_rq = &rq->rt; 1740391e43daSPeter Zijlstra 1741391e43daSPeter Zijlstra do { 1742821aecd0SDietmar Eggemann rt_se = pick_next_rt_entity(rt_rq); 1743391e43daSPeter Zijlstra BUG_ON(!rt_se); 1744391e43daSPeter Zijlstra rt_rq = group_rt_rq(rt_se); 1745391e43daSPeter Zijlstra } while (rt_rq); 1746391e43daSPeter Zijlstra 1747ff1cdc94SMuchun Song return rt_task_of(rt_se); 1748391e43daSPeter Zijlstra } 1749391e43daSPeter Zijlstra 175021f56ffeSPeter Zijlstra static struct task_struct *pick_task_rt(struct rq *rq) 1751391e43daSPeter Zijlstra { 1752606dba2eSPeter Zijlstra struct task_struct *p; 1753606dba2eSPeter Zijlstra 17546e2df058SPeter Zijlstra if (!sched_rt_runnable(rq)) 1755606dba2eSPeter Zijlstra return NULL; 1756606dba2eSPeter Zijlstra 1757606dba2eSPeter Zijlstra p = _pick_next_task_rt(rq); 175821f56ffeSPeter Zijlstra 175921f56ffeSPeter Zijlstra return p; 176021f56ffeSPeter Zijlstra } 
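/*
 * Editor's note: the pick above is O(1) because a bitmap records which
 * priority queues are non-empty and a find-first-bit selects the best
 * one. A self-contained user-space sketch of that idea, assuming at
 * most 64 priorities and GCC/Clang builtins (all names hypothetical):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct prio_array64 {
	uint64_t bitmap;	/* bit p set => queue for prio p is non-empty */
};

static int pick_best(const struct prio_array64 *a)
{
	if (!a->bitmap)
		return -1;			/* nothing queued */
	return __builtin_ctzll(a->bitmap);	/* lowest set bit = best prio */
}

int main(void)
{
	struct prio_array64 a = { 0 };

	a.bitmap |= 1ull << 10;			/* enqueue at prio 10 */
	a.bitmap |= 1ull << 3;			/* enqueue at prio 3 (better) */
	printf("pick: %d\n", pick_best(&a));	/* prints 3 */
	return 0;
}
#endif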
176121f56ffeSPeter Zijlstra 176221f56ffeSPeter Zijlstra static struct task_struct *pick_next_task_rt(struct rq *rq) 176321f56ffeSPeter Zijlstra { 176421f56ffeSPeter Zijlstra struct task_struct *p = pick_task_rt(rq); 176521f56ffeSPeter Zijlstra 176621f56ffeSPeter Zijlstra if (p) 1767a0e813f2SPeter Zijlstra set_next_task_rt(rq, p, true); 176821f56ffeSPeter Zijlstra 1769391e43daSPeter Zijlstra return p; 1770391e43daSPeter Zijlstra } 1771391e43daSPeter Zijlstra 17726e2df058SPeter Zijlstra static void put_prev_task_rt(struct rq *rq, struct task_struct *p) 1773391e43daSPeter Zijlstra { 177457a5c2daSYafang Shao struct sched_rt_entity *rt_se = &p->rt; 177557a5c2daSYafang Shao struct rt_rq *rt_rq = &rq->rt; 177657a5c2daSYafang Shao 177757a5c2daSYafang Shao if (on_rt_rq(&p->rt)) 177857a5c2daSYafang Shao update_stats_wait_start_rt(rt_rq, rt_se); 177957a5c2daSYafang Shao 1780391e43daSPeter Zijlstra update_curr_rt(rq); 1781391e43daSPeter Zijlstra 178223127296SVincent Guittot update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); 1783371bf427SVincent Guittot 1784391e43daSPeter Zijlstra /* 1785391e43daSPeter Zijlstra * The previous task needs to be made eligible for pushing 1786391e43daSPeter Zijlstra * if it is still active 1787391e43daSPeter Zijlstra */ 17884b53a341SIngo Molnar if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) 1789391e43daSPeter Zijlstra enqueue_pushable_task(rq, p); 1790391e43daSPeter Zijlstra } 1791391e43daSPeter Zijlstra 1792391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1793391e43daSPeter Zijlstra 1794391e43daSPeter Zijlstra /* Only try algorithms three times */ 1795391e43daSPeter Zijlstra #define RT_MAX_TRIES 3 1796391e43daSPeter Zijlstra 1797391e43daSPeter Zijlstra static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) 1798391e43daSPeter Zijlstra { 1799391e43daSPeter Zijlstra if (!task_running(rq, p) && 180095158a89SPeter Zijlstra cpumask_test_cpu(cpu, &p->cpus_mask)) 1801391e43daSPeter Zijlstra return 1; 180297fb7a0aSIngo Molnar 1803391e43daSPeter Zijlstra return 0; 1804391e43daSPeter Zijlstra } 1805391e43daSPeter Zijlstra 1806e23ee747SKirill Tkhai /* 1807e23ee747SKirill Tkhai * Return the highest-priority pushable task on the rq that is suitable 180897fb7a0aSIngo Molnar * to be executed on the given CPU, or NULL otherwise 1809e23ee747SKirill Tkhai */ 1810e23ee747SKirill Tkhai static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu) 1811391e43daSPeter Zijlstra { 1812e23ee747SKirill Tkhai struct plist_head *head = &rq->rt.pushable_tasks; 1813391e43daSPeter Zijlstra struct task_struct *p; 1814391e43daSPeter Zijlstra 1815e23ee747SKirill Tkhai if (!has_pushable_tasks(rq)) 1816e23ee747SKirill Tkhai return NULL; 1817391e43daSPeter Zijlstra 1818e23ee747SKirill Tkhai plist_for_each_entry(p, head, pushable_tasks) { 1819e23ee747SKirill Tkhai if (pick_rt_task(rq, p, cpu)) 1820e23ee747SKirill Tkhai return p; 1821391e43daSPeter Zijlstra } 1822391e43daSPeter Zijlstra 1823e23ee747SKirill Tkhai return NULL; 1824391e43daSPeter Zijlstra } 1825391e43daSPeter Zijlstra
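/*
 * Editor's note: because pushable_tasks is a priority-ordered plist,
 * the scan above can stop at the first entry that is allowed on the
 * target CPU; it is the highest-priority eligible task. A hedged sketch
 * over a plain array sorted best-priority-first (hypothetical names):
 */
#if 0
#include <stddef.h>

struct ptask { int prio; unsigned long cpus_mask; };

static const struct ptask *first_eligible(const struct ptask *sorted,
					  size_t n, int cpu)
{
	for (size_t i = 0; i < n; i++)		/* best priority first */
		if (sorted[i].cpus_mask & (1UL << cpu))
			return &sorted[i];
	return NULL;				/* none may run on @cpu */
}
#endif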
1826391e43daSPeter Zijlstra static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); 1827391e43daSPeter Zijlstra 1828391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task) 1829391e43daSPeter Zijlstra { 1830391e43daSPeter Zijlstra struct sched_domain *sd; 18314ba29684SChristoph Lameter struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask); 1832391e43daSPeter Zijlstra int this_cpu = smp_processor_id(); 1833391e43daSPeter Zijlstra int cpu = task_cpu(task); 1834a1bd02e1SQais Yousef int ret; 1835391e43daSPeter Zijlstra 1836391e43daSPeter Zijlstra /* Make sure the mask is initialized first */ 1837391e43daSPeter Zijlstra if (unlikely(!lowest_mask)) 1838391e43daSPeter Zijlstra return -1; 1839391e43daSPeter Zijlstra 18404b53a341SIngo Molnar if (task->nr_cpus_allowed == 1) 1841391e43daSPeter Zijlstra return -1; /* No other targets possible */ 1842391e43daSPeter Zijlstra 1843a1bd02e1SQais Yousef /* 1844a1bd02e1SQais Yousef * If we're on an asym system, ensure we consider the different capacities 1845a1bd02e1SQais Yousef * of the CPUs when searching for the lowest_mask. 1846a1bd02e1SQais Yousef */ 1847a1bd02e1SQais Yousef if (static_branch_unlikely(&sched_asym_cpucapacity)) { 1848a1bd02e1SQais Yousef 1849a1bd02e1SQais Yousef ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri, 1850a1bd02e1SQais Yousef task, lowest_mask, 1851a1bd02e1SQais Yousef rt_task_fits_capacity); 1852a1bd02e1SQais Yousef } else { 1853a1bd02e1SQais Yousef 1854a1bd02e1SQais Yousef ret = cpupri_find(&task_rq(task)->rd->cpupri, 1855a1bd02e1SQais Yousef task, lowest_mask); 1856a1bd02e1SQais Yousef } 1857a1bd02e1SQais Yousef 1858a1bd02e1SQais Yousef if (!ret) 1859391e43daSPeter Zijlstra return -1; /* No targets found */ 1860391e43daSPeter Zijlstra 1861391e43daSPeter Zijlstra /* 186297fb7a0aSIngo Molnar * At this point we have built a mask of CPUs representing the 1863391e43daSPeter Zijlstra * lowest priority tasks in the system. Now we want to elect 1864391e43daSPeter Zijlstra * the best one based on our affinity and topology. 1865391e43daSPeter Zijlstra * 186697fb7a0aSIngo Molnar * We prioritize the last CPU that the task executed on since 1867391e43daSPeter Zijlstra * it is most likely cache-hot in that location. 1868391e43daSPeter Zijlstra */ 1869391e43daSPeter Zijlstra if (cpumask_test_cpu(cpu, lowest_mask)) 1870391e43daSPeter Zijlstra return cpu; 1871391e43daSPeter Zijlstra 1872391e43daSPeter Zijlstra /* 1873391e43daSPeter Zijlstra * Otherwise, we consult the sched_domains span maps to figure 187497fb7a0aSIngo Molnar * out which CPU is logically closest to our hot cache data. 1875391e43daSPeter Zijlstra */ 1876391e43daSPeter Zijlstra if (!cpumask_test_cpu(this_cpu, lowest_mask)) 1877391e43daSPeter Zijlstra this_cpu = -1; /* Skip this_cpu opt if not among lowest */ 1878391e43daSPeter Zijlstra 1879391e43daSPeter Zijlstra rcu_read_lock(); 1880391e43daSPeter Zijlstra for_each_domain(cpu, sd) { 1881391e43daSPeter Zijlstra if (sd->flags & SD_WAKE_AFFINE) { 1882391e43daSPeter Zijlstra int best_cpu; 1883391e43daSPeter Zijlstra 1884391e43daSPeter Zijlstra /* 1885391e43daSPeter Zijlstra * "this_cpu" is cheaper to preempt than a 1886391e43daSPeter Zijlstra * remote processor. 
1887391e43daSPeter Zijlstra */ 1888391e43daSPeter Zijlstra if (this_cpu != -1 && 1889391e43daSPeter Zijlstra cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { 1890391e43daSPeter Zijlstra rcu_read_unlock(); 1891391e43daSPeter Zijlstra return this_cpu; 1892391e43daSPeter Zijlstra } 1893391e43daSPeter Zijlstra 189414e292f8SPeter Zijlstra best_cpu = cpumask_any_and_distribute(lowest_mask, 1895391e43daSPeter Zijlstra sched_domain_span(sd)); 1896391e43daSPeter Zijlstra if (best_cpu < nr_cpu_ids) { 1897391e43daSPeter Zijlstra rcu_read_unlock(); 1898391e43daSPeter Zijlstra return best_cpu; 1899391e43daSPeter Zijlstra } 1900391e43daSPeter Zijlstra } 1901391e43daSPeter Zijlstra } 1902391e43daSPeter Zijlstra rcu_read_unlock(); 1903391e43daSPeter Zijlstra 1904391e43daSPeter Zijlstra /* 1905391e43daSPeter Zijlstra * And finally, if there were no matches within the domains, 1906391e43daSPeter Zijlstra * just give the caller *something* to work with from the compatible 1907391e43daSPeter Zijlstra * locations. 1908391e43daSPeter Zijlstra */ 1909391e43daSPeter Zijlstra if (this_cpu != -1) 1910391e43daSPeter Zijlstra return this_cpu; 1911391e43daSPeter Zijlstra 191214e292f8SPeter Zijlstra cpu = cpumask_any_distribute(lowest_mask); 1913391e43daSPeter Zijlstra if (cpu < nr_cpu_ids) 1914391e43daSPeter Zijlstra return cpu; 191597fb7a0aSIngo Molnar 1916391e43daSPeter Zijlstra return -1; 1917391e43daSPeter Zijlstra } 1918391e43daSPeter Zijlstra
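/*
 * Editor's note: a hedged sketch of the preference order implemented by
 * find_lowest_rq() above, over a toy 64-CPU mask (hypothetical names):
 * prefer the task's previous CPU for cache locality, then the local
 * CPU, then any remaining candidate. The real code additionally walks
 * the sched domains to find a topologically close CPU first.
 */
#if 0
#include <stdint.h>

static int prefer_cpu(uint64_t lowest_mask, int prev_cpu, int this_cpu)
{
	if (lowest_mask & (1ULL << prev_cpu))
		return prev_cpu;			/* likely cache-hot */
	if (this_cpu >= 0 && (lowest_mask & (1ULL << this_cpu)))
		return this_cpu;			/* cheap to preempt locally */
	if (lowest_mask)
		return __builtin_ctzll(lowest_mask);	/* any candidate */
	return -1;					/* no target found */
}
#endif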
1919391e43daSPeter Zijlstra /* Will lock the rq it finds */ 1920391e43daSPeter Zijlstra static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) 1921391e43daSPeter Zijlstra { 1922391e43daSPeter Zijlstra struct rq *lowest_rq = NULL; 1923391e43daSPeter Zijlstra int tries; 1924391e43daSPeter Zijlstra int cpu; 1925391e43daSPeter Zijlstra 1926391e43daSPeter Zijlstra for (tries = 0; tries < RT_MAX_TRIES; tries++) { 1927391e43daSPeter Zijlstra cpu = find_lowest_rq(task); 1928391e43daSPeter Zijlstra 1929391e43daSPeter Zijlstra if ((cpu == -1) || (cpu == rq->cpu)) 1930391e43daSPeter Zijlstra break; 1931391e43daSPeter Zijlstra 1932391e43daSPeter Zijlstra lowest_rq = cpu_rq(cpu); 1933391e43daSPeter Zijlstra 193480e3d87bSTim Chen if (lowest_rq->rt.highest_prio.curr <= task->prio) { 193580e3d87bSTim Chen /* 193680e3d87bSTim Chen * Target rq has tasks of equal or higher priority, 193780e3d87bSTim Chen * retrying does not release any lock and is unlikely 193880e3d87bSTim Chen * to yield a different result. 193980e3d87bSTim Chen */ 194080e3d87bSTim Chen lowest_rq = NULL; 194180e3d87bSTim Chen break; 194280e3d87bSTim Chen } 194380e3d87bSTim Chen 1944391e43daSPeter Zijlstra /* if the prio of this runqueue changed, try again */ 1945391e43daSPeter Zijlstra if (double_lock_balance(rq, lowest_rq)) { 1946391e43daSPeter Zijlstra /* 1947391e43daSPeter Zijlstra * We had to unlock the run queue. In 1948391e43daSPeter Zijlstra * the meantime, the task could have 1949391e43daSPeter Zijlstra * migrated already or had its affinity changed. 1950391e43daSPeter Zijlstra * Also make sure that it wasn't scheduled on its rq. 1951391e43daSPeter Zijlstra */ 1952391e43daSPeter Zijlstra if (unlikely(task_rq(task) != rq || 195395158a89SPeter Zijlstra !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) || 1954391e43daSPeter Zijlstra task_running(rq, task) || 195513b5ab02SXunlei Pang !rt_task(task) || 1956da0c1e65SKirill Tkhai !task_on_rq_queued(task))) { 1957391e43daSPeter Zijlstra 19587f1b4393SPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1959391e43daSPeter Zijlstra lowest_rq = NULL; 1960391e43daSPeter Zijlstra break; 1961391e43daSPeter Zijlstra } 1962391e43daSPeter Zijlstra } 1963391e43daSPeter Zijlstra 1964391e43daSPeter Zijlstra /* If this rq is still suitable, use it. */ 1965391e43daSPeter Zijlstra if (lowest_rq->rt.highest_prio.curr > task->prio) 1966391e43daSPeter Zijlstra break; 1967391e43daSPeter Zijlstra 1968391e43daSPeter Zijlstra /* try again */ 1969391e43daSPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1970391e43daSPeter Zijlstra lowest_rq = NULL; 1971391e43daSPeter Zijlstra } 1972391e43daSPeter Zijlstra 1973391e43daSPeter Zijlstra return lowest_rq; 1974391e43daSPeter Zijlstra } 1975391e43daSPeter Zijlstra 1976391e43daSPeter Zijlstra static struct task_struct *pick_next_pushable_task(struct rq *rq) 1977391e43daSPeter Zijlstra { 1978391e43daSPeter Zijlstra struct task_struct *p; 1979391e43daSPeter Zijlstra 1980391e43daSPeter Zijlstra if (!has_pushable_tasks(rq)) 1981391e43daSPeter Zijlstra return NULL; 1982391e43daSPeter Zijlstra 1983391e43daSPeter Zijlstra p = plist_first_entry(&rq->rt.pushable_tasks, 1984391e43daSPeter Zijlstra struct task_struct, pushable_tasks); 1985391e43daSPeter Zijlstra 1986391e43daSPeter Zijlstra BUG_ON(rq->cpu != task_cpu(p)); 1987391e43daSPeter Zijlstra BUG_ON(task_current(rq, p)); 19884b53a341SIngo Molnar BUG_ON(p->nr_cpus_allowed <= 1); 1989391e43daSPeter Zijlstra 1990da0c1e65SKirill Tkhai BUG_ON(!task_on_rq_queued(p)); 1991391e43daSPeter Zijlstra BUG_ON(!rt_task(p)); 1992391e43daSPeter Zijlstra 1993391e43daSPeter Zijlstra return p; 1994391e43daSPeter Zijlstra } 1995391e43daSPeter Zijlstra 1996391e43daSPeter Zijlstra /* 1997391e43daSPeter Zijlstra * If the current CPU has more than one RT task, see if the 1998391e43daSPeter Zijlstra * non-running task can migrate over to a CPU that is running a task 1999391e43daSPeter Zijlstra * of lesser priority. 2000391e43daSPeter Zijlstra */ 2001a7c81556SPeter Zijlstra static int push_rt_task(struct rq *rq, bool pull) 2002391e43daSPeter Zijlstra { 2003391e43daSPeter Zijlstra struct task_struct *next_task; 2004391e43daSPeter Zijlstra struct rq *lowest_rq; 2005391e43daSPeter Zijlstra int ret = 0; 2006391e43daSPeter Zijlstra 2007391e43daSPeter Zijlstra if (!rq->rt.overloaded) 2008391e43daSPeter Zijlstra return 0; 2009391e43daSPeter Zijlstra 2010391e43daSPeter Zijlstra next_task = pick_next_pushable_task(rq); 2011391e43daSPeter Zijlstra if (!next_task) 2012391e43daSPeter Zijlstra return 0; 2013391e43daSPeter Zijlstra 2014391e43daSPeter Zijlstra retry: 201549bef33eSValentin Schneider /* 201649bef33eSValentin Schneider * It's possible that the next_task slipped in with 201749bef33eSValentin Schneider * higher priority than current. If that's the case, 201849bef33eSValentin Schneider * just reschedule current. 
201949bef33eSValentin Schneider */ 202049bef33eSValentin Schneider if (unlikely(next_task->prio < rq->curr->prio)) { 202149bef33eSValentin Schneider resched_curr(rq); 202249bef33eSValentin Schneider return 0; 202349bef33eSValentin Schneider } 202449bef33eSValentin Schneider 2025a7c81556SPeter Zijlstra if (is_migration_disabled(next_task)) { 2026a7c81556SPeter Zijlstra struct task_struct *push_task = NULL; 2027a7c81556SPeter Zijlstra int cpu; 2028a7c81556SPeter Zijlstra 2029a7c81556SPeter Zijlstra if (!pull || rq->push_busy) 2030a7c81556SPeter Zijlstra return 0; 2031a7c81556SPeter Zijlstra 203249bef33eSValentin Schneider /* 203349bef33eSValentin Schneider * Invoking find_lowest_rq() on anything but an RT task doesn't 203449bef33eSValentin Schneider * make sense. Per the above priority check, curr has to 203549bef33eSValentin Schneider * be of higher priority than next_task, so no need to 203649bef33eSValentin Schneider * reschedule when bailing out. 203749bef33eSValentin Schneider * 203849bef33eSValentin Schneider * Note that the stoppers are masqueraded as SCHED_FIFO 203949bef33eSValentin Schneider * (cf. sched_set_stop_task()), so we can't rely on rt_task(). 204049bef33eSValentin Schneider */ 204149bef33eSValentin Schneider if (rq->curr->sched_class != &rt_sched_class) 204249bef33eSValentin Schneider return 0; 204349bef33eSValentin Schneider 2044a7c81556SPeter Zijlstra cpu = find_lowest_rq(rq->curr); 2045a7c81556SPeter Zijlstra if (cpu == -1 || cpu == rq->cpu) 2046a7c81556SPeter Zijlstra return 0; 2047a7c81556SPeter Zijlstra 2048a7c81556SPeter Zijlstra /* 2049a7c81556SPeter Zijlstra * Since we found a CPU with lower priority than @next_task, 2050a7c81556SPeter Zijlstra * it should be running. However, we cannot migrate it 2051a7c81556SPeter Zijlstra * to this other CPU; instead, attempt to push the current 2052a7c81556SPeter Zijlstra * running task on this CPU away. 2053a7c81556SPeter Zijlstra */ 2054a7c81556SPeter Zijlstra push_task = get_push_task(rq); 2055a7c81556SPeter Zijlstra if (push_task) { 20565cb9eaa3SPeter Zijlstra raw_spin_rq_unlock(rq); 2057a7c81556SPeter Zijlstra stop_one_cpu_nowait(rq->cpu, push_cpu_stop, 2058a7c81556SPeter Zijlstra push_task, &rq->push_work); 20595cb9eaa3SPeter Zijlstra raw_spin_rq_lock(rq); 2060a7c81556SPeter Zijlstra } 2061a7c81556SPeter Zijlstra 2062a7c81556SPeter Zijlstra return 0; 2063a7c81556SPeter Zijlstra } 2064a7c81556SPeter Zijlstra 20659ebc6053SYangtao Li if (WARN_ON(next_task == rq->curr)) 2066391e43daSPeter Zijlstra return 0; 2067391e43daSPeter Zijlstra 2068391e43daSPeter Zijlstra /* We might release rq lock */ 2069391e43daSPeter Zijlstra get_task_struct(next_task); 2070391e43daSPeter Zijlstra 2071391e43daSPeter Zijlstra /* find_lock_lowest_rq locks the rq if found */ 2072391e43daSPeter Zijlstra lowest_rq = find_lock_lowest_rq(next_task, rq); 2073391e43daSPeter Zijlstra if (!lowest_rq) { 2074391e43daSPeter Zijlstra struct task_struct *task; 2075391e43daSPeter Zijlstra /* 2076391e43daSPeter Zijlstra * find_lock_lowest_rq releases rq->lock, 2077391e43daSPeter Zijlstra * so it is possible that next_task has migrated. 2078391e43daSPeter Zijlstra * 2079391e43daSPeter Zijlstra * We need to make sure that the task is still on the same 2080391e43daSPeter Zijlstra * run-queue and is also still the next task eligible for 2081391e43daSPeter Zijlstra * pushing. 
2082391e43daSPeter Zijlstra */ 2083391e43daSPeter Zijlstra task = pick_next_pushable_task(rq); 2084de16b91eSByungchul Park if (task == next_task) { 2085391e43daSPeter Zijlstra /* 2086391e43daSPeter Zijlstra * The task hasn't migrated, and is still the next 2087391e43daSPeter Zijlstra * eligible task, but we failed to find a run-queue 2088391e43daSPeter Zijlstra * to push it to. Do not retry in this case, since 208997fb7a0aSIngo Molnar * other CPUs will pull from us when ready. 2090391e43daSPeter Zijlstra */ 2091391e43daSPeter Zijlstra goto out; 2092391e43daSPeter Zijlstra } 2093391e43daSPeter Zijlstra 2094391e43daSPeter Zijlstra if (!task) 2095391e43daSPeter Zijlstra /* No more tasks, just exit */ 2096391e43daSPeter Zijlstra goto out; 2097391e43daSPeter Zijlstra 2098391e43daSPeter Zijlstra /* 2099391e43daSPeter Zijlstra * Something has shifted, try again. 2100391e43daSPeter Zijlstra */ 2101391e43daSPeter Zijlstra put_task_struct(next_task); 2102391e43daSPeter Zijlstra next_task = task; 2103391e43daSPeter Zijlstra goto retry; 2104391e43daSPeter Zijlstra } 2105391e43daSPeter Zijlstra 2106391e43daSPeter Zijlstra deactivate_task(rq, next_task, 0); 2107391e43daSPeter Zijlstra set_task_cpu(next_task, lowest_rq->cpu); 2108391e43daSPeter Zijlstra activate_task(lowest_rq, next_task, 0); 2109a7c81556SPeter Zijlstra resched_curr(lowest_rq); 2110391e43daSPeter Zijlstra ret = 1; 2111391e43daSPeter Zijlstra 2112391e43daSPeter Zijlstra double_unlock_balance(rq, lowest_rq); 2113391e43daSPeter Zijlstra out: 2114391e43daSPeter Zijlstra put_task_struct(next_task); 2115391e43daSPeter Zijlstra 2116391e43daSPeter Zijlstra return ret; 2117391e43daSPeter Zijlstra } 2118391e43daSPeter Zijlstra 2119391e43daSPeter Zijlstra static void push_rt_tasks(struct rq *rq) 2120391e43daSPeter Zijlstra { 2121391e43daSPeter Zijlstra /* push_rt_task will return true if it moved an RT task */ 2122a7c81556SPeter Zijlstra while (push_rt_task(rq, false)) 2123391e43daSPeter Zijlstra ; 2124391e43daSPeter Zijlstra } 2125391e43daSPeter Zijlstra 2126b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI 2127b6366f04SSteven Rostedt 21283e777f99SSteven Rostedt (VMware) /* 21293e777f99SSteven Rostedt (VMware) * When a high priority task schedules out from a CPU and a lower priority 21303e777f99SSteven Rostedt (VMware) * task is scheduled in, a check is made to see if there are any RT tasks 21313e777f99SSteven Rostedt (VMware) * on other CPUs that are waiting to run because a higher priority RT task 21323e777f99SSteven Rostedt (VMware) * is currently running on its CPU. In this case, the CPU with multiple RT 21333e777f99SSteven Rostedt (VMware) * tasks queued on it (overloaded) needs to be notified that a CPU has opened 21343e777f99SSteven Rostedt (VMware) * up that may be able to run one of its non-running queued RT tasks. 21353e777f99SSteven Rostedt (VMware) * 21364bdced5cSSteven Rostedt (Red Hat) * All CPUs with overloaded RT tasks need to be notified as there is currently 21374bdced5cSSteven Rostedt (Red Hat) * no way to know which of these CPUs have the highest priority task waiting 21384bdced5cSSteven Rostedt (Red Hat) * to run. Instead of trying to take a spinlock on each of these CPUs, 21394bdced5cSSteven Rostedt (Red Hat) * which has been shown to cause large latencies when done on machines with many 21404bdced5cSSteven Rostedt (Red Hat) * CPUs, an IPI is sent to the CPUs to have them push off the overloaded 21414bdced5cSSteven Rostedt (Red Hat) * RT tasks waiting to run. 
21423e777f99SSteven Rostedt (VMware) * 21434bdced5cSSteven Rostedt (Red Hat) * Just sending an IPI to each of the CPUs is also an issue, as on large 21444bdced5cSSteven Rostedt (Red Hat) * count CPU machines, this can cause an IPI storm on a CPU, especially 21454bdced5cSSteven Rostedt (Red Hat) * if it's the only CPU with multiple RT tasks queued, and a large number 21464bdced5cSSteven Rostedt (Red Hat) * of CPUs scheduling a lower priority task at the same time. 21473e777f99SSteven Rostedt (VMware) * 21484bdced5cSSteven Rostedt (Red Hat) * Each root domain has its own irq work function that can iterate over 21494bdced5cSSteven Rostedt (Red Hat) * all CPUs with RT overloaded tasks. Since every CPU with an overloaded 21503b03706fSIngo Molnar * RT task must be checked whenever one or more CPUs lower 21514bdced5cSSteven Rostedt (Red Hat) * their priority, there's a single irq work iterator that will try to 21524bdced5cSSteven Rostedt (Red Hat) * push off RT tasks that are waiting to run. 21533e777f99SSteven Rostedt (VMware) * 21544bdced5cSSteven Rostedt (Red Hat) * When a CPU schedules a lower priority task, it will kick off the 21554bdced5cSSteven Rostedt (Red Hat) * irq work iterator that will jump to each CPU with overloaded RT tasks. 21564bdced5cSSteven Rostedt (Red Hat) * As it only takes the first CPU that schedules a lower priority task 21574bdced5cSSteven Rostedt (Red Hat) * to start the process, the rto_start variable is incremented and if 21584bdced5cSSteven Rostedt (Red Hat) * the atomic result is one, then that CPU will try to take the rto_lock. 21594bdced5cSSteven Rostedt (Red Hat) * This prevents high contention on the lock as the process handles all 21604bdced5cSSteven Rostedt (Red Hat) * CPUs scheduling lower priority tasks. 21613e777f99SSteven Rostedt (VMware) * 21624bdced5cSSteven Rostedt (Red Hat) * All CPUs that are scheduling a lower priority task will increment the 21634bdced5cSSteven Rostedt (Red Hat) * rto_loop_next variable. This will make sure that the irq work iterator 21644bdced5cSSteven Rostedt (Red Hat) * checks all RT overloaded CPUs whenever a CPU schedules a new lower 21654bdced5cSSteven Rostedt (Red Hat) * priority task, even if the iterator is in the middle of a scan. Incrementing 21664bdced5cSSteven Rostedt (Red Hat) * the rto_loop_next will cause the iterator to perform another scan. 21673e777f99SSteven Rostedt (VMware) * 21683e777f99SSteven Rostedt (VMware) */
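/*
 * Editor's note: a hedged user-space sketch of the rto_loop /
 * rto_loop_next handshake described above (illustrative names, C11
 * atomics): writers bump 'next' whenever another scan is needed; the
 * single iterator keeps rescanning until its private 'loop' counter
 * catches up with 'next'.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

struct rto_sim {
	atomic_int next;	/* bumped by any CPU, no lock needed */
	int loop;		/* owned by the single iterator */
};

static void writer_kick(struct rto_sim *r)
{
	atomic_fetch_add(&r->next, 1);	/* request at least one more scan */
}

static bool iterator_should_rescan(struct rto_sim *r)
{
	int next = atomic_load_explicit(&r->next, memory_order_acquire);

	if (r->loop == next)
		return false;		/* nobody asked for more work */
	r->loop = next;			/* absorb all pending requests */
	return true;
}
#endif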
2169ad0f1d9dSSteven Rostedt (VMware) static int rto_next_cpu(struct root_domain *rd) 2170b6366f04SSteven Rostedt { 21714bdced5cSSteven Rostedt (Red Hat) int next; 2172b6366f04SSteven Rostedt int cpu; 2173b6366f04SSteven Rostedt 2174b6366f04SSteven Rostedt /* 21754bdced5cSSteven Rostedt (Red Hat) * When starting the IPI RT pushing, the rto_cpu is set to -1, so 21764bdced5cSSteven Rostedt (Red Hat) * rto_next_cpu() will simply return the first CPU found in 21774bdced5cSSteven Rostedt (Red Hat) * the rto_mask. 21784bdced5cSSteven Rostedt (Red Hat) * 217997fb7a0aSIngo Molnar * If rto_next_cpu() is called while rto_cpu is a valid CPU, it 21804bdced5cSSteven Rostedt (Red Hat) * will return the next CPU found in the rto_mask. 21814bdced5cSSteven Rostedt (Red Hat) * 21824bdced5cSSteven Rostedt (Red Hat) * If there are no more CPUs left in the rto_mask, then a check is made 21834bdced5cSSteven Rostedt (Red Hat) * against rto_loop and rto_loop_next. rto_loop is only updated with 21844bdced5cSSteven Rostedt (Red Hat) * the rto_lock held, but any CPU may increment the rto_loop_next 21854bdced5cSSteven Rostedt (Red Hat) * without any locking. 2186b6366f04SSteven Rostedt */ 21874bdced5cSSteven Rostedt (Red Hat) for (;;) { 21884bdced5cSSteven Rostedt (Red Hat) 21894bdced5cSSteven Rostedt (Red Hat) /* When rto_cpu is -1 this acts like cpumask_first() */ 21904bdced5cSSteven Rostedt (Red Hat) cpu = cpumask_next(rd->rto_cpu, rd->rto_mask); 21914bdced5cSSteven Rostedt (Red Hat) 21924bdced5cSSteven Rostedt (Red Hat) rd->rto_cpu = cpu; 21934bdced5cSSteven Rostedt (Red Hat) 21944bdced5cSSteven Rostedt (Red Hat) if (cpu < nr_cpu_ids) 21954bdced5cSSteven Rostedt (Red Hat) return cpu; 21964bdced5cSSteven Rostedt (Red Hat) 21974bdced5cSSteven Rostedt (Red Hat) rd->rto_cpu = -1; 21984bdced5cSSteven Rostedt (Red Hat) 21994bdced5cSSteven Rostedt (Red Hat) /* 22004bdced5cSSteven Rostedt (Red Hat) * ACQUIRE ensures we see the @rto_mask changes 22014bdced5cSSteven Rostedt (Red Hat) * made prior to the @next value observed. 22024bdced5cSSteven Rostedt (Red Hat) * 22034bdced5cSSteven Rostedt (Red Hat) * Matches WMB in rt_set_overload(). 22044bdced5cSSteven Rostedt (Red Hat) */ 22054bdced5cSSteven Rostedt (Red Hat) next = atomic_read_acquire(&rd->rto_loop_next); 22064bdced5cSSteven Rostedt (Red Hat) 22074bdced5cSSteven Rostedt (Red Hat) if (rd->rto_loop == next) 22084bdced5cSSteven Rostedt (Red Hat) break; 22094bdced5cSSteven Rostedt (Red Hat) 22104bdced5cSSteven Rostedt (Red Hat) rd->rto_loop = next; 2211b6366f04SSteven Rostedt } 2212b6366f04SSteven Rostedt 22134bdced5cSSteven Rostedt (Red Hat) return -1; 22144bdced5cSSteven Rostedt (Red Hat) } 2215b6366f04SSteven Rostedt 22164bdced5cSSteven Rostedt (Red Hat) static inline bool rto_start_trylock(atomic_t *v) 22174bdced5cSSteven Rostedt (Red Hat) { 22184bdced5cSSteven Rostedt (Red Hat) return !atomic_cmpxchg_acquire(v, 0, 1); 22194bdced5cSSteven Rostedt (Red Hat) } 22204bdced5cSSteven Rostedt (Red Hat) 22214bdced5cSSteven Rostedt (Red Hat) static inline void rto_start_unlock(atomic_t *v) 22224bdced5cSSteven Rostedt (Red Hat) { 22234bdced5cSSteven Rostedt (Red Hat) atomic_set_release(v, 0); 22244bdced5cSSteven Rostedt (Red Hat) } 22254bdced5cSSteven Rostedt (Red Hat)
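/*
 * Editor's note: the pair above is a one-bit try-lock built from an
 * acquire cmpxchg and a release store. A hedged C11 equivalent follows
 * (hypothetical names; the kernel variant returns the old value, which
 * is inverted to get the same success boolean):
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

static bool start_trylock(atomic_int *v)
{
	int zero = 0;

	/* Succeeds for exactly one caller; pairs with the release below. */
	return atomic_compare_exchange_strong_explicit(v, &zero, 1,
			memory_order_acquire, memory_order_relaxed);
}

static void start_unlock(atomic_int *v)
{
	atomic_store_explicit(v, 0, memory_order_release);
}
#endif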
22264bdced5cSSteven Rostedt (Red Hat) static void tell_cpu_to_push(struct rq *rq) 22274bdced5cSSteven Rostedt (Red Hat) { 22284bdced5cSSteven Rostedt (Red Hat) int cpu = -1; 22294bdced5cSSteven Rostedt (Red Hat) 22304bdced5cSSteven Rostedt (Red Hat) /* Keep the loop going if the IPI is currently active */ 22314bdced5cSSteven Rostedt (Red Hat) atomic_inc(&rq->rd->rto_loop_next); 22324bdced5cSSteven Rostedt (Red Hat) 22334bdced5cSSteven Rostedt (Red Hat) /* Only one CPU can initiate a loop at a time */ 22344bdced5cSSteven Rostedt (Red Hat) if (!rto_start_trylock(&rq->rd->rto_loop_start)) 2235b6366f04SSteven Rostedt return; 2236b6366f04SSteven Rostedt 22374bdced5cSSteven Rostedt (Red Hat) raw_spin_lock(&rq->rd->rto_lock); 2238b6366f04SSteven Rostedt 22394bdced5cSSteven Rostedt (Red Hat) /* 224097fb7a0aSIngo Molnar * The rto_cpu is updated under the lock; if it has a valid CPU 22414bdced5cSSteven Rostedt (Red Hat) * then the IPI is still running and will continue due to the 22424bdced5cSSteven Rostedt (Red Hat) * update to loop_next, and nothing needs to be done here. 22434bdced5cSSteven Rostedt (Red Hat) * Otherwise it is finishing up and an IPI needs to be sent. 22444bdced5cSSteven Rostedt (Red Hat) */ 22454bdced5cSSteven Rostedt (Red Hat) if (rq->rd->rto_cpu < 0) 2246ad0f1d9dSSteven Rostedt (VMware) cpu = rto_next_cpu(rq->rd); 22474bdced5cSSteven Rostedt (Red Hat) 22484bdced5cSSteven Rostedt (Red Hat) raw_spin_unlock(&rq->rd->rto_lock); 22494bdced5cSSteven Rostedt (Red Hat) 22504bdced5cSSteven Rostedt (Red Hat) rto_start_unlock(&rq->rd->rto_loop_start); 22514bdced5cSSteven Rostedt (Red Hat) 2252364f5665SSteven Rostedt (VMware) if (cpu >= 0) { 2253364f5665SSteven Rostedt (VMware) /* Make sure the rd does not get freed while pushing */ 2254364f5665SSteven Rostedt (VMware) sched_get_rd(rq->rd); 22554bdced5cSSteven Rostedt (Red Hat) irq_work_queue_on(&rq->rd->rto_push_work, cpu); 2256b6366f04SSteven Rostedt } 2257364f5665SSteven Rostedt (VMware) } 2258b6366f04SSteven Rostedt 2259b6366f04SSteven Rostedt /* Called from hardirq context */ 22604bdced5cSSteven Rostedt (Red Hat) void rto_push_irq_work_func(struct irq_work *work) 2261b6366f04SSteven Rostedt { 2262ad0f1d9dSSteven Rostedt (VMware) struct root_domain *rd = 2263ad0f1d9dSSteven Rostedt (VMware) container_of(work, struct root_domain, rto_push_work); 22644bdced5cSSteven Rostedt (Red Hat) struct rq *rq; 2265b6366f04SSteven Rostedt int cpu; 2266b6366f04SSteven Rostedt 22674bdced5cSSteven Rostedt (Red Hat) rq = this_rq(); 2268b6366f04SSteven Rostedt 22694bdced5cSSteven Rostedt (Red Hat) /* 22704bdced5cSSteven Rostedt (Red Hat) * We do not need to grab the lock to check for has_pushable_tasks. 22714bdced5cSSteven Rostedt (Red Hat) * When it gets updated, a check is made to see if a push is possible. 22724bdced5cSSteven Rostedt (Red Hat) */ 2273b6366f04SSteven Rostedt if (has_pushable_tasks(rq)) { 22745cb9eaa3SPeter Zijlstra raw_spin_rq_lock(rq); 2275a7c81556SPeter Zijlstra while (push_rt_task(rq, true)) 2276a7c81556SPeter Zijlstra ; 22775cb9eaa3SPeter Zijlstra raw_spin_rq_unlock(rq); 2278b6366f04SSteven Rostedt } 2279b6366f04SSteven Rostedt 2280ad0f1d9dSSteven Rostedt (VMware) raw_spin_lock(&rd->rto_lock); 22814bdced5cSSteven Rostedt (Red Hat) 2282b6366f04SSteven Rostedt /* Pass the IPI to the next rt overloaded queue */ 2283ad0f1d9dSSteven Rostedt (VMware) cpu = rto_next_cpu(rd); 2284b6366f04SSteven Rostedt 2285ad0f1d9dSSteven Rostedt (VMware) raw_spin_unlock(&rd->rto_lock); 2286b6366f04SSteven Rostedt 2287364f5665SSteven Rostedt (VMware) if (cpu < 0) { 2288364f5665SSteven Rostedt (VMware) sched_put_rd(rd); 2289b6366f04SSteven Rostedt return; 2290364f5665SSteven Rostedt (VMware) } 2291b6366f04SSteven Rostedt 2292b6366f04SSteven Rostedt /* Try the next RT overloaded CPU */ 2293ad0f1d9dSSteven Rostedt (VMware) irq_work_queue_on(&rd->rto_push_work, cpu); 2294b6366f04SSteven Rostedt } 2295b6366f04SSteven Rostedt #endif /* HAVE_RT_PUSH_IPI */ 2296b6366f04SSteven Rostedt 22978046d680SPeter Zijlstra static void pull_rt_task(struct rq *this_rq) 2298391e43daSPeter Zijlstra { 22998046d680SPeter Zijlstra int this_cpu = this_rq->cpu, cpu; 23008046d680SPeter Zijlstra bool resched = false; 2301a7c81556SPeter Zijlstra struct task_struct *p, *push_task; 2302391e43daSPeter Zijlstra struct rq *src_rq; 2303f73c52a5SSteven Rostedt int rt_overload_count = rt_overloaded(this_rq); 2304391e43daSPeter Zijlstra 2305f73c52a5SSteven Rostedt if (likely(!rt_overload_count)) 23068046d680SPeter Zijlstra return; 2307391e43daSPeter Zijlstra 23087c3f2ab7SPeter Zijlstra /* 23097c3f2ab7SPeter Zijlstra * Match the barrier from rt_set_overload(); this guarantees that if we 23107c3f2ab7SPeter Zijlstra * see overloaded we must also see the 
rto_mask bit. 23117c3f2ab7SPeter Zijlstra */ 23127c3f2ab7SPeter Zijlstra smp_rmb(); 23137c3f2ab7SPeter Zijlstra 2314f73c52a5SSteven Rostedt /* If we are the only overloaded CPU, do nothing */ 2315f73c52a5SSteven Rostedt if (rt_overload_count == 1 && 2316f73c52a5SSteven Rostedt cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) 2317f73c52a5SSteven Rostedt return; 2318f73c52a5SSteven Rostedt 2319b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI 2320b6366f04SSteven Rostedt if (sched_feat(RT_PUSH_IPI)) { 2321b6366f04SSteven Rostedt tell_cpu_to_push(this_rq); 23228046d680SPeter Zijlstra return; 2323b6366f04SSteven Rostedt } 2324b6366f04SSteven Rostedt #endif 2325b6366f04SSteven Rostedt 2326391e43daSPeter Zijlstra for_each_cpu(cpu, this_rq->rd->rto_mask) { 2327391e43daSPeter Zijlstra if (this_cpu == cpu) 2328391e43daSPeter Zijlstra continue; 2329391e43daSPeter Zijlstra 2330391e43daSPeter Zijlstra src_rq = cpu_rq(cpu); 2331391e43daSPeter Zijlstra 2332391e43daSPeter Zijlstra /* 2333391e43daSPeter Zijlstra * Don't bother taking the src_rq->lock if the next highest 2334391e43daSPeter Zijlstra * task is known to be lower-priority than our current task. 2335391e43daSPeter Zijlstra * This may look racy, but if this value is about to go 2336391e43daSPeter Zijlstra * logically higher, the src_rq will push this task away. 2337391e43daSPeter Zijlstra * And if it's going logically lower, we do not care. 2338391e43daSPeter Zijlstra */ 2339391e43daSPeter Zijlstra if (src_rq->rt.highest_prio.next >= 2340391e43daSPeter Zijlstra this_rq->rt.highest_prio.curr) 2341391e43daSPeter Zijlstra continue; 2342391e43daSPeter Zijlstra 2343391e43daSPeter Zijlstra /* 2344391e43daSPeter Zijlstra * We can potentially drop this_rq's lock in 2345391e43daSPeter Zijlstra * double_lock_balance, and another CPU could 2346391e43daSPeter Zijlstra * alter this_rq 2347391e43daSPeter Zijlstra */ 2348a7c81556SPeter Zijlstra push_task = NULL; 2349391e43daSPeter Zijlstra double_lock_balance(this_rq, src_rq); 2350391e43daSPeter Zijlstra 2351391e43daSPeter Zijlstra /* 2352e23ee747SKirill Tkhai * We can pull only a task which is pushable 2353e23ee747SKirill Tkhai * on its rq, and no others. 2354391e43daSPeter Zijlstra */ 2355e23ee747SKirill Tkhai p = pick_highest_pushable_task(src_rq, this_cpu); 2356391e43daSPeter Zijlstra 2357391e43daSPeter Zijlstra /* 2358391e43daSPeter Zijlstra * Do we have an RT task that preempts 2359391e43daSPeter Zijlstra * the to-be-scheduled task? 2360391e43daSPeter Zijlstra */ 2361391e43daSPeter Zijlstra if (p && (p->prio < this_rq->rt.highest_prio.curr)) { 2362391e43daSPeter Zijlstra WARN_ON(p == src_rq->curr); 2363da0c1e65SKirill Tkhai WARN_ON(!task_on_rq_queued(p)); 2364391e43daSPeter Zijlstra 2365391e43daSPeter Zijlstra /* 2366391e43daSPeter Zijlstra * There's a chance that p is higher in priority 236797fb7a0aSIngo Molnar * than what's currently running on its CPU. 23683b03706fSIngo Molnar * This is just that p is waking up and hasn't 2369391e43daSPeter Zijlstra * had a chance to schedule.
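 * (Editor's aside, hypothetical numbers: a lower ->prio value means a
 * higher priority, so p->prio == 10 against src_rq->curr->prio == 50
 * means p outranks what src_rq is currently running and will get that
 * CPU as soon as it schedules; the test below therefore skips it.)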
We only pull 2370391e43daSPeter Zijlstra * p if it is lower in priority than the 2371391e43daSPeter Zijlstra * current task on the run queue 2372391e43daSPeter Zijlstra */ 2373391e43daSPeter Zijlstra if (p->prio < src_rq->curr->prio) 2374391e43daSPeter Zijlstra goto skip; 2375391e43daSPeter Zijlstra 2376a7c81556SPeter Zijlstra if (is_migration_disabled(p)) { 2377a7c81556SPeter Zijlstra push_task = get_push_task(src_rq); 2378a7c81556SPeter Zijlstra } else { 2379391e43daSPeter Zijlstra deactivate_task(src_rq, p, 0); 2380391e43daSPeter Zijlstra set_task_cpu(p, this_cpu); 2381391e43daSPeter Zijlstra activate_task(this_rq, p, 0); 2382a7c81556SPeter Zijlstra resched = true; 2383a7c81556SPeter Zijlstra } 2384391e43daSPeter Zijlstra /* 2385391e43daSPeter Zijlstra * We continue with the search, just in 2386391e43daSPeter Zijlstra * case there's an even higher prio task 2387391e43daSPeter Zijlstra * in another runqueue. (low likelihood 2388391e43daSPeter Zijlstra * but possible) 2389391e43daSPeter Zijlstra */ 2390391e43daSPeter Zijlstra } 2391391e43daSPeter Zijlstra skip: 2392391e43daSPeter Zijlstra double_unlock_balance(this_rq, src_rq); 2393a7c81556SPeter Zijlstra 2394a7c81556SPeter Zijlstra if (push_task) { 23955cb9eaa3SPeter Zijlstra raw_spin_rq_unlock(this_rq); 2396a7c81556SPeter Zijlstra stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop, 2397a7c81556SPeter Zijlstra push_task, &src_rq->push_work); 23985cb9eaa3SPeter Zijlstra raw_spin_rq_lock(this_rq); 2399a7c81556SPeter Zijlstra } 2400391e43daSPeter Zijlstra } 2401391e43daSPeter Zijlstra 24028046d680SPeter Zijlstra if (resched) 24038046d680SPeter Zijlstra resched_curr(this_rq); 2404391e43daSPeter Zijlstra } 2405391e43daSPeter Zijlstra 2406391e43daSPeter Zijlstra /* 2407391e43daSPeter Zijlstra * If we are not running and we are not going to reschedule soon, we should 2408391e43daSPeter Zijlstra * try to push tasks away now 2409391e43daSPeter Zijlstra */ 2410391e43daSPeter Zijlstra static void task_woken_rt(struct rq *rq, struct task_struct *p) 2411391e43daSPeter Zijlstra { 2412804d402fSQais Yousef bool need_to_push = !task_running(rq, p) && 2413391e43daSPeter Zijlstra !test_tsk_need_resched(rq->curr) && 24144b53a341SIngo Molnar p->nr_cpus_allowed > 1 && 24151baca4ceSJuri Lelli (dl_task(rq->curr) || rt_task(rq->curr)) && 24164b53a341SIngo Molnar (rq->curr->nr_cpus_allowed < 2 || 2417804d402fSQais Yousef rq->curr->prio <= p->prio); 2418804d402fSQais Yousef 2419d94a9df4SQais Yousef if (need_to_push) 2420391e43daSPeter Zijlstra push_rt_tasks(rq); 2421391e43daSPeter Zijlstra } 2422391e43daSPeter Zijlstra 2423391e43daSPeter Zijlstra /* Assumes rq->lock is held */ 2424391e43daSPeter Zijlstra static void rq_online_rt(struct rq *rq) 2425391e43daSPeter Zijlstra { 2426391e43daSPeter Zijlstra if (rq->rt.overloaded) 2427391e43daSPeter Zijlstra rt_set_overload(rq); 2428391e43daSPeter Zijlstra 2429391e43daSPeter Zijlstra __enable_runtime(rq); 2430391e43daSPeter Zijlstra 2431391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); 2432391e43daSPeter Zijlstra } 2433391e43daSPeter Zijlstra 2434391e43daSPeter Zijlstra /* Assumes rq->lock is held */ 2435391e43daSPeter Zijlstra static void rq_offline_rt(struct rq *rq) 2436391e43daSPeter Zijlstra { 2437391e43daSPeter Zijlstra if (rq->rt.overloaded) 2438391e43daSPeter Zijlstra rt_clear_overload(rq); 2439391e43daSPeter Zijlstra 2440391e43daSPeter Zijlstra __disable_runtime(rq); 2441391e43daSPeter Zijlstra 2442391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); 
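	/*
	 * Editor's note: CPUPRI_INVALID drops this CPU from the cpupri
	 * bookkeeping entirely, so find_lowest_rq() on other CPUs stops
	 * considering it as a push target while it is offline.
	 */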
2443391e43daSPeter Zijlstra } 2444391e43daSPeter Zijlstra 2445391e43daSPeter Zijlstra /* 2446391e43daSPeter Zijlstra * When switching from the rt queue, we bring ourselves to a position 2447391e43daSPeter Zijlstra * that we might want to pull RT tasks from other runqueues. 2448391e43daSPeter Zijlstra */ 2449391e43daSPeter Zijlstra static void switched_from_rt(struct rq *rq, struct task_struct *p) 2450391e43daSPeter Zijlstra { 2451391e43daSPeter Zijlstra /* 2452391e43daSPeter Zijlstra * If there are other RT tasks then we will reschedule 2453391e43daSPeter Zijlstra * and the scheduling of the other RT tasks will handle 2454391e43daSPeter Zijlstra * the balancing. But if we are the last RT task 2455391e43daSPeter Zijlstra * we may need to handle the pulling of RT tasks 2456391e43daSPeter Zijlstra * now. 2457391e43daSPeter Zijlstra */ 2458da0c1e65SKirill Tkhai if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) 24591158ddb5SKirill Tkhai return; 24601158ddb5SKirill Tkhai 246102d8ec94SIngo Molnar rt_queue_pull_task(rq); 2462391e43daSPeter Zijlstra } 2463391e43daSPeter Zijlstra 246411c785b7SLi Zefan void __init init_sched_rt_class(void) 2465391e43daSPeter Zijlstra { 2466391e43daSPeter Zijlstra unsigned int i; 2467391e43daSPeter Zijlstra 2468391e43daSPeter Zijlstra for_each_possible_cpu(i) { 2469391e43daSPeter Zijlstra zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 2470391e43daSPeter Zijlstra GFP_KERNEL, cpu_to_node(i)); 2471391e43daSPeter Zijlstra } 2472391e43daSPeter Zijlstra } 2473391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 2474391e43daSPeter Zijlstra 2475391e43daSPeter Zijlstra /* 2476391e43daSPeter Zijlstra * When switching a task to RT, we may overload the runqueue 2477391e43daSPeter Zijlstra * with RT tasks. In this case we try to push them off to 2478391e43daSPeter Zijlstra * other runqueues. 2479391e43daSPeter Zijlstra */ 2480391e43daSPeter Zijlstra static void switched_to_rt(struct rq *rq, struct task_struct *p) 2481391e43daSPeter Zijlstra { 2482391e43daSPeter Zijlstra /* 2483fecfcbc2SVincent Donnefort * If we are running, update the avg_rt tracking, as the running time 2484fecfcbc2SVincent Donnefort * will from now on be accounted into the latter. 2485fecfcbc2SVincent Donnefort */ 2486fecfcbc2SVincent Donnefort if (task_current(rq, p)) { 2487fecfcbc2SVincent Donnefort update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0); 2488fecfcbc2SVincent Donnefort return; 2489fecfcbc2SVincent Donnefort } 2490fecfcbc2SVincent Donnefort 2491fecfcbc2SVincent Donnefort /* 2492fecfcbc2SVincent Donnefort * If we are not running, we may need to preempt the current 2493fecfcbc2SVincent Donnefort * running task. If that current running task is also an RT task 2494391e43daSPeter Zijlstra * then see if we can move to another run queue. 2495391e43daSPeter Zijlstra */ 2496fecfcbc2SVincent Donnefort if (task_on_rq_queued(p)) { 2497391e43daSPeter Zijlstra #ifdef CONFIG_SMP 2498d94a9df4SQais Yousef if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) 249902d8ec94SIngo Molnar rt_queue_push_tasks(rq); 2500619bd4a7SSebastian Andrzej Siewior #endif /* CONFIG_SMP */ 25012fe25826SPaul E. McKenney if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) 25028875125eSKirill Tkhai resched_curr(rq); 2503391e43daSPeter Zijlstra } 2504391e43daSPeter Zijlstra } 2505391e43daSPeter Zijlstra 2506391e43daSPeter Zijlstra /* 2507391e43daSPeter Zijlstra * Priority of the task has changed. This may cause 2508391e43daSPeter Zijlstra * us to initiate a push or pull.
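 *
 * (Editor's example, hypothetical values: a queued but not running RT
 * task boosted from prio 50 to prio 5 now beats rq->curr, so the
 * !task_current() branch below simply reschedules; a running task
 * whose priority drops instead triggers a pull from busier runqueues.)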
2509391e43daSPeter Zijlstra */ 2510391e43daSPeter Zijlstra static void 2511391e43daSPeter Zijlstra prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) 2512391e43daSPeter Zijlstra { 2513da0c1e65SKirill Tkhai if (!task_on_rq_queued(p)) 2514391e43daSPeter Zijlstra return; 2515391e43daSPeter Zijlstra 251665bcf072SHui Su if (task_current(rq, p)) { 2517391e43daSPeter Zijlstra #ifdef CONFIG_SMP 2518391e43daSPeter Zijlstra /* 2519391e43daSPeter Zijlstra * If our priority decreases while running, we 2520391e43daSPeter Zijlstra * may need to pull tasks to this runqueue. 2521391e43daSPeter Zijlstra */ 2522391e43daSPeter Zijlstra if (oldprio < p->prio) 252302d8ec94SIngo Molnar rt_queue_pull_task(rq); 2524fd7a4bedSPeter Zijlstra 2525391e43daSPeter Zijlstra /* 2526391e43daSPeter Zijlstra * If there's a higher priority task waiting to run 2527fd7a4bedSPeter Zijlstra * then reschedule. 2528391e43daSPeter Zijlstra */ 2529fd7a4bedSPeter Zijlstra if (p->prio > rq->rt.highest_prio.curr) 25308875125eSKirill Tkhai resched_curr(rq); 2531391e43daSPeter Zijlstra #else 2532391e43daSPeter Zijlstra /* For UP simply resched on drop of prio */ 2533391e43daSPeter Zijlstra if (oldprio < p->prio) 25348875125eSKirill Tkhai resched_curr(rq); 2535391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 2536391e43daSPeter Zijlstra } else { 2537391e43daSPeter Zijlstra /* 2538391e43daSPeter Zijlstra * This task is not running, but if it is 2539391e43daSPeter Zijlstra * greater than the current running task 2540391e43daSPeter Zijlstra * then reschedule. 2541391e43daSPeter Zijlstra */ 2542391e43daSPeter Zijlstra if (p->prio < rq->curr->prio) 25438875125eSKirill Tkhai resched_curr(rq); 2544391e43daSPeter Zijlstra } 2545391e43daSPeter Zijlstra } 2546391e43daSPeter Zijlstra 2547b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS 2548391e43daSPeter Zijlstra static void watchdog(struct rq *rq, struct task_struct *p) 2549391e43daSPeter Zijlstra { 2550391e43daSPeter Zijlstra unsigned long soft, hard; 2551391e43daSPeter Zijlstra 2552391e43daSPeter Zijlstra /* max may change after cur was read; this will be fixed next tick */ 2553391e43daSPeter Zijlstra soft = task_rlimit(p, RLIMIT_RTTIME); 2554391e43daSPeter Zijlstra hard = task_rlimit_max(p, RLIMIT_RTTIME); 2555391e43daSPeter Zijlstra 2556391e43daSPeter Zijlstra if (soft != RLIM_INFINITY) { 2557391e43daSPeter Zijlstra unsigned long next; 2558391e43daSPeter Zijlstra 255957d2aa00SYing Xue if (p->rt.watchdog_stamp != jiffies) { 2560391e43daSPeter Zijlstra p->rt.timeout++; 256157d2aa00SYing Xue p->rt.watchdog_stamp = jiffies; 256257d2aa00SYing Xue } 256357d2aa00SYing Xue 2564391e43daSPeter Zijlstra next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); 25653a245c0fSThomas Gleixner if (p->rt.timeout > next) { 25663a245c0fSThomas Gleixner posix_cputimers_rt_watchdog(&p->posix_cputimers, 25673a245c0fSThomas Gleixner p->se.sum_exec_runtime); 25683a245c0fSThomas Gleixner } 2569391e43daSPeter Zijlstra } 2570391e43daSPeter Zijlstra } 2571b18b6a9cSNicolas Pitre #else 2572b18b6a9cSNicolas Pitre static inline void watchdog(struct rq *rq, struct task_struct *p) { } 2573b18b6a9cSNicolas Pitre #endif 2574391e43daSPeter Zijlstra 2575d84b3131SFrederic Weisbecker /* 2576d84b3131SFrederic Weisbecker * scheduler tick hitting a task of our scheduling class. 2577d84b3131SFrederic Weisbecker * 2578d84b3131SFrederic Weisbecker * NOTE: This function can be called remotely by the tick offload that 2579d84b3131SFrederic Weisbecker * goes along full dynticks.
Therefore no local assumption can be made 2580d84b3131SFrederic Weisbecker * and everything must be accessed through the @rq and @curr passed in 2581d84b3131SFrederic Weisbecker * parameters. 2582d84b3131SFrederic Weisbecker */ 2583391e43daSPeter Zijlstra static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) 2584391e43daSPeter Zijlstra { 2585454c7999SColin Cross struct sched_rt_entity *rt_se = &p->rt; 2586454c7999SColin Cross 2587391e43daSPeter Zijlstra update_curr_rt(rq); 258823127296SVincent Guittot update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); 2589391e43daSPeter Zijlstra 2590391e43daSPeter Zijlstra watchdog(rq, p); 2591391e43daSPeter Zijlstra 2592391e43daSPeter Zijlstra /* 2593391e43daSPeter Zijlstra * RR tasks need a special form of timeslice management. 2594391e43daSPeter Zijlstra * FIFO tasks have no timeslices. 2595391e43daSPeter Zijlstra */ 2596391e43daSPeter Zijlstra if (p->policy != SCHED_RR) 2597391e43daSPeter Zijlstra return; 2598391e43daSPeter Zijlstra 2599391e43daSPeter Zijlstra if (--p->rt.time_slice) 2600391e43daSPeter Zijlstra return; 2601391e43daSPeter Zijlstra 2602ce0dbbbbSClark Williams p->rt.time_slice = sched_rr_timeslice; 2603391e43daSPeter Zijlstra 2604391e43daSPeter Zijlstra /* 2605e9aa39bbSLi Bin * Requeue to the end of the queue if we (and all of our ancestors) are not 2606e9aa39bbSLi Bin * the only element on the queue 2607391e43daSPeter Zijlstra */ 2608454c7999SColin Cross for_each_sched_rt_entity(rt_se) { 2609454c7999SColin Cross if (rt_se->run_list.prev != rt_se->run_list.next) { 2610391e43daSPeter Zijlstra requeue_task_rt(rq, p, 0); 26118aa6f0ebSKirill Tkhai resched_curr(rq); 2612454c7999SColin Cross return; 2613454c7999SColin Cross } 2614391e43daSPeter Zijlstra } 2615391e43daSPeter Zijlstra } 2616391e43daSPeter Zijlstra 2617391e43daSPeter Zijlstra static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) 2618391e43daSPeter Zijlstra { 2619391e43daSPeter Zijlstra /* 2620391e43daSPeter Zijlstra * Time slice is 0 for SCHED_FIFO tasks 2621391e43daSPeter Zijlstra */ 2622391e43daSPeter Zijlstra if (task->policy == SCHED_RR) 2623ce0dbbbbSClark Williams return sched_rr_timeslice; 2624391e43daSPeter Zijlstra else 2625391e43daSPeter Zijlstra return 0; 2626391e43daSPeter Zijlstra } 2627391e43daSPeter Zijlstra 262843c31ac0SPeter Zijlstra DEFINE_SCHED_CLASS(rt) = { 262943c31ac0SPeter Zijlstra 2630391e43daSPeter Zijlstra .enqueue_task = enqueue_task_rt, 2631391e43daSPeter Zijlstra .dequeue_task = dequeue_task_rt, 2632391e43daSPeter Zijlstra .yield_task = yield_task_rt, 2633391e43daSPeter Zijlstra 2634391e43daSPeter Zijlstra .check_preempt_curr = check_preempt_curr_rt, 2635391e43daSPeter Zijlstra 2636391e43daSPeter Zijlstra .pick_next_task = pick_next_task_rt, 2637391e43daSPeter Zijlstra .put_prev_task = put_prev_task_rt, 263803b7fad1SPeter Zijlstra .set_next_task = set_next_task_rt, 2639391e43daSPeter Zijlstra 2640391e43daSPeter Zijlstra #ifdef CONFIG_SMP 26416e2df058SPeter Zijlstra .balance = balance_rt, 264221f56ffeSPeter Zijlstra .pick_task = pick_task_rt, 2643391e43daSPeter Zijlstra .select_task_rq = select_task_rq_rt, 26446c37067eSPeter Zijlstra .set_cpus_allowed = set_cpus_allowed_common, 2645391e43daSPeter Zijlstra .rq_online = rq_online_rt, 2646391e43daSPeter Zijlstra .rq_offline = rq_offline_rt, 2647391e43daSPeter Zijlstra .task_woken = task_woken_rt, 2648391e43daSPeter Zijlstra .switched_from = switched_from_rt, 2649a7c81556SPeter Zijlstra .find_lock_rq = find_lock_lowest_rq, 2650391e43daSPeter Zijlstra #endif
2651391e43daSPeter Zijlstra 2652391e43daSPeter Zijlstra .task_tick = task_tick_rt, 2653391e43daSPeter Zijlstra 2654391e43daSPeter Zijlstra .get_rr_interval = get_rr_interval_rt, 2655391e43daSPeter Zijlstra 2656391e43daSPeter Zijlstra .prio_changed = prio_changed_rt, 2657391e43daSPeter Zijlstra .switched_to = switched_to_rt, 26586e998916SStanislaw Gruszka 26596e998916SStanislaw Gruszka .update_curr = update_curr_rt, 2660982d9cdcSPatrick Bellasi 2661982d9cdcSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK 2662982d9cdcSPatrick Bellasi .uclamp_enabled = 1, 2663982d9cdcSPatrick Bellasi #endif 2664391e43daSPeter Zijlstra }; 2665391e43daSPeter Zijlstra 26668887cd99SNicolas Pitre #ifdef CONFIG_RT_GROUP_SCHED 26678887cd99SNicolas Pitre /* 26688887cd99SNicolas Pitre * Ensure that the real time constraints are schedulable. 26698887cd99SNicolas Pitre */ 26708887cd99SNicolas Pitre static DEFINE_MUTEX(rt_constraints_mutex); 26718887cd99SNicolas Pitre 26728887cd99SNicolas Pitre static inline int tg_has_rt_tasks(struct task_group *tg) 26738887cd99SNicolas Pitre { 2674b4fb015eSKonstantin Khlebnikov struct task_struct *task; 2675b4fb015eSKonstantin Khlebnikov struct css_task_iter it; 2676b4fb015eSKonstantin Khlebnikov int ret = 0; 26778887cd99SNicolas Pitre 26788887cd99SNicolas Pitre /* 26798887cd99SNicolas Pitre * Autogroups do not have RT tasks; see autogroup_create(). 26808887cd99SNicolas Pitre */ 26818887cd99SNicolas Pitre if (task_group_is_autogroup(tg)) 26828887cd99SNicolas Pitre return 0; 26838887cd99SNicolas Pitre 2684b4fb015eSKonstantin Khlebnikov css_task_iter_start(&tg->css, 0, &it); 2685b4fb015eSKonstantin Khlebnikov while (!ret && (task = css_task_iter_next(&it))) 2686b4fb015eSKonstantin Khlebnikov ret |= rt_task(task); 2687b4fb015eSKonstantin Khlebnikov css_task_iter_end(&it); 26888887cd99SNicolas Pitre 2689b4fb015eSKonstantin Khlebnikov return ret; 26908887cd99SNicolas Pitre } 26918887cd99SNicolas Pitre 26928887cd99SNicolas Pitre struct rt_schedulable_data { 26938887cd99SNicolas Pitre struct task_group *tg; 26948887cd99SNicolas Pitre u64 rt_period; 26958887cd99SNicolas Pitre u64 rt_runtime; 26968887cd99SNicolas Pitre }; 26978887cd99SNicolas Pitre 26988887cd99SNicolas Pitre static int tg_rt_schedulable(struct task_group *tg, void *data) 26998887cd99SNicolas Pitre { 27008887cd99SNicolas Pitre struct rt_schedulable_data *d = data; 27018887cd99SNicolas Pitre struct task_group *child; 27028887cd99SNicolas Pitre unsigned long total, sum = 0; 27038887cd99SNicolas Pitre u64 period, runtime; 27048887cd99SNicolas Pitre 27058887cd99SNicolas Pitre period = ktime_to_ns(tg->rt_bandwidth.rt_period); 27068887cd99SNicolas Pitre runtime = tg->rt_bandwidth.rt_runtime; 27078887cd99SNicolas Pitre 27088887cd99SNicolas Pitre if (tg == d->tg) { 27098887cd99SNicolas Pitre period = d->rt_period; 27108887cd99SNicolas Pitre runtime = d->rt_runtime; 27118887cd99SNicolas Pitre } 27128887cd99SNicolas Pitre 27138887cd99SNicolas Pitre /* 27148887cd99SNicolas Pitre * Cannot have more runtime than the period. 27158887cd99SNicolas Pitre */ 27168887cd99SNicolas Pitre if (runtime > period && runtime != RUNTIME_INF) 27178887cd99SNicolas Pitre return -EINVAL; 27188887cd99SNicolas Pitre 27198887cd99SNicolas Pitre /* 2720b4fb015eSKonstantin Khlebnikov * Ensure we don't starve existing RT tasks if runtime turns zero. 
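 *
 * (Editor's example: if a group still has RT tasks, a write flipping
 * its rt_runtime from a positive value to 0 is refused with -EBUSY
 * below, rather than silently leaving those tasks unable to run.)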
27218887cd99SNicolas Pitre */ 2722b4fb015eSKonstantin Khlebnikov if (rt_bandwidth_enabled() && !runtime && 2723b4fb015eSKonstantin Khlebnikov tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg)) 27248887cd99SNicolas Pitre return -EBUSY; 27258887cd99SNicolas Pitre 27268887cd99SNicolas Pitre total = to_ratio(period, runtime); 27278887cd99SNicolas Pitre 27288887cd99SNicolas Pitre /* 27298887cd99SNicolas Pitre * Nobody can have more than the global setting allows. 27308887cd99SNicolas Pitre */ 27318887cd99SNicolas Pitre if (total > to_ratio(global_rt_period(), global_rt_runtime())) 27328887cd99SNicolas Pitre return -EINVAL; 27338887cd99SNicolas Pitre 27348887cd99SNicolas Pitre /* 27358887cd99SNicolas Pitre * The sum of our children's runtime should not exceed our own. 27368887cd99SNicolas Pitre */ 27378887cd99SNicolas Pitre list_for_each_entry_rcu(child, &tg->children, siblings) { 27388887cd99SNicolas Pitre period = ktime_to_ns(child->rt_bandwidth.rt_period); 27398887cd99SNicolas Pitre runtime = child->rt_bandwidth.rt_runtime; 27408887cd99SNicolas Pitre 27418887cd99SNicolas Pitre if (child == d->tg) { 27428887cd99SNicolas Pitre period = d->rt_period; 27438887cd99SNicolas Pitre runtime = d->rt_runtime; 27448887cd99SNicolas Pitre } 27458887cd99SNicolas Pitre 27468887cd99SNicolas Pitre sum += to_ratio(period, runtime); 27478887cd99SNicolas Pitre } 27488887cd99SNicolas Pitre 27498887cd99SNicolas Pitre if (sum > total) 27508887cd99SNicolas Pitre return -EINVAL; 27518887cd99SNicolas Pitre 27528887cd99SNicolas Pitre return 0; 27538887cd99SNicolas Pitre } 27548887cd99SNicolas Pitre 27558887cd99SNicolas Pitre static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 27568887cd99SNicolas Pitre { 27578887cd99SNicolas Pitre int ret; 27588887cd99SNicolas Pitre 27598887cd99SNicolas Pitre struct rt_schedulable_data data = { 27608887cd99SNicolas Pitre .tg = tg, 27618887cd99SNicolas Pitre .rt_period = period, 27628887cd99SNicolas Pitre .rt_runtime = runtime, 27638887cd99SNicolas Pitre }; 27648887cd99SNicolas Pitre 27658887cd99SNicolas Pitre rcu_read_lock(); 27668887cd99SNicolas Pitre ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); 27678887cd99SNicolas Pitre rcu_read_unlock(); 27688887cd99SNicolas Pitre 27698887cd99SNicolas Pitre return ret; 27708887cd99SNicolas Pitre } 27718887cd99SNicolas Pitre 27728887cd99SNicolas Pitre static int tg_set_rt_bandwidth(struct task_group *tg, 27738887cd99SNicolas Pitre u64 rt_period, u64 rt_runtime) 27748887cd99SNicolas Pitre { 27758887cd99SNicolas Pitre int i, err = 0; 27768887cd99SNicolas Pitre 27778887cd99SNicolas Pitre /* 27788887cd99SNicolas Pitre * Disallowing the root group RT runtime is BAD; it would disallow the 27798887cd99SNicolas Pitre * kernel from creating (and/or operating) RT threads. 27808887cd99SNicolas Pitre */ 27818887cd99SNicolas Pitre if (tg == &root_task_group && rt_runtime == 0) 27828887cd99SNicolas Pitre return -EINVAL; 27838887cd99SNicolas Pitre 27848887cd99SNicolas Pitre /* A zero period doesn't make any sense. */ 27858887cd99SNicolas Pitre if (rt_period == 0) 27868887cd99SNicolas Pitre return -EINVAL; 27878887cd99SNicolas Pitre 2788d505b8afSHuaixin Chang /* 2789d505b8afSHuaixin Chang * Bound the quota to defend against overflow during the bandwidth shift.
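 *
 * (Editor's worked example, assuming BW_SHIFT == 20: the ratio math
 * computes runtime << BW_SHIFT, so the runtime must stay below
 * 2^64 / 2^20 == 2^44 ns, roughly 4.8 hours, which is the bound
 * precomputed in max_rt_runtime.)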
2790d505b8afSHuaixin Chang */ 2791d505b8afSHuaixin Chang if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime) 2792d505b8afSHuaixin Chang return -EINVAL; 2793d505b8afSHuaixin Chang 27948887cd99SNicolas Pitre mutex_lock(&rt_constraints_mutex); 27958887cd99SNicolas Pitre err = __rt_schedulable(tg, rt_period, rt_runtime); 27968887cd99SNicolas Pitre if (err) 27978887cd99SNicolas Pitre goto unlock; 27988887cd99SNicolas Pitre 27998887cd99SNicolas Pitre raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 28008887cd99SNicolas Pitre tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 28018887cd99SNicolas Pitre tg->rt_bandwidth.rt_runtime = rt_runtime; 28028887cd99SNicolas Pitre 28038887cd99SNicolas Pitre for_each_possible_cpu(i) { 28048887cd99SNicolas Pitre struct rt_rq *rt_rq = tg->rt_rq[i]; 28058887cd99SNicolas Pitre 28068887cd99SNicolas Pitre raw_spin_lock(&rt_rq->rt_runtime_lock); 28078887cd99SNicolas Pitre rt_rq->rt_runtime = rt_runtime; 28088887cd99SNicolas Pitre raw_spin_unlock(&rt_rq->rt_runtime_lock); 28098887cd99SNicolas Pitre } 28108887cd99SNicolas Pitre raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 28118887cd99SNicolas Pitre unlock: 28128887cd99SNicolas Pitre mutex_unlock(&rt_constraints_mutex); 28138887cd99SNicolas Pitre 28148887cd99SNicolas Pitre return err; 28158887cd99SNicolas Pitre } 28168887cd99SNicolas Pitre 28178887cd99SNicolas Pitre int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) 28188887cd99SNicolas Pitre { 28198887cd99SNicolas Pitre u64 rt_runtime, rt_period; 28208887cd99SNicolas Pitre 28218887cd99SNicolas Pitre rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); 28228887cd99SNicolas Pitre rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; 28238887cd99SNicolas Pitre if (rt_runtime_us < 0) 28248887cd99SNicolas Pitre rt_runtime = RUNTIME_INF; 28251a010e29SKonstantin Khlebnikov else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC) 28261a010e29SKonstantin Khlebnikov return -EINVAL; 28278887cd99SNicolas Pitre 28288887cd99SNicolas Pitre return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 28298887cd99SNicolas Pitre } 28308887cd99SNicolas Pitre 28318887cd99SNicolas Pitre long sched_group_rt_runtime(struct task_group *tg) 28328887cd99SNicolas Pitre { 28338887cd99SNicolas Pitre u64 rt_runtime_us; 28348887cd99SNicolas Pitre 28358887cd99SNicolas Pitre if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) 28368887cd99SNicolas Pitre return -1; 28378887cd99SNicolas Pitre 28388887cd99SNicolas Pitre rt_runtime_us = tg->rt_bandwidth.rt_runtime; 28398887cd99SNicolas Pitre do_div(rt_runtime_us, NSEC_PER_USEC); 28408887cd99SNicolas Pitre return rt_runtime_us; 28418887cd99SNicolas Pitre } 28428887cd99SNicolas Pitre 28438887cd99SNicolas Pitre int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) 28448887cd99SNicolas Pitre { 28458887cd99SNicolas Pitre u64 rt_runtime, rt_period; 28468887cd99SNicolas Pitre 28471a010e29SKonstantin Khlebnikov if (rt_period_us > U64_MAX / NSEC_PER_USEC) 28481a010e29SKonstantin Khlebnikov return -EINVAL; 28491a010e29SKonstantin Khlebnikov 28508887cd99SNicolas Pitre rt_period = rt_period_us * NSEC_PER_USEC; 28518887cd99SNicolas Pitre rt_runtime = tg->rt_bandwidth.rt_runtime; 28528887cd99SNicolas Pitre 28538887cd99SNicolas Pitre return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 28548887cd99SNicolas Pitre } 28558887cd99SNicolas Pitre 28568887cd99SNicolas Pitre long sched_group_rt_period(struct task_group *tg) 28578887cd99SNicolas Pitre { 28588887cd99SNicolas Pitre u64 rt_period_us; 28598887cd99SNicolas Pitre 
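	/*
	 * Editor's note: do_div() rather than a plain '/' because a u64
	 * divided by a 32-bit divisor must go through the helper on
	 * 32-bit architectures; it divides in place and returns the
	 * remainder, which is ignored here.
	 */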
28608887cd99SNicolas Pitre rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); 28618887cd99SNicolas Pitre do_div(rt_period_us, NSEC_PER_USEC); 28628887cd99SNicolas Pitre return rt_period_us; 28638887cd99SNicolas Pitre } 28648887cd99SNicolas Pitre 28658887cd99SNicolas Pitre static int sched_rt_global_constraints(void) 28668887cd99SNicolas Pitre { 28678887cd99SNicolas Pitre int ret = 0; 28688887cd99SNicolas Pitre 28698887cd99SNicolas Pitre mutex_lock(&rt_constraints_mutex); 28708887cd99SNicolas Pitre ret = __rt_schedulable(NULL, 0, 0); 28718887cd99SNicolas Pitre mutex_unlock(&rt_constraints_mutex); 28728887cd99SNicolas Pitre 28738887cd99SNicolas Pitre return ret; 28748887cd99SNicolas Pitre } 28758887cd99SNicolas Pitre 28768887cd99SNicolas Pitre int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) 28778887cd99SNicolas Pitre { 28788887cd99SNicolas Pitre /* Don't accept realtime tasks when there is no way for them to run */ 28798887cd99SNicolas Pitre if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) 28808887cd99SNicolas Pitre return 0; 28818887cd99SNicolas Pitre 28828887cd99SNicolas Pitre return 1; 28838887cd99SNicolas Pitre } 28848887cd99SNicolas Pitre 28858887cd99SNicolas Pitre #else /* !CONFIG_RT_GROUP_SCHED */ 28868887cd99SNicolas Pitre static int sched_rt_global_constraints(void) 28878887cd99SNicolas Pitre { 28888887cd99SNicolas Pitre unsigned long flags; 28898887cd99SNicolas Pitre int i; 28908887cd99SNicolas Pitre 28918887cd99SNicolas Pitre raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 28928887cd99SNicolas Pitre for_each_possible_cpu(i) { 28938887cd99SNicolas Pitre struct rt_rq *rt_rq = &cpu_rq(i)->rt; 28948887cd99SNicolas Pitre 28958887cd99SNicolas Pitre raw_spin_lock(&rt_rq->rt_runtime_lock); 28968887cd99SNicolas Pitre rt_rq->rt_runtime = global_rt_runtime(); 28978887cd99SNicolas Pitre raw_spin_unlock(&rt_rq->rt_runtime_lock); 28988887cd99SNicolas Pitre } 28998887cd99SNicolas Pitre raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 29008887cd99SNicolas Pitre 29018887cd99SNicolas Pitre return 0; 29028887cd99SNicolas Pitre } 29038887cd99SNicolas Pitre #endif /* CONFIG_RT_GROUP_SCHED */ 29048887cd99SNicolas Pitre 29058887cd99SNicolas Pitre static int sched_rt_global_validate(void) 29068887cd99SNicolas Pitre { 29078887cd99SNicolas Pitre if (sysctl_sched_rt_period <= 0) 29088887cd99SNicolas Pitre return -EINVAL; 29098887cd99SNicolas Pitre 29108887cd99SNicolas Pitre if ((sysctl_sched_rt_runtime != RUNTIME_INF) && 2911d505b8afSHuaixin Chang ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) || 2912d505b8afSHuaixin Chang ((u64)sysctl_sched_rt_runtime * 2913d505b8afSHuaixin Chang NSEC_PER_USEC > max_rt_runtime))) 29148887cd99SNicolas Pitre return -EINVAL; 29158887cd99SNicolas Pitre 29168887cd99SNicolas Pitre return 0; 29178887cd99SNicolas Pitre } 29188887cd99SNicolas Pitre 29198887cd99SNicolas Pitre static void sched_rt_do_global(void) 29208887cd99SNicolas Pitre { 29219b58e976SLi Hua unsigned long flags; 29229b58e976SLi Hua 29239b58e976SLi Hua raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 29248887cd99SNicolas Pitre def_rt_bandwidth.rt_runtime = global_rt_runtime(); 29258887cd99SNicolas Pitre def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); 29269b58e976SLi Hua raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 29278887cd99SNicolas Pitre } 29288887cd99SNicolas Pitre 292932927393SChristoph Hellwig int sched_rt_handler(struct ctl_table *table, int write, void *buffer, 
293032927393SChristoph Hellwig size_t *lenp, loff_t *ppos) 29318887cd99SNicolas Pitre { 29328887cd99SNicolas Pitre int old_period, old_runtime; 29338887cd99SNicolas Pitre static DEFINE_MUTEX(mutex); 29348887cd99SNicolas Pitre int ret; 29358887cd99SNicolas Pitre 29368887cd99SNicolas Pitre mutex_lock(&mutex); 29378887cd99SNicolas Pitre old_period = sysctl_sched_rt_period; 29388887cd99SNicolas Pitre old_runtime = sysctl_sched_rt_runtime; 29398887cd99SNicolas Pitre 29408887cd99SNicolas Pitre ret = proc_dointvec(table, write, buffer, lenp, ppos); 29418887cd99SNicolas Pitre 29428887cd99SNicolas Pitre if (!ret && write) { 29438887cd99SNicolas Pitre ret = sched_rt_global_validate(); 29448887cd99SNicolas Pitre if (ret) 29458887cd99SNicolas Pitre goto undo; 29468887cd99SNicolas Pitre 29478887cd99SNicolas Pitre ret = sched_dl_global_validate(); 29488887cd99SNicolas Pitre if (ret) 29498887cd99SNicolas Pitre goto undo; 29508887cd99SNicolas Pitre 29518887cd99SNicolas Pitre ret = sched_rt_global_constraints(); 29528887cd99SNicolas Pitre if (ret) 29538887cd99SNicolas Pitre goto undo; 29548887cd99SNicolas Pitre 29558887cd99SNicolas Pitre sched_rt_do_global(); 29568887cd99SNicolas Pitre sched_dl_do_global(); 29578887cd99SNicolas Pitre } 29588887cd99SNicolas Pitre if (0) { 29598887cd99SNicolas Pitre undo: 29608887cd99SNicolas Pitre sysctl_sched_rt_period = old_period; 29618887cd99SNicolas Pitre sysctl_sched_rt_runtime = old_runtime; 29628887cd99SNicolas Pitre } 29638887cd99SNicolas Pitre mutex_unlock(&mutex); 29648887cd99SNicolas Pitre 29658887cd99SNicolas Pitre return ret; 29668887cd99SNicolas Pitre } 29678887cd99SNicolas Pitre 296832927393SChristoph Hellwig int sched_rr_handler(struct ctl_table *table, int write, void *buffer, 296932927393SChristoph Hellwig size_t *lenp, loff_t *ppos) 29708887cd99SNicolas Pitre { 29718887cd99SNicolas Pitre int ret; 29728887cd99SNicolas Pitre static DEFINE_MUTEX(mutex); 29738887cd99SNicolas Pitre 29748887cd99SNicolas Pitre mutex_lock(&mutex); 29758887cd99SNicolas Pitre ret = proc_dointvec(table, write, buffer, lenp, ppos); 29768887cd99SNicolas Pitre /* 29778887cd99SNicolas Pitre * Make sure that internally we keep jiffies. 29788887cd99SNicolas Pitre * Also, writing zero resets the timeslice to default: 29798887cd99SNicolas Pitre */ 29808887cd99SNicolas Pitre if (!ret && write) { 29818887cd99SNicolas Pitre sched_rr_timeslice = 29828887cd99SNicolas Pitre sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE : 29838887cd99SNicolas Pitre msecs_to_jiffies(sysctl_sched_rr_timeslice); 29848887cd99SNicolas Pitre } 29858887cd99SNicolas Pitre mutex_unlock(&mutex); 298697fb7a0aSIngo Molnar 29878887cd99SNicolas Pitre return ret; 29888887cd99SNicolas Pitre } 29898887cd99SNicolas Pitre 2990391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 2991391e43daSPeter Zijlstra void print_rt_stats(struct seq_file *m, int cpu) 2992391e43daSPeter Zijlstra { 2993391e43daSPeter Zijlstra rt_rq_iter_t iter; 2994391e43daSPeter Zijlstra struct rt_rq *rt_rq; 2995391e43daSPeter Zijlstra 2996391e43daSPeter Zijlstra rcu_read_lock(); 2997391e43daSPeter Zijlstra for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) 2998391e43daSPeter Zijlstra print_rt_rq(m, cpu, rt_rq); 2999391e43daSPeter Zijlstra rcu_read_unlock(); 3000391e43daSPeter Zijlstra } 3001391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_DEBUG */ 3002
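/*
 * Editor's appendix (illustrative sketch, not part of the kernel; the
 * demo_* names are hypothetical): the handlers above only accept a
 * runtime that fits inside the period. A minimal user-space analogue
 * of that validation, with both values in microseconds:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	#define DEMO_RUNTIME_INF (-1)
 *
 *	static bool demo_rt_bandwidth_valid(int64_t period_us,
 *					    int64_t runtime_us)
 *	{
 *		if (period_us <= 0)
 *			return false;		// a zero period makes no sense
 *		if (runtime_us == DEMO_RUNTIME_INF)
 *			return true;		// unthrottled is always acceptable
 *		return runtime_us <= period_us;	// cannot exceed the period
 *	}
 *
 * demo_rt_bandwidth_valid(1000000, 950000) models the default 95% cap,
 * while demo_rt_bandwidth_valid(1000000, 2000000) fails just as
 * sched_rt_global_validate() would reject it with -EINVAL.
 */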