// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

int sched_rr_timeslice = RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

#ifdef CONFIG_SYSCTL
static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos);
static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos);
static struct ctl_table sched_rt_sysctls[] = {
	{
		.procname	= "sched_rt_period_us",
		.data		= &sysctl_sched_rt_period,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sched_rt_handler,
		.extra1		= SYSCTL_ONE,
		.extra2		= SYSCTL_INT_MAX,
	},
	{
		.procname	= "sched_rt_runtime_us",
		.data		= &sysctl_sched_rt_runtime,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= sched_rt_handler,
		.extra1		= SYSCTL_NEG_ONE,
		.extra2		= SYSCTL_INT_MAX,
	},
	{
		.procname	= "sched_rr_timeslice_ms",
		.data		= &sysctl_sched_rr_timeslice,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= sched_rr_handler,
	},
	{}
};

static int __init sched_rt_sysctl_init(void)
{
	register_sysctl_init("kernel", sched_rt_sysctls);
	return 0;
}
late_initcall(sched_rt_sysctl_init);
#endif

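/*
 * Periodic replenishment timer for an rt_bandwidth instance: each time it
 * fires, do_sched_rt_period_timer() refills the per-CPU rt_rq runtime and
 * unthrottles runqueues. The timer stops rearming itself once every
 * runqueue it services reports idle.
 */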
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_HARD);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/*
		 * SCHED_DEADLINE updates the bandwidth, as a run away
		 * RT task with a DL task could hog a CPU. But DL does
		 * not reset the period. If a deadline task was running
		 * without an RT task running, it can cause RT tasks to
		 * throttle when they start up. Kick the timer right away
		 * to update the period.
		 */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

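/*
 * Arm the replenishment timer, but only when RT bandwidth control is
 * actually in effect (sched_rt_runtime_us >= 0) and this rt_b is not
 * configured for unlimited runtime.
 */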
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	do_start_rt_bandwidth(rt_b);
}

void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	rt_rq->highest_prio.next = MAX_RT_PRIO-1;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

void unregister_rt_sched_group(struct task_group *tg)
{
	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

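/*
 * Attach a pre-allocated rt_rq and rt_se to task group @tg for @cpu,
 * linking the entity into its parent's runqueue hierarchy.
 */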
void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void unregister_rt_sched_group(struct task_group *tg) { }

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->online && rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

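/*
 * An rq is "RT overloaded" when it has more than one runnable RT task and
 * at least one of them can migrate; flag that in the root domain so other
 * CPUs know there is work worth pulling.
 */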
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void rt_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

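/*
 * Pushable tasks are kept on a priority-ordered plist per rq; the push
 * logic walks it to find the highest-priority task that could run on
 * another CPU.
 */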
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else {
		rq->rt.highest_prio.next = MAX_RT_PRIO-1;
	}
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where the uclamp_min
 * value is higher than the capacity of a @cpu. For non-heterogeneous systems
 * this function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned int min_cap;
	unsigned int max_cap;
	unsigned int cpu_cap;

	/* Only heterogeneous systems can benefit from this check */
	if (!sched_asym_cpucap_active())
		return true;

	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
	max_cap = uclamp_eff_value(p, UCLAMP_MAX);

	cpu_cap = capacity_orig_of(cpu);

	return cpu_cap >= min(min_cap, max_cap);
}
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	return true;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se) {
		dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
	} else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

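/*
 * A sched_rt_entity is "boosted" when it is (or contains) a task whose
 * priority was raised by priority inheritance; throttling is not enforced
 * against groups holding boosted tasks (see rt_rq_throttled()).
 */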
static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

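/*
 * Report whether @rt_rq is still within its bandwidth accounting window:
 * true if the replenishment timer is active or the accrued rt_time has not
 * yet reached the per-period runtime.
 */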
bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		WARN_ON_ONCE(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}

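/*
 * Counterpart of __disable_runtime(): when an rq comes (back) online,
 * reset every rt_rq it owns to the bandwidth configured in its
 * rt_bandwidth and clear any stale throttling state.
 */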
static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void balance_runtime(struct rt_rq *rt_rq)
{
	if (!sched_feat(RT_RUNTIME_SHARE))
		return;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */

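/*
 * Called from the rt_period timer: for every CPU in the period mask,
 * replenish the rt_rq's runtime, unthrottle it once it fits again, and
 * report whether all serviced runqueues are idle so the timer can stop.
 */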
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled. If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway. Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);
		struct rq_flags rf;
		int skip;

		/*
		 * When span == cpu_online_mask, taking each rq->lock
		 * can be time-consuming. Try to avoid it when possible.
		 */
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
			rt_rq->rt_runtime = rt_b->rt_runtime;
		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		if (skip)
			continue;

		rq_lock(rq, &rf);
		update_rq_clock(rq);

		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled wakeup_preempt() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_cancel_skipupdate(rq);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		rq_unlock(rq, &rf);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

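/*
 * Check whether @rt_rq has run through its per-period runtime; if so,
 * throttle it (dequeue it from its parent) and return 1 so the caller
 * can force a reschedule.
 */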
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	s64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = update_curr_common(rq);
	if (unlikely(delta_exec <= 0))
		return;

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
		int exceeded;

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			exceeded = sched_rt_runtime_exceeded(rt_rq);
			if (exceeded)
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
			if (exceeded)
				do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
		}
	}
}

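/*
 * The top-level rt_rq contributes its runnable tasks to rq->nr_running.
 * These helpers remove/add that contribution when the whole rt_rq is
 * throttled or becomes runnable again.
 */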
static void
dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, count);
	rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;

	if (rt_rq_throttled(rt_rq))
		return;

	if (rt_rq->rt_nr_running) {
		add_nr_running(rq, rt_rq->rt_nr_running);
		rt_rq->rt_queued = 1;
	}

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, 0);
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
1167391e43daSPeter Zijlstra {
1168391e43daSPeter Zijlstra int prev_prio = rt_rq->highest_prio.curr;
1169391e43daSPeter Zijlstra
1170391e43daSPeter Zijlstra if (prio < prev_prio)
1171391e43daSPeter Zijlstra rt_rq->highest_prio.curr = prio;
1172391e43daSPeter Zijlstra
1173391e43daSPeter Zijlstra inc_rt_prio_smp(rt_rq, prio, prev_prio);
1174391e43daSPeter Zijlstra }
1175391e43daSPeter Zijlstra
1176391e43daSPeter Zijlstra static void
dec_rt_prio(struct rt_rq * rt_rq,int prio)1177391e43daSPeter Zijlstra dec_rt_prio(struct rt_rq *rt_rq, int prio)
1178391e43daSPeter Zijlstra {
1179391e43daSPeter Zijlstra int prev_prio = rt_rq->highest_prio.curr;
1180391e43daSPeter Zijlstra
1181391e43daSPeter Zijlstra if (rt_rq->rt_nr_running) {
1182391e43daSPeter Zijlstra
1183391e43daSPeter Zijlstra WARN_ON(prio < prev_prio);
1184391e43daSPeter Zijlstra
1185391e43daSPeter Zijlstra /*
1186391e43daSPeter Zijlstra * This may have been our highest task, and therefore
1187391e43daSPeter Zijlstra * we may have some recomputation to do
1188391e43daSPeter Zijlstra */
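/*
 * Example (illustrative): if the departing entity was the only one at
 * the current highest priority, sched_find_first_bit() re-derives the
 * new top from the priority bitmap; e.g. the last prio-10 task leaving
 * with a prio-20 task still queued gives highest_prio.curr = 20.
 */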
1189391e43daSPeter Zijlstra if (prio == prev_prio) {
1190391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active;
1191391e43daSPeter Zijlstra
1192391e43daSPeter Zijlstra rt_rq->highest_prio.curr =
1193391e43daSPeter Zijlstra sched_find_first_bit(array->bitmap);
1194391e43daSPeter Zijlstra }
1195391e43daSPeter Zijlstra
1196934fc331SPeter Zijlstra } else {
1197934fc331SPeter Zijlstra rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
1198934fc331SPeter Zijlstra }
1199391e43daSPeter Zijlstra
1200391e43daSPeter Zijlstra dec_rt_prio_smp(rt_rq, prio, prev_prio);
1201391e43daSPeter Zijlstra }
1202391e43daSPeter Zijlstra
1203391e43daSPeter Zijlstra #else
1204391e43daSPeter Zijlstra
inc_rt_prio(struct rt_rq * rt_rq,int prio)1205391e43daSPeter Zijlstra static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
dec_rt_prio(struct rt_rq * rt_rq,int prio)1206391e43daSPeter Zijlstra static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1207391e43daSPeter Zijlstra
1208391e43daSPeter Zijlstra #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1209391e43daSPeter Zijlstra
1210391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
1211391e43daSPeter Zijlstra
1212391e43daSPeter Zijlstra static void
inc_rt_group(struct sched_rt_entity * rt_se,struct rt_rq * rt_rq)1213391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1214391e43daSPeter Zijlstra {
1215391e43daSPeter Zijlstra if (rt_se_boosted(rt_se))
1216391e43daSPeter Zijlstra rt_rq->rt_nr_boosted++;
1217391e43daSPeter Zijlstra
1218391e43daSPeter Zijlstra if (rt_rq->tg)
1219391e43daSPeter Zijlstra start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1220391e43daSPeter Zijlstra }
1221391e43daSPeter Zijlstra
1222391e43daSPeter Zijlstra static void
dec_rt_group(struct sched_rt_entity * rt_se,struct rt_rq * rt_rq)1223391e43daSPeter Zijlstra dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1224391e43daSPeter Zijlstra {
1225391e43daSPeter Zijlstra if (rt_se_boosted(rt_se))
1226391e43daSPeter Zijlstra rt_rq->rt_nr_boosted--;
1227391e43daSPeter Zijlstra
1228391e43daSPeter Zijlstra WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1229391e43daSPeter Zijlstra }
1230391e43daSPeter Zijlstra
1231391e43daSPeter Zijlstra #else /* CONFIG_RT_GROUP_SCHED */
1232391e43daSPeter Zijlstra
1233391e43daSPeter Zijlstra static void
inc_rt_group(struct sched_rt_entity * rt_se,struct rt_rq * rt_rq)1234391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1235391e43daSPeter Zijlstra {
1236391e43daSPeter Zijlstra start_rt_bandwidth(&def_rt_bandwidth);
1237391e43daSPeter Zijlstra }
1238391e43daSPeter Zijlstra
1239391e43daSPeter Zijlstra static inline
dec_rt_group(struct sched_rt_entity * rt_se,struct rt_rq * rt_rq)1240391e43daSPeter Zijlstra void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1241391e43daSPeter Zijlstra
1242391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */
1243391e43daSPeter Zijlstra
1244391e43daSPeter Zijlstra static inline
rt_se_nr_running(struct sched_rt_entity * rt_se)124522abdef3SKirill Tkhai unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
124622abdef3SKirill Tkhai {
124722abdef3SKirill Tkhai struct rt_rq *group_rq = group_rt_rq(rt_se);
124822abdef3SKirill Tkhai
124922abdef3SKirill Tkhai if (group_rq)
125022abdef3SKirill Tkhai return group_rq->rt_nr_running;
125122abdef3SKirill Tkhai else
125222abdef3SKirill Tkhai return 1;
125322abdef3SKirill Tkhai }
125422abdef3SKirill Tkhai
125522abdef3SKirill Tkhai static inline
rt_se_rr_nr_running(struct sched_rt_entity * rt_se)125601d36d0aSFrederic Weisbecker unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
125701d36d0aSFrederic Weisbecker {
125801d36d0aSFrederic Weisbecker struct rt_rq *group_rq = group_rt_rq(rt_se);
125901d36d0aSFrederic Weisbecker struct task_struct *tsk;
126001d36d0aSFrederic Weisbecker
126101d36d0aSFrederic Weisbecker if (group_rq)
126201d36d0aSFrederic Weisbecker return group_rq->rr_nr_running;
126301d36d0aSFrederic Weisbecker
126401d36d0aSFrederic Weisbecker tsk = rt_task_of(rt_se);
126501d36d0aSFrederic Weisbecker
126601d36d0aSFrederic Weisbecker return (tsk->policy == SCHED_RR) ? 1 : 0;
126701d36d0aSFrederic Weisbecker }
126801d36d0aSFrederic Weisbecker
126901d36d0aSFrederic Weisbecker static inline
inc_rt_tasks(struct sched_rt_entity * rt_se,struct rt_rq * rt_rq)1270391e43daSPeter Zijlstra void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1271391e43daSPeter Zijlstra {
1272391e43daSPeter Zijlstra int prio = rt_se_prio(rt_se);
1273391e43daSPeter Zijlstra
1274391e43daSPeter Zijlstra WARN_ON(!rt_prio(prio));
127522abdef3SKirill Tkhai rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
127601d36d0aSFrederic Weisbecker rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1277391e43daSPeter Zijlstra
1278391e43daSPeter Zijlstra inc_rt_prio(rt_rq, prio);
1279391e43daSPeter Zijlstra inc_rt_migration(rt_se, rt_rq);
1280391e43daSPeter Zijlstra inc_rt_group(rt_se, rt_rq);
1281391e43daSPeter Zijlstra }
1282391e43daSPeter Zijlstra
1283391e43daSPeter Zijlstra static inline
dec_rt_tasks(struct sched_rt_entity * rt_se,struct rt_rq * rt_rq)1284391e43daSPeter Zijlstra void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1285391e43daSPeter Zijlstra {
1286391e43daSPeter Zijlstra WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1287391e43daSPeter Zijlstra WARN_ON(!rt_rq->rt_nr_running);
128822abdef3SKirill Tkhai rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
128901d36d0aSFrederic Weisbecker rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1290391e43daSPeter Zijlstra
1291391e43daSPeter Zijlstra dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1292391e43daSPeter Zijlstra dec_rt_migration(rt_se, rt_rq);
1293391e43daSPeter Zijlstra dec_rt_group(rt_se, rt_rq);
1294391e43daSPeter Zijlstra }
1295391e43daSPeter Zijlstra
1296ff77e468SPeter Zijlstra /*
1297ff77e468SPeter Zijlstra * Change rt_se->run_list location unless SAVE && !MOVE
1298ff77e468SPeter Zijlstra *
1299ff77e468SPeter Zijlstra * assumes ENQUEUE/DEQUEUE flags match
1300ff77e468SPeter Zijlstra */
move_entity(unsigned int flags)1301ff77e468SPeter Zijlstra static inline bool move_entity(unsigned int flags)
1302ff77e468SPeter Zijlstra {
1303ff77e468SPeter Zijlstra if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1304ff77e468SPeter Zijlstra return false;
1305ff77e468SPeter Zijlstra
1306ff77e468SPeter Zijlstra return true;
1307ff77e468SPeter Zijlstra }
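/*
 * Illustrative summary of move_entity() (not part of the original code):
 *
 *   DEQUEUE_SAVE only               -> false, keep the run_list position
 *   DEQUEUE_SAVE | DEQUEUE_MOVE     -> true,  relink the entity
 *   neither flag (plain add/remove) -> true,  relink the entity
 *
 * Only the SAVE-without-MOVE case leaves the list untouched.
 */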
1308ff77e468SPeter Zijlstra
__delist_rt_entity(struct sched_rt_entity * rt_se,struct rt_prio_array * array)1309ff77e468SPeter Zijlstra static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1310ff77e468SPeter Zijlstra {
1311ff77e468SPeter Zijlstra list_del_init(&rt_se->run_list);
1312ff77e468SPeter Zijlstra
1313ff77e468SPeter Zijlstra if (list_empty(array->queue + rt_se_prio(rt_se)))
1314ff77e468SPeter Zijlstra __clear_bit(rt_se_prio(rt_se), array->bitmap);
1315ff77e468SPeter Zijlstra
1316ff77e468SPeter Zijlstra rt_se->on_list = 0;
1317ff77e468SPeter Zijlstra }
1318ff77e468SPeter Zijlstra
131957a5c2daSYafang Shao static inline struct sched_statistics *
__schedstats_from_rt_se(struct sched_rt_entity * rt_se)132057a5c2daSYafang Shao __schedstats_from_rt_se(struct sched_rt_entity *rt_se)
132157a5c2daSYafang Shao {
132257a5c2daSYafang Shao #ifdef CONFIG_RT_GROUP_SCHED
132357a5c2daSYafang Shao /* schedstats is not supported for rt group. */
132457a5c2daSYafang Shao if (!rt_entity_is_task(rt_se))
132557a5c2daSYafang Shao return NULL;
132657a5c2daSYafang Shao #endif
132757a5c2daSYafang Shao
132857a5c2daSYafang Shao return &rt_task_of(rt_se)->stats;
132957a5c2daSYafang Shao }
133057a5c2daSYafang Shao
133157a5c2daSYafang Shao static inline void
update_stats_wait_start_rt(struct rt_rq * rt_rq,struct sched_rt_entity * rt_se)133257a5c2daSYafang Shao update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
133357a5c2daSYafang Shao {
133457a5c2daSYafang Shao struct sched_statistics *stats;
133557a5c2daSYafang Shao struct task_struct *p = NULL;
133657a5c2daSYafang Shao
133757a5c2daSYafang Shao if (!schedstat_enabled())
133857a5c2daSYafang Shao return;
133957a5c2daSYafang Shao
134057a5c2daSYafang Shao if (rt_entity_is_task(rt_se))
134157a5c2daSYafang Shao p = rt_task_of(rt_se);
134257a5c2daSYafang Shao
134357a5c2daSYafang Shao stats = __schedstats_from_rt_se(rt_se);
134457a5c2daSYafang Shao if (!stats)
134557a5c2daSYafang Shao return;
134657a5c2daSYafang Shao
134757a5c2daSYafang Shao __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
134857a5c2daSYafang Shao }
134957a5c2daSYafang Shao
135057a5c2daSYafang Shao static inline void
update_stats_enqueue_sleeper_rt(struct rt_rq * rt_rq,struct sched_rt_entity * rt_se)135157a5c2daSYafang Shao update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
135257a5c2daSYafang Shao {
135357a5c2daSYafang Shao struct sched_statistics *stats;
135457a5c2daSYafang Shao struct task_struct *p = NULL;
135557a5c2daSYafang Shao
135657a5c2daSYafang Shao if (!schedstat_enabled())
135757a5c2daSYafang Shao return;
135857a5c2daSYafang Shao
135957a5c2daSYafang Shao if (rt_entity_is_task(rt_se))
136057a5c2daSYafang Shao p = rt_task_of(rt_se);
136157a5c2daSYafang Shao
136257a5c2daSYafang Shao stats = __schedstats_from_rt_se(rt_se);
136357a5c2daSYafang Shao if (!stats)
136457a5c2daSYafang Shao return;
136557a5c2daSYafang Shao
136657a5c2daSYafang Shao __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
136757a5c2daSYafang Shao }
136857a5c2daSYafang Shao
136957a5c2daSYafang Shao static inline void
update_stats_enqueue_rt(struct rt_rq * rt_rq,struct sched_rt_entity * rt_se,int flags)137057a5c2daSYafang Shao update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
137157a5c2daSYafang Shao int flags)
137257a5c2daSYafang Shao {
137357a5c2daSYafang Shao if (!schedstat_enabled())
137457a5c2daSYafang Shao return;
137557a5c2daSYafang Shao
137657a5c2daSYafang Shao if (flags & ENQUEUE_WAKEUP)
137757a5c2daSYafang Shao update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
137857a5c2daSYafang Shao }
137957a5c2daSYafang Shao
138057a5c2daSYafang Shao static inline void
update_stats_wait_end_rt(struct rt_rq * rt_rq,struct sched_rt_entity * rt_se)138157a5c2daSYafang Shao update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
138257a5c2daSYafang Shao {
138357a5c2daSYafang Shao struct sched_statistics *stats;
138457a5c2daSYafang Shao struct task_struct *p = NULL;
138557a5c2daSYafang Shao
138657a5c2daSYafang Shao if (!schedstat_enabled())
138757a5c2daSYafang Shao return;
138857a5c2daSYafang Shao
138957a5c2daSYafang Shao if (rt_entity_is_task(rt_se))
139057a5c2daSYafang Shao p = rt_task_of(rt_se);
139157a5c2daSYafang Shao
139257a5c2daSYafang Shao stats = __schedstats_from_rt_se(rt_se);
139357a5c2daSYafang Shao if (!stats)
139457a5c2daSYafang Shao return;
139557a5c2daSYafang Shao
139657a5c2daSYafang Shao __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
139757a5c2daSYafang Shao }
139857a5c2daSYafang Shao
139957a5c2daSYafang Shao static inline void
update_stats_dequeue_rt(struct rt_rq * rt_rq,struct sched_rt_entity * rt_se,int flags)140057a5c2daSYafang Shao update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
140157a5c2daSYafang Shao int flags)
140257a5c2daSYafang Shao {
140357a5c2daSYafang Shao struct task_struct *p = NULL;
140457a5c2daSYafang Shao
140557a5c2daSYafang Shao if (!schedstat_enabled())
140657a5c2daSYafang Shao return;
140757a5c2daSYafang Shao
140857a5c2daSYafang Shao if (rt_entity_is_task(rt_se))
140957a5c2daSYafang Shao p = rt_task_of(rt_se);
141057a5c2daSYafang Shao
141157a5c2daSYafang Shao if ((flags & DEQUEUE_SLEEP) && p) {
141257a5c2daSYafang Shao unsigned int state;
141357a5c2daSYafang Shao
141457a5c2daSYafang Shao state = READ_ONCE(p->__state);
141557a5c2daSYafang Shao if (state & TASK_INTERRUPTIBLE)
141657a5c2daSYafang Shao __schedstat_set(p->stats.sleep_start,
141757a5c2daSYafang Shao rq_clock(rq_of_rt_rq(rt_rq)));
141857a5c2daSYafang Shao
141957a5c2daSYafang Shao if (state & TASK_UNINTERRUPTIBLE)
142057a5c2daSYafang Shao __schedstat_set(p->stats.block_start,
142157a5c2daSYafang Shao rq_clock(rq_of_rt_rq(rt_rq)));
142257a5c2daSYafang Shao }
142357a5c2daSYafang Shao }
142457a5c2daSYafang Shao
__enqueue_rt_entity(struct sched_rt_entity * rt_se,unsigned int flags)1425ff77e468SPeter Zijlstra static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1426391e43daSPeter Zijlstra {
1427391e43daSPeter Zijlstra struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1428391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active;
1429391e43daSPeter Zijlstra struct rt_rq *group_rq = group_rt_rq(rt_se);
1430391e43daSPeter Zijlstra struct list_head *queue = array->queue + rt_se_prio(rt_se);
1431391e43daSPeter Zijlstra
1432391e43daSPeter Zijlstra /*
1433391e43daSPeter Zijlstra * Don't enqueue the group if it's throttled, or when empty.
1434391e43daSPeter Zijlstra * The latter is a consequence of the former when a child group
1435391e43daSPeter Zijlstra * gets throttled and the current group doesn't have any other
1436391e43daSPeter Zijlstra * active members.
1437391e43daSPeter Zijlstra */
1438ff77e468SPeter Zijlstra if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1439ff77e468SPeter Zijlstra if (rt_se->on_list)
1440ff77e468SPeter Zijlstra __delist_rt_entity(rt_se, array);
1441391e43daSPeter Zijlstra return;
1442ff77e468SPeter Zijlstra }
1443391e43daSPeter Zijlstra
1444ff77e468SPeter Zijlstra if (move_entity(flags)) {
1445ff77e468SPeter Zijlstra WARN_ON_ONCE(rt_se->on_list);
1446ff77e468SPeter Zijlstra if (flags & ENQUEUE_HEAD)
1447391e43daSPeter Zijlstra list_add(&rt_se->run_list, queue);
1448391e43daSPeter Zijlstra else
1449391e43daSPeter Zijlstra list_add_tail(&rt_se->run_list, queue);
1450ff77e468SPeter Zijlstra
1451391e43daSPeter Zijlstra __set_bit(rt_se_prio(rt_se), array->bitmap);
1452ff77e468SPeter Zijlstra rt_se->on_list = 1;
1453ff77e468SPeter Zijlstra }
1454ff77e468SPeter Zijlstra rt_se->on_rq = 1;
1455391e43daSPeter Zijlstra
1456391e43daSPeter Zijlstra inc_rt_tasks(rt_se, rt_rq);
1457391e43daSPeter Zijlstra }
1458391e43daSPeter Zijlstra
__dequeue_rt_entity(struct sched_rt_entity * rt_se,unsigned int flags)1459ff77e468SPeter Zijlstra static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1460391e43daSPeter Zijlstra {
1461391e43daSPeter Zijlstra struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1462391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active;
1463391e43daSPeter Zijlstra
1464ff77e468SPeter Zijlstra if (move_entity(flags)) {
1465ff77e468SPeter Zijlstra WARN_ON_ONCE(!rt_se->on_list);
1466ff77e468SPeter Zijlstra __delist_rt_entity(rt_se, array);
1467ff77e468SPeter Zijlstra }
1468ff77e468SPeter Zijlstra rt_se->on_rq = 0;
1469391e43daSPeter Zijlstra
1470391e43daSPeter Zijlstra dec_rt_tasks(rt_se, rt_rq);
1471391e43daSPeter Zijlstra }
1472391e43daSPeter Zijlstra
1473391e43daSPeter Zijlstra /*
1474391e43daSPeter Zijlstra * Because the prio of an upper entry depends on the lower
1475391e43daSPeter Zijlstra * entries, we must remove entries top-down.
1476391e43daSPeter Zijlstra */
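/*
 * Sketch of the traversal (illustrative, assuming CONFIG_RT_GROUP_SCHED):
 * for_each_sched_rt_entity() walks bottom-up (task -> ... -> root group),
 * so dequeue_rt_stack() first records the chain via ->back pointers and
 * then replays it in reverse, e.g.
 *
 *   task_se -> group_B_se -> group_A_se       (walk up, link ->back)
 *   dequeue: group_A_se, group_B_se, task_se  (replay top-down)
 */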
dequeue_rt_stack(struct sched_rt_entity * rt_se,unsigned int flags)1477ff77e468SPeter Zijlstra static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1478391e43daSPeter Zijlstra {
1479391e43daSPeter Zijlstra struct sched_rt_entity *back = NULL;
14805c66d1b9SNicolas Saenz Julienne unsigned int rt_nr_running;
1481391e43daSPeter Zijlstra
1482391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) {
1483391e43daSPeter Zijlstra rt_se->back = back;
1484391e43daSPeter Zijlstra back = rt_se;
1485391e43daSPeter Zijlstra }
1486391e43daSPeter Zijlstra
14875c66d1b9SNicolas Saenz Julienne rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
1488f4ebcbc0SKirill Tkhai
1489391e43daSPeter Zijlstra for (rt_se = back; rt_se; rt_se = rt_se->back) {
1490391e43daSPeter Zijlstra if (on_rt_rq(rt_se))
1491ff77e468SPeter Zijlstra __dequeue_rt_entity(rt_se, flags);
1492391e43daSPeter Zijlstra }
14935c66d1b9SNicolas Saenz Julienne
14945c66d1b9SNicolas Saenz Julienne dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
1495391e43daSPeter Zijlstra }
1496391e43daSPeter Zijlstra
enqueue_rt_entity(struct sched_rt_entity * rt_se,unsigned int flags)1497ff77e468SPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1498391e43daSPeter Zijlstra {
1499f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_se(rt_se);
1500f4ebcbc0SKirill Tkhai
150157a5c2daSYafang Shao update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);
150257a5c2daSYafang Shao
1503ff77e468SPeter Zijlstra dequeue_rt_stack(rt_se, flags);
1504391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se)
1505ff77e468SPeter Zijlstra __enqueue_rt_entity(rt_se, flags);
1506f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(&rq->rt);
1507391e43daSPeter Zijlstra }
1508391e43daSPeter Zijlstra
dequeue_rt_entity(struct sched_rt_entity * rt_se,unsigned int flags)1509ff77e468SPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1510391e43daSPeter Zijlstra {
1511f4ebcbc0SKirill Tkhai struct rq *rq = rq_of_rt_se(rt_se);
1512f4ebcbc0SKirill Tkhai
151357a5c2daSYafang Shao update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);
151457a5c2daSYafang Shao
1515ff77e468SPeter Zijlstra dequeue_rt_stack(rt_se, flags);
1516391e43daSPeter Zijlstra
1517391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) {
1518391e43daSPeter Zijlstra struct rt_rq *rt_rq = group_rt_rq(rt_se);
1519391e43daSPeter Zijlstra
1520391e43daSPeter Zijlstra if (rt_rq && rt_rq->rt_nr_running)
1521ff77e468SPeter Zijlstra __enqueue_rt_entity(rt_se, flags);
1522391e43daSPeter Zijlstra }
1523f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(&rq->rt);
1524391e43daSPeter Zijlstra }
1525391e43daSPeter Zijlstra
1526391e43daSPeter Zijlstra /*
1527391e43daSPeter Zijlstra * Adding/removing a task to/from a priority array:
1528391e43daSPeter Zijlstra */
1529391e43daSPeter Zijlstra static void
enqueue_task_rt(struct rq * rq,struct task_struct * p,int flags)1530391e43daSPeter Zijlstra enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1531391e43daSPeter Zijlstra {
1532391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt;
1533391e43daSPeter Zijlstra
1534391e43daSPeter Zijlstra if (flags & ENQUEUE_WAKEUP)
1535391e43daSPeter Zijlstra rt_se->timeout = 0;
1536391e43daSPeter Zijlstra
153757a5c2daSYafang Shao check_schedstat_required();
153857a5c2daSYafang Shao update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);
153957a5c2daSYafang Shao
1540ff77e468SPeter Zijlstra enqueue_rt_entity(rt_se, flags);
1541391e43daSPeter Zijlstra
15424b53a341SIngo Molnar if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1543391e43daSPeter Zijlstra enqueue_pushable_task(rq, p);
1544391e43daSPeter Zijlstra }
1545391e43daSPeter Zijlstra
dequeue_task_rt(struct rq * rq,struct task_struct * p,int flags)1546391e43daSPeter Zijlstra static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1547391e43daSPeter Zijlstra {
1548391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt;
1549391e43daSPeter Zijlstra
1550391e43daSPeter Zijlstra update_curr_rt(rq);
1551ff77e468SPeter Zijlstra dequeue_rt_entity(rt_se, flags);
1552391e43daSPeter Zijlstra
1553391e43daSPeter Zijlstra dequeue_pushable_task(rq, p);
1554391e43daSPeter Zijlstra }
1555391e43daSPeter Zijlstra
1556391e43daSPeter Zijlstra /*
1557391e43daSPeter Zijlstra * Put a task at the head or the end of the run list without the overhead of
1558391e43daSPeter Zijlstra * dequeue followed by enqueue.
1559391e43daSPeter Zijlstra */
1560391e43daSPeter Zijlstra static void
requeue_rt_entity(struct rt_rq * rt_rq,struct sched_rt_entity * rt_se,int head)1561391e43daSPeter Zijlstra requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1562391e43daSPeter Zijlstra {
1563391e43daSPeter Zijlstra if (on_rt_rq(rt_se)) {
1564391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active;
1565391e43daSPeter Zijlstra struct list_head *queue = array->queue + rt_se_prio(rt_se);
1566391e43daSPeter Zijlstra
1567391e43daSPeter Zijlstra if (head)
1568391e43daSPeter Zijlstra list_move(&rt_se->run_list, queue);
1569391e43daSPeter Zijlstra else
1570391e43daSPeter Zijlstra list_move_tail(&rt_se->run_list, queue);
1571391e43daSPeter Zijlstra }
1572391e43daSPeter Zijlstra }
1573391e43daSPeter Zijlstra
requeue_task_rt(struct rq * rq,struct task_struct * p,int head)1574391e43daSPeter Zijlstra static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1575391e43daSPeter Zijlstra {
1576391e43daSPeter Zijlstra struct sched_rt_entity *rt_se = &p->rt;
1577391e43daSPeter Zijlstra struct rt_rq *rt_rq;
1578391e43daSPeter Zijlstra
1579391e43daSPeter Zijlstra for_each_sched_rt_entity(rt_se) {
1580391e43daSPeter Zijlstra rt_rq = rt_rq_of_se(rt_se);
1581391e43daSPeter Zijlstra requeue_rt_entity(rt_rq, rt_se, head);
1582391e43daSPeter Zijlstra }
1583391e43daSPeter Zijlstra }
1584391e43daSPeter Zijlstra
yield_task_rt(struct rq * rq)1585391e43daSPeter Zijlstra static void yield_task_rt(struct rq *rq)
1586391e43daSPeter Zijlstra {
1587391e43daSPeter Zijlstra requeue_task_rt(rq, rq->curr, 0);
1588391e43daSPeter Zijlstra }
1589391e43daSPeter Zijlstra
1590391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1591391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task);
1592391e43daSPeter Zijlstra
1593391e43daSPeter Zijlstra static int
select_task_rq_rt(struct task_struct * p,int cpu,int flags)15943aef1551SValentin Schneider select_task_rq_rt(struct task_struct *p, int cpu, int flags)
1595391e43daSPeter Zijlstra {
1596391e43daSPeter Zijlstra struct task_struct *curr;
1597391e43daSPeter Zijlstra struct rq *rq;
1598804d402fSQais Yousef bool test;
1599391e43daSPeter Zijlstra
1600391e43daSPeter Zijlstra /* For anything but wake ups, just return the task_cpu */
16013aef1551SValentin Schneider if (!(flags & (WF_TTWU | WF_FORK)))
1602391e43daSPeter Zijlstra goto out;
1603391e43daSPeter Zijlstra
1604391e43daSPeter Zijlstra rq = cpu_rq(cpu);
1605391e43daSPeter Zijlstra
1606391e43daSPeter Zijlstra rcu_read_lock();
1607316c1608SJason Low curr = READ_ONCE(rq->curr); /* unlocked access */
1608391e43daSPeter Zijlstra
1609391e43daSPeter Zijlstra /*
1610391e43daSPeter Zijlstra * If the current task on @p's runqueue is an RT task, then
1611391e43daSPeter Zijlstra * try to see if we can wake this RT task up on another
1612391e43daSPeter Zijlstra * runqueue. Otherwise simply start this RT task
1613391e43daSPeter Zijlstra * on its current runqueue.
1614391e43daSPeter Zijlstra *
1615391e43daSPeter Zijlstra * We want to avoid overloading runqueues. If the woken
1616391e43daSPeter Zijlstra * task is a higher priority, then it will stay on this CPU
1617391e43daSPeter Zijlstra * and the lower prio task should be moved to another CPU.
1618391e43daSPeter Zijlstra * Even though this will probably make the lower prio task
1619391e43daSPeter Zijlstra * lose its cache, we do not want to bounce a higher priority task
1620391e43daSPeter Zijlstra * around just because it gave up its CPU, perhaps for a
1621391e43daSPeter Zijlstra * lock?
1622391e43daSPeter Zijlstra *
1623391e43daSPeter Zijlstra * For equal prio tasks, we just let the scheduler sort it out.
1624391e43daSPeter Zijlstra *
1625391e43daSPeter Zijlstra * Otherwise, just let it ride on the affined RQ and the
1626391e43daSPeter Zijlstra * post-schedule router will push the preempted task away
1627391e43daSPeter Zijlstra *
1628391e43daSPeter Zijlstra * This test is optimistic, if we get it wrong the load-balancer
1629391e43daSPeter Zijlstra * will have to sort it out.
1630804d402fSQais Yousef *
1631804d402fSQais Yousef * We take into account the capacity of the CPU to ensure it fits the
1632804d402fSQais Yousef * requirement of the task - which is only important on heterogeneous
1633804d402fSQais Yousef * systems like big.LITTLE.
1634391e43daSPeter Zijlstra */
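/*
 * Condensed decision sketch (illustrative, mirroring the code below):
 *
 *   test = curr is RT && (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
 *   if (test || p does not fit this CPU's capacity)
 *           target = find_lowest_rq(p);
 *   take target only if it runs a lower-priority task (and, when we
 *   searched purely for capacity reasons, only if the target actually fits).
 */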
1635804d402fSQais Yousef test = curr &&
1636804d402fSQais Yousef unlikely(rt_task(curr)) &&
1637804d402fSQais Yousef (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
1638804d402fSQais Yousef
1639804d402fSQais Yousef if (test || !rt_task_fits_capacity(p, cpu)) {
1640391e43daSPeter Zijlstra int target = find_lowest_rq(p);
1641391e43daSPeter Zijlstra
164280e3d87bSTim Chen /*
1643b28bc1e0SQais Yousef * Bail out if we were forcing a migration to find a better
1644b28bc1e0SQais Yousef * fitting CPU but our search failed.
1645b28bc1e0SQais Yousef */
1646b28bc1e0SQais Yousef if (!test && target != -1 && !rt_task_fits_capacity(p, target))
1647b28bc1e0SQais Yousef goto out_unlock;
1648b28bc1e0SQais Yousef
1649b28bc1e0SQais Yousef /*
165080e3d87bSTim Chen * Don't bother moving it if the destination CPU is
165180e3d87bSTim Chen * not running a lower priority task.
165280e3d87bSTim Chen */
165380e3d87bSTim Chen if (target != -1 &&
165480e3d87bSTim Chen p->prio < cpu_rq(target)->rt.highest_prio.curr)
1655391e43daSPeter Zijlstra cpu = target;
1656391e43daSPeter Zijlstra }
1657b28bc1e0SQais Yousef
1658b28bc1e0SQais Yousef out_unlock:
1659391e43daSPeter Zijlstra rcu_read_unlock();
1660391e43daSPeter Zijlstra
1661391e43daSPeter Zijlstra out:
1662391e43daSPeter Zijlstra return cpu;
1663391e43daSPeter Zijlstra }
1664391e43daSPeter Zijlstra
check_preempt_equal_prio(struct rq * rq,struct task_struct * p)1665391e43daSPeter Zijlstra static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1666391e43daSPeter Zijlstra {
1667308a623aSWanpeng Li /*
1668308a623aSWanpeng Li * Current can't be migrated, useless to reschedule,
1669308a623aSWanpeng Li * let's hope p can move out.
1670308a623aSWanpeng Li */
16714b53a341SIngo Molnar if (rq->curr->nr_cpus_allowed == 1 ||
1672a1bd02e1SQais Yousef !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1673391e43daSPeter Zijlstra return;
1674391e43daSPeter Zijlstra
1675308a623aSWanpeng Li /*
1676308a623aSWanpeng Li * p is migratable, so let's not schedule it and
1677308a623aSWanpeng Li * see if it is pushed or pulled somewhere else.
1678308a623aSWanpeng Li */
1679804d402fSQais Yousef if (p->nr_cpus_allowed != 1 &&
1680a1bd02e1SQais Yousef cpupri_find(&rq->rd->cpupri, p, NULL))
1681391e43daSPeter Zijlstra return;
1682391e43daSPeter Zijlstra
1683391e43daSPeter Zijlstra /*
168497fb7a0aSIngo Molnar * There appear to be other CPUs that can accept
168597fb7a0aSIngo Molnar * the current task but none can run 'p', so let's reschedule
168697fb7a0aSIngo Molnar * to try and push the current task away:
1687391e43daSPeter Zijlstra */
1688391e43daSPeter Zijlstra requeue_task_rt(rq, p, 1);
16898875125eSKirill Tkhai resched_curr(rq);
1690391e43daSPeter Zijlstra }
1691391e43daSPeter Zijlstra
balance_rt(struct rq * rq,struct task_struct * p,struct rq_flags * rf)16926e2df058SPeter Zijlstra static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
16936e2df058SPeter Zijlstra {
16946e2df058SPeter Zijlstra if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
16956e2df058SPeter Zijlstra /*
16966e2df058SPeter Zijlstra * This is OK, because current is on_cpu, which avoids it being
16976e2df058SPeter Zijlstra * picked for load-balance and preemption/IRQs are still
16986e2df058SPeter Zijlstra * disabled avoiding further scheduler activity on it and we've
16996e2df058SPeter Zijlstra * not yet started the picking loop.
17006e2df058SPeter Zijlstra */
17016e2df058SPeter Zijlstra rq_unpin_lock(rq, rf);
17026e2df058SPeter Zijlstra pull_rt_task(rq);
17036e2df058SPeter Zijlstra rq_repin_lock(rq, rf);
17046e2df058SPeter Zijlstra }
17056e2df058SPeter Zijlstra
17066e2df058SPeter Zijlstra return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
17076e2df058SPeter Zijlstra }
1708391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
1709391e43daSPeter Zijlstra
1710391e43daSPeter Zijlstra /*
1711391e43daSPeter Zijlstra * Preempt the current task with a newly woken task if needed:
1712391e43daSPeter Zijlstra */
wakeup_preempt_rt(struct rq * rq,struct task_struct * p,int flags)1713b2f7d750SIngo Molnar static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
1714391e43daSPeter Zijlstra {
1715391e43daSPeter Zijlstra if (p->prio < rq->curr->prio) {
17168875125eSKirill Tkhai resched_curr(rq);
1717391e43daSPeter Zijlstra return;
1718391e43daSPeter Zijlstra }
1719391e43daSPeter Zijlstra
1720391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1721391e43daSPeter Zijlstra /*
1722391e43daSPeter Zijlstra * If:
1723391e43daSPeter Zijlstra *
1724391e43daSPeter Zijlstra * - the newly woken task is of equal priority to the current task
1725391e43daSPeter Zijlstra * - the newly woken task is non-migratable while current is migratable
1726391e43daSPeter Zijlstra * - current will be preempted on the next reschedule
1727391e43daSPeter Zijlstra *
1728391e43daSPeter Zijlstra * we should check to see if current can readily move to a different
1729391e43daSPeter Zijlstra * cpu. If so, we will reschedule to allow the push logic to try
1730391e43daSPeter Zijlstra * to move current somewhere else, making room for our non-migratable
1731391e43daSPeter Zijlstra * task.
1732391e43daSPeter Zijlstra */
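/*
 * Example (illustrative): p and current are both at prio 50, but p is
 * pinned to this CPU while current is allowed elsewhere; rescheduling
 * gives the push logic a chance to move current away instead of leaving
 * the pinned task waiting behind an equal-priority peer.
 */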
1733391e43daSPeter Zijlstra if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1734391e43daSPeter Zijlstra check_preempt_equal_prio(rq, p);
1735391e43daSPeter Zijlstra #endif
1736391e43daSPeter Zijlstra }
1737391e43daSPeter Zijlstra
set_next_task_rt(struct rq * rq,struct task_struct * p,bool first)1738a0e813f2SPeter Zijlstra static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1739ff1cdc94SMuchun Song {
174057a5c2daSYafang Shao struct sched_rt_entity *rt_se = &p->rt;
174157a5c2daSYafang Shao struct rt_rq *rt_rq = &rq->rt;
174257a5c2daSYafang Shao
1743ff1cdc94SMuchun Song p->se.exec_start = rq_clock_task(rq);
174457a5c2daSYafang Shao if (on_rt_rq(&p->rt))
174557a5c2daSYafang Shao update_stats_wait_end_rt(rt_rq, rt_se);
1746ff1cdc94SMuchun Song
1747ff1cdc94SMuchun Song /* The running task is never eligible for pushing */
1748ff1cdc94SMuchun Song dequeue_pushable_task(rq, p);
1749f95d4eaeSPeter Zijlstra
1750a0e813f2SPeter Zijlstra if (!first)
1751a0e813f2SPeter Zijlstra return;
1752a0e813f2SPeter Zijlstra
1753f95d4eaeSPeter Zijlstra /*
1754f95d4eaeSPeter Zijlstra * If prev task was rt, put_prev_task() has already updated the
1755f95d4eaeSPeter Zijlstra * utilization. We only care about the case where we start to schedule an
1756f95d4eaeSPeter Zijlstra * RT task
1757f95d4eaeSPeter Zijlstra */
1758f95d4eaeSPeter Zijlstra if (rq->curr->sched_class != &rt_sched_class)
1759f95d4eaeSPeter Zijlstra update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1760f95d4eaeSPeter Zijlstra
1761f95d4eaeSPeter Zijlstra rt_queue_push_tasks(rq);
1762ff1cdc94SMuchun Song }
1763ff1cdc94SMuchun Song
pick_next_rt_entity(struct rt_rq * rt_rq)1764821aecd0SDietmar Eggemann static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
1765391e43daSPeter Zijlstra {
1766391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active;
1767391e43daSPeter Zijlstra struct sched_rt_entity *next = NULL;
1768391e43daSPeter Zijlstra struct list_head *queue;
1769391e43daSPeter Zijlstra int idx;
1770391e43daSPeter Zijlstra
1771391e43daSPeter Zijlstra idx = sched_find_first_bit(array->bitmap);
1772391e43daSPeter Zijlstra BUG_ON(idx >= MAX_RT_PRIO);
1773391e43daSPeter Zijlstra
1774391e43daSPeter Zijlstra queue = array->queue + idx;
17757c4a5b89SPietro Borrello if (SCHED_WARN_ON(list_empty(queue)))
17767c4a5b89SPietro Borrello return NULL;
1777391e43daSPeter Zijlstra next = list_entry(queue->next, struct sched_rt_entity, run_list);
1778391e43daSPeter Zijlstra
1779391e43daSPeter Zijlstra return next;
1780391e43daSPeter Zijlstra }
1781391e43daSPeter Zijlstra
_pick_next_task_rt(struct rq * rq)1782391e43daSPeter Zijlstra static struct task_struct *_pick_next_task_rt(struct rq *rq)
1783391e43daSPeter Zijlstra {
1784391e43daSPeter Zijlstra struct sched_rt_entity *rt_se;
1785606dba2eSPeter Zijlstra struct rt_rq *rt_rq = &rq->rt;
1786391e43daSPeter Zijlstra
1787391e43daSPeter Zijlstra do {
1788821aecd0SDietmar Eggemann rt_se = pick_next_rt_entity(rt_rq);
17897c4a5b89SPietro Borrello if (unlikely(!rt_se))
17907c4a5b89SPietro Borrello return NULL;
1791391e43daSPeter Zijlstra rt_rq = group_rt_rq(rt_se);
1792391e43daSPeter Zijlstra } while (rt_rq);
1793391e43daSPeter Zijlstra
1794ff1cdc94SMuchun Song return rt_task_of(rt_se);
1795391e43daSPeter Zijlstra }
1796391e43daSPeter Zijlstra
pick_task_rt(struct rq * rq)179721f56ffeSPeter Zijlstra static struct task_struct *pick_task_rt(struct rq *rq)
1798391e43daSPeter Zijlstra {
1799606dba2eSPeter Zijlstra struct task_struct *p;
1800606dba2eSPeter Zijlstra
18016e2df058SPeter Zijlstra if (!sched_rt_runnable(rq))
1802606dba2eSPeter Zijlstra return NULL;
1803606dba2eSPeter Zijlstra
1804606dba2eSPeter Zijlstra p = _pick_next_task_rt(rq);
180521f56ffeSPeter Zijlstra
180621f56ffeSPeter Zijlstra return p;
180721f56ffeSPeter Zijlstra }
180821f56ffeSPeter Zijlstra
pick_next_task_rt(struct rq * rq)180921f56ffeSPeter Zijlstra static struct task_struct *pick_next_task_rt(struct rq *rq)
181021f56ffeSPeter Zijlstra {
181121f56ffeSPeter Zijlstra struct task_struct *p = pick_task_rt(rq);
181221f56ffeSPeter Zijlstra
181321f56ffeSPeter Zijlstra if (p)
1814a0e813f2SPeter Zijlstra set_next_task_rt(rq, p, true);
181521f56ffeSPeter Zijlstra
1816391e43daSPeter Zijlstra return p;
1817391e43daSPeter Zijlstra }
1818391e43daSPeter Zijlstra
put_prev_task_rt(struct rq * rq,struct task_struct * p)18196e2df058SPeter Zijlstra static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1820391e43daSPeter Zijlstra {
182157a5c2daSYafang Shao struct sched_rt_entity *rt_se = &p->rt;
182257a5c2daSYafang Shao struct rt_rq *rt_rq = &rq->rt;
182357a5c2daSYafang Shao
182457a5c2daSYafang Shao if (on_rt_rq(&p->rt))
182557a5c2daSYafang Shao update_stats_wait_start_rt(rt_rq, rt_se);
182657a5c2daSYafang Shao
1827391e43daSPeter Zijlstra update_curr_rt(rq);
1828391e43daSPeter Zijlstra
182923127296SVincent Guittot update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1830371bf427SVincent Guittot
1831391e43daSPeter Zijlstra /*
1832391e43daSPeter Zijlstra * The previous task needs to be made eligible for pushing
1833391e43daSPeter Zijlstra * if it is still active
1834391e43daSPeter Zijlstra */
18354b53a341SIngo Molnar if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1836391e43daSPeter Zijlstra enqueue_pushable_task(rq, p);
1837391e43daSPeter Zijlstra }
1838391e43daSPeter Zijlstra
1839391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1840391e43daSPeter Zijlstra
1841391e43daSPeter Zijlstra /* Only try algorithms three times */
1842391e43daSPeter Zijlstra #define RT_MAX_TRIES 3
1843391e43daSPeter Zijlstra
pick_rt_task(struct rq * rq,struct task_struct * p,int cpu)1844391e43daSPeter Zijlstra static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1845391e43daSPeter Zijlstra {
18460b9d46fcSPeter Zijlstra if (!task_on_cpu(rq, p) &&
184795158a89SPeter Zijlstra cpumask_test_cpu(cpu, &p->cpus_mask))
1848391e43daSPeter Zijlstra return 1;
184997fb7a0aSIngo Molnar
1850391e43daSPeter Zijlstra return 0;
1851391e43daSPeter Zijlstra }
1852391e43daSPeter Zijlstra
1853e23ee747SKirill Tkhai /*
1854e23ee747SKirill Tkhai * Return the highest-priority pushable task on this rq that is suitable to
185597fb7a0aSIngo Molnar * run on the given CPU, or NULL if there is none
1856e23ee747SKirill Tkhai */
pick_highest_pushable_task(struct rq * rq,int cpu)1857e23ee747SKirill Tkhai static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1858391e43daSPeter Zijlstra {
1859e23ee747SKirill Tkhai struct plist_head *head = &rq->rt.pushable_tasks;
1860391e43daSPeter Zijlstra struct task_struct *p;
1861391e43daSPeter Zijlstra
1862e23ee747SKirill Tkhai if (!has_pushable_tasks(rq))
1863e23ee747SKirill Tkhai return NULL;
1864391e43daSPeter Zijlstra
1865e23ee747SKirill Tkhai plist_for_each_entry(p, head, pushable_tasks) {
1866e23ee747SKirill Tkhai if (pick_rt_task(rq, p, cpu))
1867e23ee747SKirill Tkhai return p;
1868391e43daSPeter Zijlstra }
1869391e43daSPeter Zijlstra
1870e23ee747SKirill Tkhai return NULL;
1871391e43daSPeter Zijlstra }
1872391e43daSPeter Zijlstra
1873391e43daSPeter Zijlstra static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1874391e43daSPeter Zijlstra
find_lowest_rq(struct task_struct * task)1875391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task)
1876391e43daSPeter Zijlstra {
1877391e43daSPeter Zijlstra struct sched_domain *sd;
18784ba29684SChristoph Lameter struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1879391e43daSPeter Zijlstra int this_cpu = smp_processor_id();
1880391e43daSPeter Zijlstra int cpu = task_cpu(task);
1881a1bd02e1SQais Yousef int ret;
1882391e43daSPeter Zijlstra
1883391e43daSPeter Zijlstra /* Make sure the mask is initialized first */
1884391e43daSPeter Zijlstra if (unlikely(!lowest_mask))
1885391e43daSPeter Zijlstra return -1;
1886391e43daSPeter Zijlstra
18874b53a341SIngo Molnar if (task->nr_cpus_allowed == 1)
1888391e43daSPeter Zijlstra return -1; /* No other targets possible */
1889391e43daSPeter Zijlstra
1890a1bd02e1SQais Yousef /*
1891a1bd02e1SQais Yousef * If we're on asym system ensure we consider the different capacities
1892a1bd02e1SQais Yousef * of the CPUs when searching for the lowest_mask.
1893a1bd02e1SQais Yousef */
1894740cf8a7SDietmar Eggemann if (sched_asym_cpucap_active()) {
1895a1bd02e1SQais Yousef
1896a1bd02e1SQais Yousef ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1897a1bd02e1SQais Yousef task, lowest_mask,
1898a1bd02e1SQais Yousef rt_task_fits_capacity);
1899a1bd02e1SQais Yousef } else {
1900a1bd02e1SQais Yousef
1901a1bd02e1SQais Yousef ret = cpupri_find(&task_rq(task)->rd->cpupri,
1902a1bd02e1SQais Yousef task, lowest_mask);
1903a1bd02e1SQais Yousef }
1904a1bd02e1SQais Yousef
1905a1bd02e1SQais Yousef if (!ret)
1906391e43daSPeter Zijlstra return -1; /* No targets found */
1907391e43daSPeter Zijlstra
1908391e43daSPeter Zijlstra /*
190997fb7a0aSIngo Molnar * At this point we have built a mask of CPUs representing the
1910391e43daSPeter Zijlstra * lowest priority tasks in the system. Now we want to elect
1911391e43daSPeter Zijlstra * the best one based on our affinity and topology.
1912391e43daSPeter Zijlstra *
191397fb7a0aSIngo Molnar * We prioritize the last CPU that the task executed on since
1914391e43daSPeter Zijlstra * it is most likely cache-hot in that location.
1915391e43daSPeter Zijlstra */
1916391e43daSPeter Zijlstra if (cpumask_test_cpu(cpu, lowest_mask))
1917391e43daSPeter Zijlstra return cpu;
1918391e43daSPeter Zijlstra
1919391e43daSPeter Zijlstra /*
1920391e43daSPeter Zijlstra * Otherwise, we consult the sched_domains span maps to figure
192197fb7a0aSIngo Molnar * out which CPU is logically closest to our hot cache data.
1922391e43daSPeter Zijlstra */
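/*
 * Illustrative note (not from the original source): the domain walk below
 * goes from the smallest domain outwards, so a lowest_mask CPU that shares
 * cache with the task's previous CPU is found before one on a remote node,
 * and this_cpu is preferred within a domain because preempting locally is
 * cheaper than interrupting a remote CPU.
 */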
1923391e43daSPeter Zijlstra if (!cpumask_test_cpu(this_cpu, lowest_mask))
1924391e43daSPeter Zijlstra this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1925391e43daSPeter Zijlstra
1926391e43daSPeter Zijlstra rcu_read_lock();
1927391e43daSPeter Zijlstra for_each_domain(cpu, sd) {
1928391e43daSPeter Zijlstra if (sd->flags & SD_WAKE_AFFINE) {
1929391e43daSPeter Zijlstra int best_cpu;
1930391e43daSPeter Zijlstra
1931391e43daSPeter Zijlstra /*
1932391e43daSPeter Zijlstra * "this_cpu" is cheaper to preempt than a
1933391e43daSPeter Zijlstra * remote processor.
1934391e43daSPeter Zijlstra */
1935391e43daSPeter Zijlstra if (this_cpu != -1 &&
1936391e43daSPeter Zijlstra cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1937391e43daSPeter Zijlstra rcu_read_unlock();
1938391e43daSPeter Zijlstra return this_cpu;
1939391e43daSPeter Zijlstra }
1940391e43daSPeter Zijlstra
194114e292f8SPeter Zijlstra best_cpu = cpumask_any_and_distribute(lowest_mask,
1942391e43daSPeter Zijlstra sched_domain_span(sd));
1943391e43daSPeter Zijlstra if (best_cpu < nr_cpu_ids) {
1944391e43daSPeter Zijlstra rcu_read_unlock();
1945391e43daSPeter Zijlstra return best_cpu;
1946391e43daSPeter Zijlstra }
1947391e43daSPeter Zijlstra }
1948391e43daSPeter Zijlstra }
1949391e43daSPeter Zijlstra rcu_read_unlock();
1950391e43daSPeter Zijlstra
1951391e43daSPeter Zijlstra /*
1952391e43daSPeter Zijlstra * And finally, if there were no matches within the domains
1953391e43daSPeter Zijlstra * just give the caller *something* to work with from the compatible
1954391e43daSPeter Zijlstra * locations.
1955391e43daSPeter Zijlstra */
1956391e43daSPeter Zijlstra if (this_cpu != -1)
1957391e43daSPeter Zijlstra return this_cpu;
1958391e43daSPeter Zijlstra
195914e292f8SPeter Zijlstra cpu = cpumask_any_distribute(lowest_mask);
1960391e43daSPeter Zijlstra if (cpu < nr_cpu_ids)
1961391e43daSPeter Zijlstra return cpu;
196297fb7a0aSIngo Molnar
1963391e43daSPeter Zijlstra return -1;
1964391e43daSPeter Zijlstra }
1965391e43daSPeter Zijlstra
1966391e43daSPeter Zijlstra /* Will lock the rq it finds */
find_lock_lowest_rq(struct task_struct * task,struct rq * rq)1967391e43daSPeter Zijlstra static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1968391e43daSPeter Zijlstra {
1969391e43daSPeter Zijlstra struct rq *lowest_rq = NULL;
1970391e43daSPeter Zijlstra int tries;
1971391e43daSPeter Zijlstra int cpu;
1972391e43daSPeter Zijlstra
1973391e43daSPeter Zijlstra for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1974391e43daSPeter Zijlstra cpu = find_lowest_rq(task);
1975391e43daSPeter Zijlstra
1976391e43daSPeter Zijlstra if ((cpu == -1) || (cpu == rq->cpu))
1977391e43daSPeter Zijlstra break;
1978391e43daSPeter Zijlstra
1979391e43daSPeter Zijlstra lowest_rq = cpu_rq(cpu);
1980391e43daSPeter Zijlstra
198180e3d87bSTim Chen if (lowest_rq->rt.highest_prio.curr <= task->prio) {
198280e3d87bSTim Chen /*
198380e3d87bSTim Chen * Target rq has tasks of equal or higher priority,
198480e3d87bSTim Chen * retrying does not release any lock and is unlikely
198580e3d87bSTim Chen * to yield a different result.
198680e3d87bSTim Chen */
198780e3d87bSTim Chen lowest_rq = NULL;
198880e3d87bSTim Chen break;
198980e3d87bSTim Chen }
199080e3d87bSTim Chen
1991391e43daSPeter Zijlstra /* if the prio of this runqueue changed, try again */
1992391e43daSPeter Zijlstra if (double_lock_balance(rq, lowest_rq)) {
1993391e43daSPeter Zijlstra /*
1994391e43daSPeter Zijlstra * We had to unlock the run queue. In
1995391e43daSPeter Zijlstra * the mean time, task could have
1996391e43daSPeter Zijlstra * the meantime, the task could have
1997391e43daSPeter Zijlstra * Also make sure that it wasn't scheduled on its rq.
1998feffe5bbSSchspa Shi * It is possible the task was scheduled, set
1999feffe5bbSSchspa Shi * "migrate_disabled" and then got preempted, so we must
2000feffe5bbSSchspa Shi * check the task migration disable flag here too.
2001391e43daSPeter Zijlstra */
2002391e43daSPeter Zijlstra if (unlikely(task_rq(task) != rq ||
200395158a89SPeter Zijlstra !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
20040b9d46fcSPeter Zijlstra task_on_cpu(rq, task) ||
200513b5ab02SXunlei Pang !rt_task(task) ||
2006feffe5bbSSchspa Shi is_migration_disabled(task) ||
2007da0c1e65SKirill Tkhai !task_on_rq_queued(task))) {
2008391e43daSPeter Zijlstra
20097f1b4393SPeter Zijlstra double_unlock_balance(rq, lowest_rq);
2010391e43daSPeter Zijlstra lowest_rq = NULL;
2011391e43daSPeter Zijlstra break;
2012391e43daSPeter Zijlstra }
2013391e43daSPeter Zijlstra }
2014391e43daSPeter Zijlstra
2015391e43daSPeter Zijlstra /* If this rq is still suitable use it. */
2016391e43daSPeter Zijlstra if (lowest_rq->rt.highest_prio.curr > task->prio)
2017391e43daSPeter Zijlstra break;
2018391e43daSPeter Zijlstra
2019391e43daSPeter Zijlstra /* try again */
2020391e43daSPeter Zijlstra double_unlock_balance(rq, lowest_rq);
2021391e43daSPeter Zijlstra lowest_rq = NULL;
2022391e43daSPeter Zijlstra }
2023391e43daSPeter Zijlstra
2024391e43daSPeter Zijlstra return lowest_rq;
2025391e43daSPeter Zijlstra }
2026391e43daSPeter Zijlstra
pick_next_pushable_task(struct rq * rq)2027391e43daSPeter Zijlstra static struct task_struct *pick_next_pushable_task(struct rq *rq)
2028391e43daSPeter Zijlstra {
2029391e43daSPeter Zijlstra struct task_struct *p;
2030391e43daSPeter Zijlstra
2031391e43daSPeter Zijlstra if (!has_pushable_tasks(rq))
2032391e43daSPeter Zijlstra return NULL;
2033391e43daSPeter Zijlstra
2034391e43daSPeter Zijlstra p = plist_first_entry(&rq->rt.pushable_tasks,
2035391e43daSPeter Zijlstra struct task_struct, pushable_tasks);
2036391e43daSPeter Zijlstra
2037391e43daSPeter Zijlstra BUG_ON(rq->cpu != task_cpu(p));
2038391e43daSPeter Zijlstra BUG_ON(task_current(rq, p));
20394b53a341SIngo Molnar BUG_ON(p->nr_cpus_allowed <= 1);
2040391e43daSPeter Zijlstra
2041da0c1e65SKirill Tkhai BUG_ON(!task_on_rq_queued(p));
2042391e43daSPeter Zijlstra BUG_ON(!rt_task(p));
2043391e43daSPeter Zijlstra
2044391e43daSPeter Zijlstra return p;
2045391e43daSPeter Zijlstra }
2046391e43daSPeter Zijlstra
2047391e43daSPeter Zijlstra /*
2048391e43daSPeter Zijlstra * If the current CPU has more than one RT task, see if the non
2049391e43daSPeter Zijlstra * running task can migrate over to a CPU that is running a task
2050391e43daSPeter Zijlstra * of lesser priority.
2051391e43daSPeter Zijlstra */
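/*
 * Rough outline (illustrative) of the push below:
 *
 *   1. pick the highest-priority pushable (queued but not running) task
 *   2. find_lock_lowest_rq(): lock a CPU whose top priority is lower
 *   3. deactivate here, set_task_cpu(), activate there, resched the target
 *
 * A migration-disabled next_task cannot be moved; in that case (and only
 * when called with @pull set) the currently running task is chased away
 * via the stopper so the pinned task can run here.
 */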
push_rt_task(struct rq * rq,bool pull)2052a7c81556SPeter Zijlstra static int push_rt_task(struct rq *rq, bool pull)
2053391e43daSPeter Zijlstra {
2054391e43daSPeter Zijlstra struct task_struct *next_task;
2055391e43daSPeter Zijlstra struct rq *lowest_rq;
2056391e43daSPeter Zijlstra int ret = 0;
2057391e43daSPeter Zijlstra
2058391e43daSPeter Zijlstra if (!rq->rt.overloaded)
2059391e43daSPeter Zijlstra return 0;
2060391e43daSPeter Zijlstra
2061391e43daSPeter Zijlstra next_task = pick_next_pushable_task(rq);
2062391e43daSPeter Zijlstra if (!next_task)
2063391e43daSPeter Zijlstra return 0;
2064391e43daSPeter Zijlstra
2065391e43daSPeter Zijlstra retry:
206649bef33eSValentin Schneider /*
206749bef33eSValentin Schneider * It's possible that next_task slipped in with a
206849bef33eSValentin Schneider * higher priority than current. If that's the case
206949bef33eSValentin Schneider * just reschedule current.
207049bef33eSValentin Schneider */
207149bef33eSValentin Schneider if (unlikely(next_task->prio < rq->curr->prio)) {
207249bef33eSValentin Schneider resched_curr(rq);
207349bef33eSValentin Schneider return 0;
207449bef33eSValentin Schneider }
207549bef33eSValentin Schneider
2076a7c81556SPeter Zijlstra if (is_migration_disabled(next_task)) {
2077a7c81556SPeter Zijlstra struct task_struct *push_task = NULL;
2078a7c81556SPeter Zijlstra int cpu;
2079a7c81556SPeter Zijlstra
2080a7c81556SPeter Zijlstra if (!pull || rq->push_busy)
2081a7c81556SPeter Zijlstra return 0;
2082a7c81556SPeter Zijlstra
208349bef33eSValentin Schneider /*
208449bef33eSValentin Schneider * Invoking find_lowest_rq() on anything but an RT task doesn't
208549bef33eSValentin Schneider * make sense. Per the above priority check, curr has to
208649bef33eSValentin Schneider * be of equal or higher priority than next_task, so no need to
208749bef33eSValentin Schneider * reschedule when bailing out.
208849bef33eSValentin Schneider *
208949bef33eSValentin Schneider * Note that the stoppers are masqueraded as SCHED_FIFO
209049bef33eSValentin Schneider * (cf. sched_set_stop_task()), so we can't rely on rt_task().
209149bef33eSValentin Schneider */
209249bef33eSValentin Schneider if (rq->curr->sched_class != &rt_sched_class)
209349bef33eSValentin Schneider return 0;
209449bef33eSValentin Schneider
2095a7c81556SPeter Zijlstra cpu = find_lowest_rq(rq->curr);
2096a7c81556SPeter Zijlstra if (cpu == -1 || cpu == rq->cpu)
2097a7c81556SPeter Zijlstra return 0;
2098a7c81556SPeter Zijlstra
2099a7c81556SPeter Zijlstra /*
2100a7c81556SPeter Zijlstra * Given we found a CPU with lower priority than @next_task,
2101a7c81556SPeter Zijlstra * it should be running. However, we cannot migrate it
2102a7c81556SPeter Zijlstra * to this other CPU, instead attempt to push the current
2103a7c81556SPeter Zijlstra * running task on this CPU away.
2104a7c81556SPeter Zijlstra */
2105a7c81556SPeter Zijlstra push_task = get_push_task(rq);
2106a7c81556SPeter Zijlstra if (push_task) {
2107d03b4817SPeter Zijlstra preempt_disable();
21085cb9eaa3SPeter Zijlstra raw_spin_rq_unlock(rq);
2109a7c81556SPeter Zijlstra stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2110a7c81556SPeter Zijlstra push_task, &rq->push_work);
2111d03b4817SPeter Zijlstra preempt_enable();
21125cb9eaa3SPeter Zijlstra raw_spin_rq_lock(rq);
2113a7c81556SPeter Zijlstra }
2114a7c81556SPeter Zijlstra
2115a7c81556SPeter Zijlstra return 0;
2116a7c81556SPeter Zijlstra }
2117a7c81556SPeter Zijlstra
21189ebc6053SYangtao Li if (WARN_ON(next_task == rq->curr))
2119391e43daSPeter Zijlstra return 0;
2120391e43daSPeter Zijlstra
2121391e43daSPeter Zijlstra /* We might release rq lock */
2122391e43daSPeter Zijlstra get_task_struct(next_task);
2123391e43daSPeter Zijlstra
2124391e43daSPeter Zijlstra /* find_lock_lowest_rq locks the rq if found */
2125391e43daSPeter Zijlstra lowest_rq = find_lock_lowest_rq(next_task, rq);
2126391e43daSPeter Zijlstra if (!lowest_rq) {
2127391e43daSPeter Zijlstra struct task_struct *task;
2128391e43daSPeter Zijlstra /*
2129391e43daSPeter Zijlstra * find_lock_lowest_rq releases rq->lock
2130391e43daSPeter Zijlstra * so it is possible that next_task has migrated.
2131391e43daSPeter Zijlstra *
2132391e43daSPeter Zijlstra * We need to make sure that the task is still on the same
2133391e43daSPeter Zijlstra * run-queue and is also still the next task eligible for
2134391e43daSPeter Zijlstra * pushing.
2135391e43daSPeter Zijlstra */
2136391e43daSPeter Zijlstra task = pick_next_pushable_task(rq);
2137de16b91eSByungchul Park if (task == next_task) {
2138391e43daSPeter Zijlstra /*
2139391e43daSPeter Zijlstra * The task hasn't migrated, and is still the next
2140391e43daSPeter Zijlstra * eligible task, but we failed to find a run-queue
2141391e43daSPeter Zijlstra * to push it to. Do not retry in this case, since
214297fb7a0aSIngo Molnar * other CPUs will pull from us when ready.
2143391e43daSPeter Zijlstra */
2144391e43daSPeter Zijlstra goto out;
2145391e43daSPeter Zijlstra }
2146391e43daSPeter Zijlstra
2147391e43daSPeter Zijlstra if (!task)
2148391e43daSPeter Zijlstra /* No more tasks, just exit */
2149391e43daSPeter Zijlstra goto out;
2150391e43daSPeter Zijlstra
2151391e43daSPeter Zijlstra /*
2152391e43daSPeter Zijlstra * Something has shifted, try again.
2153391e43daSPeter Zijlstra */
2154391e43daSPeter Zijlstra put_task_struct(next_task);
2155391e43daSPeter Zijlstra next_task = task;
2156391e43daSPeter Zijlstra goto retry;
2157391e43daSPeter Zijlstra }
2158391e43daSPeter Zijlstra
2159391e43daSPeter Zijlstra deactivate_task(rq, next_task, 0);
2160391e43daSPeter Zijlstra set_task_cpu(next_task, lowest_rq->cpu);
2161391e43daSPeter Zijlstra activate_task(lowest_rq, next_task, 0);
2162a7c81556SPeter Zijlstra resched_curr(lowest_rq);
2163391e43daSPeter Zijlstra ret = 1;
2164391e43daSPeter Zijlstra
2165391e43daSPeter Zijlstra double_unlock_balance(rq, lowest_rq);
2166391e43daSPeter Zijlstra out:
2167391e43daSPeter Zijlstra put_task_struct(next_task);
2168391e43daSPeter Zijlstra
2169391e43daSPeter Zijlstra return ret;
2170391e43daSPeter Zijlstra }
2171391e43daSPeter Zijlstra
push_rt_tasks(struct rq * rq)2172391e43daSPeter Zijlstra static void push_rt_tasks(struct rq *rq)
2173391e43daSPeter Zijlstra {
2174391e43daSPeter Zijlstra /* push_rt_task will return true if it moved an RT */
2175a7c81556SPeter Zijlstra while (push_rt_task(rq, false))
2176391e43daSPeter Zijlstra ;
2177391e43daSPeter Zijlstra }
2178391e43daSPeter Zijlstra
2179b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
2180b6366f04SSteven Rostedt
21813e777f99SSteven Rostedt (VMware) /*
21823e777f99SSteven Rostedt (VMware) * When a high priority task schedules out from a CPU and a lower priority
21833e777f99SSteven Rostedt (VMware) * task is scheduled in, a check is made to see if there are any RT tasks
21843e777f99SSteven Rostedt (VMware) * on other CPUs that are waiting to run because a higher priority RT task
21853e777f99SSteven Rostedt (VMware) * is currently running on its CPU. In this case, the CPU with multiple RT
21863e777f99SSteven Rostedt (VMware) * tasks queued on it (overloaded) needs to be notified that a CPU has opened
21873e777f99SSteven Rostedt (VMware) * up that may be able to run one of its non-running queued RT tasks.
21883e777f99SSteven Rostedt (VMware) *
21894bdced5cSSteven Rostedt (Red Hat) * All CPUs with overloaded RT tasks need to be notified as there is currently
21904bdced5cSSteven Rostedt (Red Hat) * no way to know which of these CPUs has the highest priority task waiting
21914bdced5cSSteven Rostedt (Red Hat) * to run. Instead of trying to take a spinlock on each of these CPUs,
21924bdced5cSSteven Rostedt (Red Hat) * which has been shown to cause large latencies on machines with many
21934bdced5cSSteven Rostedt (Red Hat) * CPUs, an IPI is sent to the CPUs to have them push off the overloaded
21944bdced5cSSteven Rostedt (Red Hat) * RT tasks waiting to run.
21953e777f99SSteven Rostedt (VMware) *
21964bdced5cSSteven Rostedt (Red Hat) * Just sending an IPI to each of the CPUs is also an issue, as on machines
21974bdced5cSSteven Rostedt (Red Hat) * with a large CPU count this can cause an IPI storm on a CPU, especially
21984bdced5cSSteven Rostedt (Red Hat) * if it is the only CPU with multiple RT tasks queued and a large number
21994bdced5cSSteven Rostedt (Red Hat) * of CPUs are scheduling a lower priority task at the same time.
22003e777f99SSteven Rostedt (VMware) *
22014bdced5cSSteven Rostedt (Red Hat) * Each root domain has its own irq work function that can iterate over
22024bdced5cSSteven Rostedt (Red Hat) * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
22033b03706fSIngo Molnar * tasks must be checked regardless of whether one or many CPUs are lowering
22044bdced5cSSteven Rostedt (Red Hat) * their priority, there's a single irq work iterator that will try to
22054bdced5cSSteven Rostedt (Red Hat) * push off RT tasks that are waiting to run.
22063e777f99SSteven Rostedt (VMware) *
22074bdced5cSSteven Rostedt (Red Hat) * When a CPU schedules a lower priority task, it will kick off the
22084bdced5cSSteven Rostedt (Red Hat) * irq work iterator that will jump to each CPU with overloaded RT tasks.
22094bdced5cSSteven Rostedt (Red Hat) * As it only takes the first CPU that schedules a lower priority task
22104bdced5cSSteven Rostedt (Red Hat) * to start the process, the rto_loop_start variable is claimed atomically,
22114bdced5cSSteven Rostedt (Red Hat) * and only the CPU that wins that cmpxchg goes on to take the rto_lock.
22124bdced5cSSteven Rostedt (Red Hat) * This prevents high contention on the lock as the process handles all
22134bdced5cSSteven Rostedt (Red Hat) * CPUs scheduling lower priority tasks.
22143e777f99SSteven Rostedt (VMware) *
22154bdced5cSSteven Rostedt (Red Hat) * All CPUs that are scheduling a lower priority task will increment the
22164bdced5cSSteven Rostedt (Red Hat) * rto_loop_next variable. This will make sure that the irq work iterator
22174bdced5cSSteven Rostedt (Red Hat) * checks all RT overloaded CPUs whenever a CPU schedules a new lower
22184bdced5cSSteven Rostedt (Red Hat) * priority task, even if the iterator is in the middle of a scan. Incrementing
22194bdced5cSSteven Rostedt (Red Hat) * rto_loop_next will cause the iterator to perform another scan.
22203e777f99SSteven Rostedt (VMware) *
22213e777f99SSteven Rostedt (VMware) */
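/*
 * An illustrative walk-through of the mechanism above (hypothetical CPU
 * numbers, assuming CPUs 2 and 5 are RT overloaded and CPU 0 just scheduled
 * a lower priority task):
 *
 *   CPU 0: tell_cpu_to_push() bumps rto_loop_next, wins rto_loop_start,
 *          rto_next_cpu() returns 2, queues rto_push_work on CPU 2.
 *   CPU 2: rto_push_irq_work_func() pushes what it can, then passes the
 *          IPI on: rto_next_cpu() returns 5, queues the work on CPU 5.
 *   CPU 5: pushes what it can; rto_next_cpu() returns -1 once rto_loop
 *          catches up with rto_loop_next, so the chain stops and the
 *          root domain reference is dropped.
 */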
2222ad0f1d9dSSteven Rostedt (VMware) static int rto_next_cpu(struct root_domain *rd)
2223b6366f04SSteven Rostedt {
22244bdced5cSSteven Rostedt (Red Hat) int next;
2225b6366f04SSteven Rostedt int cpu;
2226b6366f04SSteven Rostedt
2227b6366f04SSteven Rostedt /*
22284bdced5cSSteven Rostedt (Red Hat) * When starting the IPI RT pushing, the rto_cpu is set to -1, so
22294bdced5cSSteven Rostedt (Red Hat) * rto_next_cpu() will simply return the first CPU found in
22304bdced5cSSteven Rostedt (Red Hat) * the rto_mask.
22314bdced5cSSteven Rostedt (Red Hat) *
223297fb7a0aSIngo Molnar * If rto_next_cpu() is called while rto_cpu holds a valid CPU, it
22334bdced5cSSteven Rostedt (Red Hat) * will return the next CPU found in the rto_mask.
22344bdced5cSSteven Rostedt (Red Hat) *
22354bdced5cSSteven Rostedt (Red Hat) * If there are no more CPUs left in the rto_mask, then a check is made
22364bdced5cSSteven Rostedt (Red Hat) * against rto_loop and rto_loop_next. rto_loop is only updated with
22374bdced5cSSteven Rostedt (Red Hat) * the rto_lock held, but any CPU may increment the rto_loop_next
22384bdced5cSSteven Rostedt (Red Hat) * without any locking.
2239b6366f04SSteven Rostedt */
22404bdced5cSSteven Rostedt (Red Hat) for (;;) {
22414bdced5cSSteven Rostedt (Red Hat)
22424bdced5cSSteven Rostedt (Red Hat) /* When rto_cpu is -1 this acts like cpumask_first() */
22434bdced5cSSteven Rostedt (Red Hat) cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
22444bdced5cSSteven Rostedt (Red Hat)
22454bdced5cSSteven Rostedt (Red Hat) rd->rto_cpu = cpu;
22464bdced5cSSteven Rostedt (Red Hat)
22474bdced5cSSteven Rostedt (Red Hat) if (cpu < nr_cpu_ids)
22484bdced5cSSteven Rostedt (Red Hat) return cpu;
22494bdced5cSSteven Rostedt (Red Hat)
22504bdced5cSSteven Rostedt (Red Hat) rd->rto_cpu = -1;
22514bdced5cSSteven Rostedt (Red Hat)
22524bdced5cSSteven Rostedt (Red Hat) /*
22534bdced5cSSteven Rostedt (Red Hat) * ACQUIRE ensures we see the @rto_mask changes
22544bdced5cSSteven Rostedt (Red Hat) * made prior to the @next value observed.
22554bdced5cSSteven Rostedt (Red Hat) *
22564bdced5cSSteven Rostedt (Red Hat) * Matches WMB in rt_set_overload().
22574bdced5cSSteven Rostedt (Red Hat) */
22584bdced5cSSteven Rostedt (Red Hat) next = atomic_read_acquire(&rd->rto_loop_next);
22594bdced5cSSteven Rostedt (Red Hat)
22604bdced5cSSteven Rostedt (Red Hat) if (rd->rto_loop == next)
22614bdced5cSSteven Rostedt (Red Hat) break;
22624bdced5cSSteven Rostedt (Red Hat)
22634bdced5cSSteven Rostedt (Red Hat) rd->rto_loop = next;
2264b6366f04SSteven Rostedt }
2265b6366f04SSteven Rostedt
22664bdced5cSSteven Rostedt (Red Hat) return -1;
22674bdced5cSSteven Rostedt (Red Hat) }
2268b6366f04SSteven Rostedt
22694bdced5cSSteven Rostedt (Red Hat) static inline bool rto_start_trylock(atomic_t *v)
22704bdced5cSSteven Rostedt (Red Hat) {
22714bdced5cSSteven Rostedt (Red Hat) return !atomic_cmpxchg_acquire(v, 0, 1);
22724bdced5cSSteven Rostedt (Red Hat) }
22734bdced5cSSteven Rostedt (Red Hat)
22744bdced5cSSteven Rostedt (Red Hat) static inline void rto_start_unlock(atomic_t *v)
22754bdced5cSSteven Rostedt (Red Hat) {
22764bdced5cSSteven Rostedt (Red Hat) atomic_set_release(v, 0);
22774bdced5cSSteven Rostedt (Red Hat) }
22784bdced5cSSteven Rostedt (Red Hat)
22794bdced5cSSteven Rostedt (Red Hat) static void tell_cpu_to_push(struct rq *rq)
22804bdced5cSSteven Rostedt (Red Hat) {
22814bdced5cSSteven Rostedt (Red Hat) int cpu = -1;
22824bdced5cSSteven Rostedt (Red Hat)
22834bdced5cSSteven Rostedt (Red Hat) /* Keep the loop going if the IPI is currently active */
22844bdced5cSSteven Rostedt (Red Hat) atomic_inc(&rq->rd->rto_loop_next);
22854bdced5cSSteven Rostedt (Red Hat)
22864bdced5cSSteven Rostedt (Red Hat) /* Only one CPU can initiate a loop at a time */
22874bdced5cSSteven Rostedt (Red Hat) if (!rto_start_trylock(&rq->rd->rto_loop_start))
2288b6366f04SSteven Rostedt return;
2289b6366f04SSteven Rostedt
22904bdced5cSSteven Rostedt (Red Hat) raw_spin_lock(&rq->rd->rto_lock);
2291b6366f04SSteven Rostedt
22924bdced5cSSteven Rostedt (Red Hat) /*
229397fb7a0aSIngo Molnar * The rto_cpu is updated under the lock; if it holds a valid CPU
22944bdced5cSSteven Rostedt (Red Hat) * then the IPI is still running and will continue due to the
22954bdced5cSSteven Rostedt (Red Hat) * update to rto_loop_next, and nothing needs to be done here.
22964bdced5cSSteven Rostedt (Red Hat) * Otherwise it is finishing up and an IPI needs to be sent.
22974bdced5cSSteven Rostedt (Red Hat) */
22984bdced5cSSteven Rostedt (Red Hat) if (rq->rd->rto_cpu < 0)
2299ad0f1d9dSSteven Rostedt (VMware) cpu = rto_next_cpu(rq->rd);
23004bdced5cSSteven Rostedt (Red Hat)
23014bdced5cSSteven Rostedt (Red Hat) raw_spin_unlock(&rq->rd->rto_lock);
23024bdced5cSSteven Rostedt (Red Hat)
23034bdced5cSSteven Rostedt (Red Hat) rto_start_unlock(&rq->rd->rto_loop_start);
23044bdced5cSSteven Rostedt (Red Hat)
2305364f5665SSteven Rostedt (VMware) if (cpu >= 0) {
2306364f5665SSteven Rostedt (VMware) /* Make sure the rd does not get freed while pushing */
2307364f5665SSteven Rostedt (VMware) sched_get_rd(rq->rd);
23084bdced5cSSteven Rostedt (Red Hat) irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2309b6366f04SSteven Rostedt }
2310364f5665SSteven Rostedt (VMware) }
2311b6366f04SSteven Rostedt
2312b6366f04SSteven Rostedt /* Called from hardirq context */
23134bdced5cSSteven Rostedt (Red Hat) void rto_push_irq_work_func(struct irq_work *work)
2314b6366f04SSteven Rostedt {
2315ad0f1d9dSSteven Rostedt (VMware) struct root_domain *rd =
2316ad0f1d9dSSteven Rostedt (VMware) container_of(work, struct root_domain, rto_push_work);
23174bdced5cSSteven Rostedt (Red Hat) struct rq *rq;
2318b6366f04SSteven Rostedt int cpu;
2319b6366f04SSteven Rostedt
23204bdced5cSSteven Rostedt (Red Hat) rq = this_rq();
2321b6366f04SSteven Rostedt
23224bdced5cSSteven Rostedt (Red Hat) /*
23234bdced5cSSteven Rostedt (Red Hat) * We do not need to grab the lock to check for has_pushable_tasks.
23244bdced5cSSteven Rostedt (Red Hat) * When it gets updated, a check is made to see if a push is possible.
23254bdced5cSSteven Rostedt (Red Hat) */
2326b6366f04SSteven Rostedt if (has_pushable_tasks(rq)) {
23275cb9eaa3SPeter Zijlstra raw_spin_rq_lock(rq);
2328a7c81556SPeter Zijlstra while (push_rt_task(rq, true))
2329a7c81556SPeter Zijlstra ;
23305cb9eaa3SPeter Zijlstra raw_spin_rq_unlock(rq);
2331b6366f04SSteven Rostedt }
2332b6366f04SSteven Rostedt
2333ad0f1d9dSSteven Rostedt (VMware) raw_spin_lock(&rd->rto_lock);
23344bdced5cSSteven Rostedt (Red Hat)
2335b6366f04SSteven Rostedt /* Pass the IPI to the next rt overloaded queue */
2336ad0f1d9dSSteven Rostedt (VMware) cpu = rto_next_cpu(rd);
2337b6366f04SSteven Rostedt
2338ad0f1d9dSSteven Rostedt (VMware) raw_spin_unlock(&rd->rto_lock);
2339b6366f04SSteven Rostedt
2340364f5665SSteven Rostedt (VMware) if (cpu < 0) {
2341364f5665SSteven Rostedt (VMware) sched_put_rd(rd);
2342b6366f04SSteven Rostedt return;
2343364f5665SSteven Rostedt (VMware) }
2344b6366f04SSteven Rostedt
2345b6366f04SSteven Rostedt /* Try the next RT overloaded CPU */
2346ad0f1d9dSSteven Rostedt (VMware) irq_work_queue_on(&rd->rto_push_work, cpu);
2347b6366f04SSteven Rostedt }
2348b6366f04SSteven Rostedt #endif /* HAVE_RT_PUSH_IPI */
2349b6366f04SSteven Rostedt
23508046d680SPeter Zijlstra static void pull_rt_task(struct rq *this_rq)
2351391e43daSPeter Zijlstra {
23528046d680SPeter Zijlstra int this_cpu = this_rq->cpu, cpu;
23538046d680SPeter Zijlstra bool resched = false;
2354a7c81556SPeter Zijlstra struct task_struct *p, *push_task;
2355391e43daSPeter Zijlstra struct rq *src_rq;
2356f73c52a5SSteven Rostedt int rt_overload_count = rt_overloaded(this_rq);
2357391e43daSPeter Zijlstra
2358f73c52a5SSteven Rostedt if (likely(!rt_overload_count))
23598046d680SPeter Zijlstra return;
2360391e43daSPeter Zijlstra
23617c3f2ab7SPeter Zijlstra /*
23627c3f2ab7SPeter Zijlstra * Match the barrier from rt_set_overload(); this guarantees that if we
23637c3f2ab7SPeter Zijlstra * see overloaded we must also see the rto_mask bit.
23647c3f2ab7SPeter Zijlstra */
23657c3f2ab7SPeter Zijlstra smp_rmb();
23667c3f2ab7SPeter Zijlstra
2367f73c52a5SSteven Rostedt /* If we are the only overloaded CPU do nothing */
2368f73c52a5SSteven Rostedt if (rt_overload_count == 1 &&
2369f73c52a5SSteven Rostedt cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2370f73c52a5SSteven Rostedt return;
2371f73c52a5SSteven Rostedt
2372b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
2373b6366f04SSteven Rostedt if (sched_feat(RT_PUSH_IPI)) {
2374b6366f04SSteven Rostedt tell_cpu_to_push(this_rq);
23758046d680SPeter Zijlstra return;
2376b6366f04SSteven Rostedt }
2377b6366f04SSteven Rostedt #endif
2378b6366f04SSteven Rostedt
2379391e43daSPeter Zijlstra for_each_cpu(cpu, this_rq->rd->rto_mask) {
2380391e43daSPeter Zijlstra if (this_cpu == cpu)
2381391e43daSPeter Zijlstra continue;
2382391e43daSPeter Zijlstra
2383391e43daSPeter Zijlstra src_rq = cpu_rq(cpu);
2384391e43daSPeter Zijlstra
2385391e43daSPeter Zijlstra /*
2386391e43daSPeter Zijlstra * Don't bother taking the src_rq->lock if the next highest
2387391e43daSPeter Zijlstra * task is known to be lower-priority than our current task.
2388391e43daSPeter Zijlstra * This may look racy, but if this value is about to go
2389391e43daSPeter Zijlstra * logically higher, the src_rq will push this task away.
2390391e43daSPeter Zijlstra * And if it's going logically lower, we do not care.
2391391e43daSPeter Zijlstra */
2392391e43daSPeter Zijlstra if (src_rq->rt.highest_prio.next >=
2393391e43daSPeter Zijlstra this_rq->rt.highest_prio.curr)
2394391e43daSPeter Zijlstra continue;
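/*
 * Illustrative numbers (a lower value means a higher priority): with
 * this_rq's highest_prio.curr at 50, a src_rq whose highest_prio.next
 * is 60 is skipped above, while one advertising 40 has a pushable task
 * that would preempt us, so it is worth taking its lock below.
 */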
2395391e43daSPeter Zijlstra
2396391e43daSPeter Zijlstra /*
2397391e43daSPeter Zijlstra * We can potentially drop this_rq's lock in
2398391e43daSPeter Zijlstra * double_lock_balance, and another CPU could
2399391e43daSPeter Zijlstra * alter this_rq
2400391e43daSPeter Zijlstra */
2401a7c81556SPeter Zijlstra push_task = NULL;
2402391e43daSPeter Zijlstra double_lock_balance(this_rq, src_rq);
2403391e43daSPeter Zijlstra
2404391e43daSPeter Zijlstra /*
2405e23ee747SKirill Tkhai * We can only pull a task that is pushable
2406e23ee747SKirill Tkhai * on its rq, and no others.
2407391e43daSPeter Zijlstra */
2408e23ee747SKirill Tkhai p = pick_highest_pushable_task(src_rq, this_cpu);
2409391e43daSPeter Zijlstra
2410391e43daSPeter Zijlstra /*
2411391e43daSPeter Zijlstra * Do we have an RT task that preempts
2412391e43daSPeter Zijlstra * the to-be-scheduled task?
2413391e43daSPeter Zijlstra */
2414391e43daSPeter Zijlstra if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2415391e43daSPeter Zijlstra WARN_ON(p == src_rq->curr);
2416da0c1e65SKirill Tkhai WARN_ON(!task_on_rq_queued(p));
2417391e43daSPeter Zijlstra
2418391e43daSPeter Zijlstra /*
2419391e43daSPeter Zijlstra * There's a chance that p is higher in priority
242097fb7a0aSIngo Molnar * than what's currently running on its CPU.
24213b03706fSIngo Molnar * This is just because p is waking up and hasn't
2422391e43daSPeter Zijlstra * had a chance to schedule yet. We only pull
2423391e43daSPeter Zijlstra * p if it is lower in priority than the
2424391e43daSPeter Zijlstra * current task on the run queue.
2425391e43daSPeter Zijlstra */
2426391e43daSPeter Zijlstra if (p->prio < src_rq->curr->prio)
2427391e43daSPeter Zijlstra goto skip;
2428391e43daSPeter Zijlstra
2429a7c81556SPeter Zijlstra if (is_migration_disabled(p)) {
2430a7c81556SPeter Zijlstra push_task = get_push_task(src_rq);
2431a7c81556SPeter Zijlstra } else {
2432391e43daSPeter Zijlstra deactivate_task(src_rq, p, 0);
2433391e43daSPeter Zijlstra set_task_cpu(p, this_cpu);
2434391e43daSPeter Zijlstra activate_task(this_rq, p, 0);
2435a7c81556SPeter Zijlstra resched = true;
2436a7c81556SPeter Zijlstra }
2437391e43daSPeter Zijlstra /*
2438391e43daSPeter Zijlstra * We continue with the search, just in
2439391e43daSPeter Zijlstra * case there's an even higher prio task
2440391e43daSPeter Zijlstra * in another runqueue. (low likelihood
2441391e43daSPeter Zijlstra * but possible)
2442391e43daSPeter Zijlstra */
2443391e43daSPeter Zijlstra }
2444391e43daSPeter Zijlstra skip:
2445391e43daSPeter Zijlstra double_unlock_balance(this_rq, src_rq);
2446a7c81556SPeter Zijlstra
2447a7c81556SPeter Zijlstra if (push_task) {
2448d03b4817SPeter Zijlstra preempt_disable();
24495cb9eaa3SPeter Zijlstra raw_spin_rq_unlock(this_rq);
2450a7c81556SPeter Zijlstra stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2451a7c81556SPeter Zijlstra push_task, &src_rq->push_work);
2452d03b4817SPeter Zijlstra preempt_enable();
24535cb9eaa3SPeter Zijlstra raw_spin_rq_lock(this_rq);
2454a7c81556SPeter Zijlstra }
2455391e43daSPeter Zijlstra }
2456391e43daSPeter Zijlstra
24578046d680SPeter Zijlstra if (resched)
24588046d680SPeter Zijlstra resched_curr(this_rq);
2459391e43daSPeter Zijlstra }
2460391e43daSPeter Zijlstra
2461391e43daSPeter Zijlstra /*
2462391e43daSPeter Zijlstra * If we are not running and we are not going to reschedule soon, we should
2463391e43daSPeter Zijlstra * try to push tasks away now
2464391e43daSPeter Zijlstra */
2465391e43daSPeter Zijlstra static void task_woken_rt(struct rq *rq, struct task_struct *p)
2466391e43daSPeter Zijlstra {
24670b9d46fcSPeter Zijlstra bool need_to_push = !task_on_cpu(rq, p) &&
2468391e43daSPeter Zijlstra !test_tsk_need_resched(rq->curr) &&
24694b53a341SIngo Molnar p->nr_cpus_allowed > 1 &&
24701baca4ceSJuri Lelli (dl_task(rq->curr) || rt_task(rq->curr)) &&
24714b53a341SIngo Molnar (rq->curr->nr_cpus_allowed < 2 ||
2472804d402fSQais Yousef rq->curr->prio <= p->prio);
2473804d402fSQais Yousef
2474d94a9df4SQais Yousef if (need_to_push)
2475391e43daSPeter Zijlstra push_rt_tasks(rq);
2476391e43daSPeter Zijlstra }
2477391e43daSPeter Zijlstra
2478391e43daSPeter Zijlstra /* Assumes rq->lock is held */
2479391e43daSPeter Zijlstra static void rq_online_rt(struct rq *rq)
2480391e43daSPeter Zijlstra {
2481391e43daSPeter Zijlstra if (rq->rt.overloaded)
2482391e43daSPeter Zijlstra rt_set_overload(rq);
2483391e43daSPeter Zijlstra
2484391e43daSPeter Zijlstra __enable_runtime(rq);
2485391e43daSPeter Zijlstra
2486391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2487391e43daSPeter Zijlstra }
2488391e43daSPeter Zijlstra
2489391e43daSPeter Zijlstra /* Assumes rq->lock is held */
2490391e43daSPeter Zijlstra static void rq_offline_rt(struct rq *rq)
2491391e43daSPeter Zijlstra {
2492391e43daSPeter Zijlstra if (rq->rt.overloaded)
2493391e43daSPeter Zijlstra rt_clear_overload(rq);
2494391e43daSPeter Zijlstra
2495391e43daSPeter Zijlstra __disable_runtime(rq);
2496391e43daSPeter Zijlstra
2497391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2498391e43daSPeter Zijlstra }
2499391e43daSPeter Zijlstra
2500391e43daSPeter Zijlstra /*
2501391e43daSPeter Zijlstra * When switching from the rt queue, we bring ourselves to a position
2502391e43daSPeter Zijlstra * where we might want to pull RT tasks from other runqueues.
2503391e43daSPeter Zijlstra */
2504391e43daSPeter Zijlstra static void switched_from_rt(struct rq *rq, struct task_struct *p)
2505391e43daSPeter Zijlstra {
2506391e43daSPeter Zijlstra /*
2507391e43daSPeter Zijlstra * If there are other RT tasks then we will reschedule
2508391e43daSPeter Zijlstra * and the scheduling of the other RT tasks will handle
2509391e43daSPeter Zijlstra * the balancing. But if we are the last RT task
2510391e43daSPeter Zijlstra * we may need to handle the pulling of RT tasks
2511391e43daSPeter Zijlstra * now.
2512391e43daSPeter Zijlstra */
2513da0c1e65SKirill Tkhai if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
25141158ddb5SKirill Tkhai return;
25151158ddb5SKirill Tkhai
251602d8ec94SIngo Molnar rt_queue_pull_task(rq);
2517391e43daSPeter Zijlstra }
2518391e43daSPeter Zijlstra
251911c785b7SLi Zefan void __init init_sched_rt_class(void)
2520391e43daSPeter Zijlstra {
2521391e43daSPeter Zijlstra unsigned int i;
2522391e43daSPeter Zijlstra
2523391e43daSPeter Zijlstra for_each_possible_cpu(i) {
2524391e43daSPeter Zijlstra zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2525391e43daSPeter Zijlstra GFP_KERNEL, cpu_to_node(i));
2526391e43daSPeter Zijlstra }
2527391e43daSPeter Zijlstra }
2528391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2529391e43daSPeter Zijlstra
2530391e43daSPeter Zijlstra /*
2531391e43daSPeter Zijlstra * When switching a task to RT, we may overload the runqueue
2532391e43daSPeter Zijlstra * with RT tasks. In this case we try to push them off to
2533391e43daSPeter Zijlstra * other runqueues.
2534391e43daSPeter Zijlstra */
2535391e43daSPeter Zijlstra static void switched_to_rt(struct rq *rq, struct task_struct *p)
2536391e43daSPeter Zijlstra {
2537391e43daSPeter Zijlstra /*
2538fecfcbc2SVincent Donnefort * If we are running, update the avg_rt tracking, as the running time
2539fecfcbc2SVincent Donnefort * will from now on be accounted to it.
2540fecfcbc2SVincent Donnefort */
2541fecfcbc2SVincent Donnefort if (task_current(rq, p)) {
2542fecfcbc2SVincent Donnefort update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2543fecfcbc2SVincent Donnefort return;
2544fecfcbc2SVincent Donnefort }
2545fecfcbc2SVincent Donnefort
2546fecfcbc2SVincent Donnefort /*
2547fecfcbc2SVincent Donnefort * If we are not running, we may need to preempt the currently
2548fecfcbc2SVincent Donnefort * running task. If that task is also an RT task
2549391e43daSPeter Zijlstra * then see if we can move to another run queue.
2550391e43daSPeter Zijlstra */
2551fecfcbc2SVincent Donnefort if (task_on_rq_queued(p)) {
2552391e43daSPeter Zijlstra #ifdef CONFIG_SMP
2553d94a9df4SQais Yousef if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
255402d8ec94SIngo Molnar rt_queue_push_tasks(rq);
2555619bd4a7SSebastian Andrzej Siewior #endif /* CONFIG_SMP */
25562fe25826SPaul E. McKenney if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
25578875125eSKirill Tkhai resched_curr(rq);
2558391e43daSPeter Zijlstra }
2559391e43daSPeter Zijlstra }
2560391e43daSPeter Zijlstra
2561391e43daSPeter Zijlstra /*
2562391e43daSPeter Zijlstra * Priority of the task has changed. This may cause
2563391e43daSPeter Zijlstra * us to initiate a push or pull.
2564391e43daSPeter Zijlstra */
2565391e43daSPeter Zijlstra static void
2566391e43daSPeter Zijlstra prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2567391e43daSPeter Zijlstra {
2568da0c1e65SKirill Tkhai if (!task_on_rq_queued(p))
2569391e43daSPeter Zijlstra return;
2570391e43daSPeter Zijlstra
257165bcf072SHui Su if (task_current(rq, p)) {
2572391e43daSPeter Zijlstra #ifdef CONFIG_SMP
2573391e43daSPeter Zijlstra /*
2574391e43daSPeter Zijlstra * If our priority decreases while running, we
2575391e43daSPeter Zijlstra * may need to pull tasks to this runqueue.
2576391e43daSPeter Zijlstra */
2577391e43daSPeter Zijlstra if (oldprio < p->prio)
257802d8ec94SIngo Molnar rt_queue_pull_task(rq);
2579fd7a4bedSPeter Zijlstra
2580391e43daSPeter Zijlstra /*
2581391e43daSPeter Zijlstra * If there's a higher priority task waiting to run
2582fd7a4bedSPeter Zijlstra * then reschedule.
2583391e43daSPeter Zijlstra */
2584fd7a4bedSPeter Zijlstra if (p->prio > rq->rt.highest_prio.curr)
25858875125eSKirill Tkhai resched_curr(rq);
2586391e43daSPeter Zijlstra #else
2587391e43daSPeter Zijlstra /* For UP simply resched on drop of prio */
2588391e43daSPeter Zijlstra if (oldprio < p->prio)
25898875125eSKirill Tkhai resched_curr(rq);
2590391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2591391e43daSPeter Zijlstra } else {
2592391e43daSPeter Zijlstra /*
2593391e43daSPeter Zijlstra * This task is not running, but if it is
2594391e43daSPeter Zijlstra * higher in priority than the currently running task
2595391e43daSPeter Zijlstra * then reschedule.
2596391e43daSPeter Zijlstra */
2597391e43daSPeter Zijlstra if (p->prio < rq->curr->prio)
25988875125eSKirill Tkhai resched_curr(rq);
2599391e43daSPeter Zijlstra }
2600391e43daSPeter Zijlstra }
2601391e43daSPeter Zijlstra
2602b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS
2603391e43daSPeter Zijlstra static void watchdog(struct rq *rq, struct task_struct *p)
2604391e43daSPeter Zijlstra {
2605391e43daSPeter Zijlstra unsigned long soft, hard;
2606391e43daSPeter Zijlstra
2607391e43daSPeter Zijlstra /* max may change after cur was read; this will be fixed on the next tick */
2608391e43daSPeter Zijlstra soft = task_rlimit(p, RLIMIT_RTTIME);
2609391e43daSPeter Zijlstra hard = task_rlimit_max(p, RLIMIT_RTTIME);
2610391e43daSPeter Zijlstra
2611391e43daSPeter Zijlstra if (soft != RLIM_INFINITY) {
2612391e43daSPeter Zijlstra unsigned long next;
2613391e43daSPeter Zijlstra
261457d2aa00SYing Xue if (p->rt.watchdog_stamp != jiffies) {
2615391e43daSPeter Zijlstra p->rt.timeout++;
261657d2aa00SYing Xue p->rt.watchdog_stamp = jiffies;
261757d2aa00SYing Xue }
261857d2aa00SYing Xue
2619391e43daSPeter Zijlstra next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
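/*
 * Illustrative arithmetic (assuming HZ=250): a soft RLIMIT_RTTIME of
 * 500000us gives USEC_PER_SEC/HZ = 4000us per tick, so next becomes
 * DIV_ROUND_UP(500000, 4000) = 125 ticks of accumulated RT runtime
 * before the check below triggers posix_cputimers_rt_watchdog().
 */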
26203a245c0fSThomas Gleixner if (p->rt.timeout > next) {
26213a245c0fSThomas Gleixner posix_cputimers_rt_watchdog(&p->posix_cputimers,
26223a245c0fSThomas Gleixner p->se.sum_exec_runtime);
26233a245c0fSThomas Gleixner }
2624391e43daSPeter Zijlstra }
2625391e43daSPeter Zijlstra }
2626b18b6a9cSNicolas Pitre #else
2627b18b6a9cSNicolas Pitre static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2628b18b6a9cSNicolas Pitre #endif
2629391e43daSPeter Zijlstra
2630d84b3131SFrederic Weisbecker /*
2631d84b3131SFrederic Weisbecker * scheduler tick hitting a task of our scheduling class.
2632d84b3131SFrederic Weisbecker *
2633d84b3131SFrederic Weisbecker * NOTE: This function can be called remotely by the tick offload that
2634d84b3131SFrederic Weisbecker * goes along full dynticks. Therefore no local assumption can be made
2635d84b3131SFrederic Weisbecker * and everything must be accessed through the @rq and @curr passed in
2636d84b3131SFrederic Weisbecker * parameters.
2637d84b3131SFrederic Weisbecker */
2638391e43daSPeter Zijlstra static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2639391e43daSPeter Zijlstra {
2640454c7999SColin Cross struct sched_rt_entity *rt_se = &p->rt;
2641454c7999SColin Cross
2642391e43daSPeter Zijlstra update_curr_rt(rq);
264323127296SVincent Guittot update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2644391e43daSPeter Zijlstra
2645391e43daSPeter Zijlstra watchdog(rq, p);
2646391e43daSPeter Zijlstra
2647391e43daSPeter Zijlstra /*
2648391e43daSPeter Zijlstra * RR tasks need a special form of timeslice management.
2649391e43daSPeter Zijlstra * FIFO tasks have no timeslices.
2650391e43daSPeter Zijlstra */
2651391e43daSPeter Zijlstra if (p->policy != SCHED_RR)
2652391e43daSPeter Zijlstra return;
2653391e43daSPeter Zijlstra
2654391e43daSPeter Zijlstra if (--p->rt.time_slice)
2655391e43daSPeter Zijlstra return;
2656391e43daSPeter Zijlstra
2657ce0dbbbbSClark Williams p->rt.time_slice = sched_rr_timeslice;
2658391e43daSPeter Zijlstra
2659391e43daSPeter Zijlstra /*
2660e9aa39bbSLi Bin * Requeue to the end of the queue if we (and all of our ancestors) are not
2661e9aa39bbSLi Bin * the only element on the queue
2662391e43daSPeter Zijlstra */
2663454c7999SColin Cross for_each_sched_rt_entity(rt_se) {
2664454c7999SColin Cross if (rt_se->run_list.prev != rt_se->run_list.next) {
2665391e43daSPeter Zijlstra requeue_task_rt(rq, p, 0);
26668aa6f0ebSKirill Tkhai resched_curr(rq);
2667454c7999SColin Cross return;
2668454c7999SColin Cross }
2669391e43daSPeter Zijlstra }
2670391e43daSPeter Zijlstra }
2671391e43daSPeter Zijlstra
2672391e43daSPeter Zijlstra static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2673391e43daSPeter Zijlstra {
2674391e43daSPeter Zijlstra /*
2675391e43daSPeter Zijlstra * Time slice is 0 for SCHED_FIFO tasks
2676391e43daSPeter Zijlstra */
2677391e43daSPeter Zijlstra if (task->policy == SCHED_RR)
2678ce0dbbbbSClark Williams return sched_rr_timeslice;
2679391e43daSPeter Zijlstra else
2680391e43daSPeter Zijlstra return 0;
2681391e43daSPeter Zijlstra }
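/*
 * Usage sketch (assuming the usual syscall plumbing, for illustration only):
 * this is what backs sched_rr_get_interval(2), e.g.
 *
 *	struct timespec ts;
 *	sched_rr_get_interval(0, &ts);
 *	// ts now holds the RR quantum for a SCHED_RR caller, 0 for SCHED_FIFO
 */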
2682391e43daSPeter Zijlstra
2683530bfad1SHao Jia #ifdef CONFIG_SCHED_CORE
2684530bfad1SHao Jia static int task_is_throttled_rt(struct task_struct *p, int cpu)
2685530bfad1SHao Jia {
2686530bfad1SHao Jia struct rt_rq *rt_rq;
2687530bfad1SHao Jia
2688530bfad1SHao Jia #ifdef CONFIG_RT_GROUP_SCHED
2689530bfad1SHao Jia rt_rq = task_group(p)->rt_rq[cpu];
2690530bfad1SHao Jia #else
2691530bfad1SHao Jia rt_rq = &cpu_rq(cpu)->rt;
2692530bfad1SHao Jia #endif
2693530bfad1SHao Jia
2694530bfad1SHao Jia return rt_rq_throttled(rt_rq);
2695530bfad1SHao Jia }
2696530bfad1SHao Jia #endif
2697530bfad1SHao Jia
269843c31ac0SPeter Zijlstra DEFINE_SCHED_CLASS(rt) = {
269943c31ac0SPeter Zijlstra
2700391e43daSPeter Zijlstra .enqueue_task = enqueue_task_rt,
2701391e43daSPeter Zijlstra .dequeue_task = dequeue_task_rt,
2702391e43daSPeter Zijlstra .yield_task = yield_task_rt,
2703391e43daSPeter Zijlstra
2704b2f7d750SIngo Molnar .wakeup_preempt = wakeup_preempt_rt,
2705391e43daSPeter Zijlstra
2706391e43daSPeter Zijlstra .pick_next_task = pick_next_task_rt,
2707391e43daSPeter Zijlstra .put_prev_task = put_prev_task_rt,
270803b7fad1SPeter Zijlstra .set_next_task = set_next_task_rt,
2709391e43daSPeter Zijlstra
2710391e43daSPeter Zijlstra #ifdef CONFIG_SMP
27116e2df058SPeter Zijlstra .balance = balance_rt,
271221f56ffeSPeter Zijlstra .pick_task = pick_task_rt,
2713391e43daSPeter Zijlstra .select_task_rq = select_task_rq_rt,
27146c37067eSPeter Zijlstra .set_cpus_allowed = set_cpus_allowed_common,
2715391e43daSPeter Zijlstra .rq_online = rq_online_rt,
2716391e43daSPeter Zijlstra .rq_offline = rq_offline_rt,
2717391e43daSPeter Zijlstra .task_woken = task_woken_rt,
2718391e43daSPeter Zijlstra .switched_from = switched_from_rt,
2719a7c81556SPeter Zijlstra .find_lock_rq = find_lock_lowest_rq,
2720391e43daSPeter Zijlstra #endif
2721391e43daSPeter Zijlstra
2722391e43daSPeter Zijlstra .task_tick = task_tick_rt,
2723391e43daSPeter Zijlstra
2724391e43daSPeter Zijlstra .get_rr_interval = get_rr_interval_rt,
2725391e43daSPeter Zijlstra
2726391e43daSPeter Zijlstra .prio_changed = prio_changed_rt,
2727391e43daSPeter Zijlstra .switched_to = switched_to_rt,
27286e998916SStanislaw Gruszka
27296e998916SStanislaw Gruszka .update_curr = update_curr_rt,
2730982d9cdcSPatrick Bellasi
2731530bfad1SHao Jia #ifdef CONFIG_SCHED_CORE
2732530bfad1SHao Jia .task_is_throttled = task_is_throttled_rt,
2733530bfad1SHao Jia #endif
2734530bfad1SHao Jia
2735982d9cdcSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK
2736982d9cdcSPatrick Bellasi .uclamp_enabled = 1,
2737982d9cdcSPatrick Bellasi #endif
2738391e43daSPeter Zijlstra };
2739391e43daSPeter Zijlstra
27408887cd99SNicolas Pitre #ifdef CONFIG_RT_GROUP_SCHED
27418887cd99SNicolas Pitre /*
27428887cd99SNicolas Pitre * Ensure that the real time constraints are schedulable.
27438887cd99SNicolas Pitre */
27448887cd99SNicolas Pitre static DEFINE_MUTEX(rt_constraints_mutex);
27458887cd99SNicolas Pitre
27468887cd99SNicolas Pitre static inline int tg_has_rt_tasks(struct task_group *tg)
27478887cd99SNicolas Pitre {
2748b4fb015eSKonstantin Khlebnikov struct task_struct *task;
2749b4fb015eSKonstantin Khlebnikov struct css_task_iter it;
2750b4fb015eSKonstantin Khlebnikov int ret = 0;
27518887cd99SNicolas Pitre
27528887cd99SNicolas Pitre /*
27538887cd99SNicolas Pitre * Autogroups do not have RT tasks; see autogroup_create().
27548887cd99SNicolas Pitre */
27558887cd99SNicolas Pitre if (task_group_is_autogroup(tg))
27568887cd99SNicolas Pitre return 0;
27578887cd99SNicolas Pitre
2758b4fb015eSKonstantin Khlebnikov css_task_iter_start(&tg->css, 0, &it);
2759b4fb015eSKonstantin Khlebnikov while (!ret && (task = css_task_iter_next(&it)))
2760b4fb015eSKonstantin Khlebnikov ret |= rt_task(task);
2761b4fb015eSKonstantin Khlebnikov css_task_iter_end(&it);
27628887cd99SNicolas Pitre
2763b4fb015eSKonstantin Khlebnikov return ret;
27648887cd99SNicolas Pitre }
27658887cd99SNicolas Pitre
27668887cd99SNicolas Pitre struct rt_schedulable_data {
27678887cd99SNicolas Pitre struct task_group *tg;
27688887cd99SNicolas Pitre u64 rt_period;
27698887cd99SNicolas Pitre u64 rt_runtime;
27708887cd99SNicolas Pitre };
27718887cd99SNicolas Pitre
27728887cd99SNicolas Pitre static int tg_rt_schedulable(struct task_group *tg, void *data)
27738887cd99SNicolas Pitre {
27748887cd99SNicolas Pitre struct rt_schedulable_data *d = data;
27758887cd99SNicolas Pitre struct task_group *child;
27768887cd99SNicolas Pitre unsigned long total, sum = 0;
27778887cd99SNicolas Pitre u64 period, runtime;
27788887cd99SNicolas Pitre
27798887cd99SNicolas Pitre period = ktime_to_ns(tg->rt_bandwidth.rt_period);
27808887cd99SNicolas Pitre runtime = tg->rt_bandwidth.rt_runtime;
27818887cd99SNicolas Pitre
27828887cd99SNicolas Pitre if (tg == d->tg) {
27838887cd99SNicolas Pitre period = d->rt_period;
27848887cd99SNicolas Pitre runtime = d->rt_runtime;
27858887cd99SNicolas Pitre }
27868887cd99SNicolas Pitre
27878887cd99SNicolas Pitre /*
27888887cd99SNicolas Pitre * Cannot have more runtime than the period.
27898887cd99SNicolas Pitre */
27908887cd99SNicolas Pitre if (runtime > period && runtime != RUNTIME_INF)
27918887cd99SNicolas Pitre return -EINVAL;
27928887cd99SNicolas Pitre
27938887cd99SNicolas Pitre /*
2794b4fb015eSKonstantin Khlebnikov * Ensure we don't starve existing RT tasks if runtime turns zero.
27958887cd99SNicolas Pitre */
2796b4fb015eSKonstantin Khlebnikov if (rt_bandwidth_enabled() && !runtime &&
2797b4fb015eSKonstantin Khlebnikov tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
27988887cd99SNicolas Pitre return -EBUSY;
27998887cd99SNicolas Pitre
28008887cd99SNicolas Pitre total = to_ratio(period, runtime);
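/*
 * Illustrative arithmetic, assuming to_ratio() scales by BW_SHIFT (20):
 * period = 1000000us and runtime = 950000us give
 * (950000 << 20) / 1000000 ~= 996147, i.e. about 0.95 of BW_UNIT.
 */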
28018887cd99SNicolas Pitre
28028887cd99SNicolas Pitre /*
28038887cd99SNicolas Pitre * Nobody can have more than the global setting allows.
28048887cd99SNicolas Pitre */
28058887cd99SNicolas Pitre if (total > to_ratio(global_rt_period(), global_rt_runtime()))
28068887cd99SNicolas Pitre return -EINVAL;
28078887cd99SNicolas Pitre
28088887cd99SNicolas Pitre /*
28098887cd99SNicolas Pitre * The sum of our children's runtime should not exceed our own.
28108887cd99SNicolas Pitre */
28118887cd99SNicolas Pitre list_for_each_entry_rcu(child, &tg->children, siblings) {
28128887cd99SNicolas Pitre period = ktime_to_ns(child->rt_bandwidth.rt_period);
28138887cd99SNicolas Pitre runtime = child->rt_bandwidth.rt_runtime;
28148887cd99SNicolas Pitre
28158887cd99SNicolas Pitre if (child == d->tg) {
28168887cd99SNicolas Pitre period = d->rt_period;
28178887cd99SNicolas Pitre runtime = d->rt_runtime;
28188887cd99SNicolas Pitre }
28198887cd99SNicolas Pitre
28208887cd99SNicolas Pitre sum += to_ratio(period, runtime);
28218887cd99SNicolas Pitre }
28228887cd99SNicolas Pitre
28238887cd99SNicolas Pitre if (sum > total)
28248887cd99SNicolas Pitre return -EINVAL;
28258887cd99SNicolas Pitre
28268887cd99SNicolas Pitre return 0;
28278887cd99SNicolas Pitre }
28288887cd99SNicolas Pitre
28298887cd99SNicolas Pitre static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
28308887cd99SNicolas Pitre {
28318887cd99SNicolas Pitre int ret;
28328887cd99SNicolas Pitre
28338887cd99SNicolas Pitre struct rt_schedulable_data data = {
28348887cd99SNicolas Pitre .tg = tg,
28358887cd99SNicolas Pitre .rt_period = period,
28368887cd99SNicolas Pitre .rt_runtime = runtime,
28378887cd99SNicolas Pitre };
28388887cd99SNicolas Pitre
28398887cd99SNicolas Pitre rcu_read_lock();
28408887cd99SNicolas Pitre ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
28418887cd99SNicolas Pitre rcu_read_unlock();
28428887cd99SNicolas Pitre
28438887cd99SNicolas Pitre return ret;
28448887cd99SNicolas Pitre }
28458887cd99SNicolas Pitre
28468887cd99SNicolas Pitre static int tg_set_rt_bandwidth(struct task_group *tg,
28478887cd99SNicolas Pitre u64 rt_period, u64 rt_runtime)
28488887cd99SNicolas Pitre {
28498887cd99SNicolas Pitre int i, err = 0;
28508887cd99SNicolas Pitre
28518887cd99SNicolas Pitre /*
28528887cd99SNicolas Pitre * Disallowing the root group RT runtime is BAD; it would disallow the
28538887cd99SNicolas Pitre * kernel from creating (and/or operating) RT threads.
28548887cd99SNicolas Pitre */
28558887cd99SNicolas Pitre if (tg == &root_task_group && rt_runtime == 0)
28568887cd99SNicolas Pitre return -EINVAL;
28578887cd99SNicolas Pitre
28588887cd99SNicolas Pitre /* A zero period doesn't make any sense. */
28598887cd99SNicolas Pitre if (rt_period == 0)
28608887cd99SNicolas Pitre return -EINVAL;
28618887cd99SNicolas Pitre
2862d505b8afSHuaixin Chang /*
2863d505b8afSHuaixin Chang * Bound the quota to defend against overflow during the bandwidth shift.
2864d505b8afSHuaixin Chang */
2865d505b8afSHuaixin Chang if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2866d505b8afSHuaixin Chang return -EINVAL;
2867d505b8afSHuaixin Chang
28688887cd99SNicolas Pitre mutex_lock(&rt_constraints_mutex);
28698887cd99SNicolas Pitre err = __rt_schedulable(tg, rt_period, rt_runtime);
28708887cd99SNicolas Pitre if (err)
28718887cd99SNicolas Pitre goto unlock;
28728887cd99SNicolas Pitre
28738887cd99SNicolas Pitre raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
28748887cd99SNicolas Pitre tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
28758887cd99SNicolas Pitre tg->rt_bandwidth.rt_runtime = rt_runtime;
28768887cd99SNicolas Pitre
28778887cd99SNicolas Pitre for_each_possible_cpu(i) {
28788887cd99SNicolas Pitre struct rt_rq *rt_rq = tg->rt_rq[i];
28798887cd99SNicolas Pitre
28808887cd99SNicolas Pitre raw_spin_lock(&rt_rq->rt_runtime_lock);
28818887cd99SNicolas Pitre rt_rq->rt_runtime = rt_runtime;
28828887cd99SNicolas Pitre raw_spin_unlock(&rt_rq->rt_runtime_lock);
28838887cd99SNicolas Pitre }
28848887cd99SNicolas Pitre raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
28858887cd99SNicolas Pitre unlock:
28868887cd99SNicolas Pitre mutex_unlock(&rt_constraints_mutex);
28878887cd99SNicolas Pitre
28888887cd99SNicolas Pitre return err;
28898887cd99SNicolas Pitre }
28908887cd99SNicolas Pitre
28918887cd99SNicolas Pitre int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
28928887cd99SNicolas Pitre {
28938887cd99SNicolas Pitre u64 rt_runtime, rt_period;
28948887cd99SNicolas Pitre
28958887cd99SNicolas Pitre rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
28968887cd99SNicolas Pitre rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
28978887cd99SNicolas Pitre if (rt_runtime_us < 0)
28988887cd99SNicolas Pitre rt_runtime = RUNTIME_INF;
28991a010e29SKonstantin Khlebnikov else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
29001a010e29SKonstantin Khlebnikov return -EINVAL;
29018887cd99SNicolas Pitre
29028887cd99SNicolas Pitre return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
29038887cd99SNicolas Pitre }
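/*
 * Usage sketch (assuming the cgroup v1 cpu controller is mounted at
 * /sys/fs/cgroup/cpu): writes to a group's cpu.rt_runtime_us end up here,
 * e.g.
 *
 *	echo 300000 > /sys/fs/cgroup/cpu/rtgroup/cpu.rt_runtime_us
 *
 * grants that group 300ms of RT runtime per rt_period.
 */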
29048887cd99SNicolas Pitre
29058887cd99SNicolas Pitre long sched_group_rt_runtime(struct task_group *tg)
29068887cd99SNicolas Pitre {
29078887cd99SNicolas Pitre u64 rt_runtime_us;
29088887cd99SNicolas Pitre
29098887cd99SNicolas Pitre if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
29108887cd99SNicolas Pitre return -1;
29118887cd99SNicolas Pitre
29128887cd99SNicolas Pitre rt_runtime_us = tg->rt_bandwidth.rt_runtime;
29138887cd99SNicolas Pitre do_div(rt_runtime_us, NSEC_PER_USEC);
29148887cd99SNicolas Pitre return rt_runtime_us;
29158887cd99SNicolas Pitre }
29168887cd99SNicolas Pitre
29178887cd99SNicolas Pitre int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
29188887cd99SNicolas Pitre {
29198887cd99SNicolas Pitre u64 rt_runtime, rt_period;
29208887cd99SNicolas Pitre
29211a010e29SKonstantin Khlebnikov if (rt_period_us > U64_MAX / NSEC_PER_USEC)
29221a010e29SKonstantin Khlebnikov return -EINVAL;
29231a010e29SKonstantin Khlebnikov
29248887cd99SNicolas Pitre rt_period = rt_period_us * NSEC_PER_USEC;
29258887cd99SNicolas Pitre rt_runtime = tg->rt_bandwidth.rt_runtime;
29268887cd99SNicolas Pitre
29278887cd99SNicolas Pitre return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
29288887cd99SNicolas Pitre }
29298887cd99SNicolas Pitre
29308887cd99SNicolas Pitre long sched_group_rt_period(struct task_group *tg)
29318887cd99SNicolas Pitre {
29328887cd99SNicolas Pitre u64 rt_period_us;
29338887cd99SNicolas Pitre
29348887cd99SNicolas Pitre rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
29358887cd99SNicolas Pitre do_div(rt_period_us, NSEC_PER_USEC);
29368887cd99SNicolas Pitre return rt_period_us;
29378887cd99SNicolas Pitre }
29388887cd99SNicolas Pitre
293928f152cdSBaisong Zhong #ifdef CONFIG_SYSCTL
29408887cd99SNicolas Pitre static int sched_rt_global_constraints(void)
29418887cd99SNicolas Pitre {
29428887cd99SNicolas Pitre int ret = 0;
29438887cd99SNicolas Pitre
29448887cd99SNicolas Pitre mutex_lock(&rt_constraints_mutex);
29458887cd99SNicolas Pitre ret = __rt_schedulable(NULL, 0, 0);
29468887cd99SNicolas Pitre mutex_unlock(&rt_constraints_mutex);
29478887cd99SNicolas Pitre
29488887cd99SNicolas Pitre return ret;
29498887cd99SNicolas Pitre }
295028f152cdSBaisong Zhong #endif /* CONFIG_SYSCTL */
29518887cd99SNicolas Pitre
29528887cd99SNicolas Pitre int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
29538887cd99SNicolas Pitre {
29548887cd99SNicolas Pitre /* Don't accept realtime tasks when there is no way for them to run */
29558887cd99SNicolas Pitre if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
29568887cd99SNicolas Pitre return 0;
29578887cd99SNicolas Pitre
29588887cd99SNicolas Pitre return 1;
29598887cd99SNicolas Pitre }
29608887cd99SNicolas Pitre
29618887cd99SNicolas Pitre #else /* !CONFIG_RT_GROUP_SCHED */
296228f152cdSBaisong Zhong
296328f152cdSBaisong Zhong #ifdef CONFIG_SYSCTL
29648887cd99SNicolas Pitre static int sched_rt_global_constraints(void)
29658887cd99SNicolas Pitre {
29668887cd99SNicolas Pitre unsigned long flags;
29678887cd99SNicolas Pitre int i;
29688887cd99SNicolas Pitre
29698887cd99SNicolas Pitre raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
29708887cd99SNicolas Pitre for_each_possible_cpu(i) {
29718887cd99SNicolas Pitre struct rt_rq *rt_rq = &cpu_rq(i)->rt;
29728887cd99SNicolas Pitre
29738887cd99SNicolas Pitre raw_spin_lock(&rt_rq->rt_runtime_lock);
29748887cd99SNicolas Pitre rt_rq->rt_runtime = global_rt_runtime();
29758887cd99SNicolas Pitre raw_spin_unlock(&rt_rq->rt_runtime_lock);
29768887cd99SNicolas Pitre }
29778887cd99SNicolas Pitre raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
29788887cd99SNicolas Pitre
29798887cd99SNicolas Pitre return 0;
29808887cd99SNicolas Pitre }
298128f152cdSBaisong Zhong #endif /* CONFIG_SYSCTL */
29828887cd99SNicolas Pitre #endif /* CONFIG_RT_GROUP_SCHED */
29838887cd99SNicolas Pitre
298428f152cdSBaisong Zhong #ifdef CONFIG_SYSCTL
29858887cd99SNicolas Pitre static int sched_rt_global_validate(void)
29868887cd99SNicolas Pitre {
29878887cd99SNicolas Pitre if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2988d505b8afSHuaixin Chang ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2989d505b8afSHuaixin Chang ((u64)sysctl_sched_rt_runtime *
2990d505b8afSHuaixin Chang NSEC_PER_USEC > max_rt_runtime)))
29918887cd99SNicolas Pitre return -EINVAL;
29928887cd99SNicolas Pitre
29938887cd99SNicolas Pitre return 0;
29948887cd99SNicolas Pitre }
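/*
 * Illustrative examples of the check above: runtime_us = 950000 with
 * period_us = 1000000 is accepted; runtime_us = 1100000 with the same
 * period is rejected with -EINVAL; a negative runtime_us maps to
 * RUNTIME_INF, skips the check and disables RT throttling altogether.
 */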
29958887cd99SNicolas Pitre
29968887cd99SNicolas Pitre static void sched_rt_do_global(void)
29978887cd99SNicolas Pitre {
29989b58e976SLi Hua unsigned long flags;
29999b58e976SLi Hua
30009b58e976SLi Hua raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
30018887cd99SNicolas Pitre def_rt_bandwidth.rt_runtime = global_rt_runtime();
30028887cd99SNicolas Pitre def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
30039b58e976SLi Hua raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
30048887cd99SNicolas Pitre }
30058887cd99SNicolas Pitre
3006d9ab0e63SZhen Ni static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
300732927393SChristoph Hellwig size_t *lenp, loff_t *ppos)
30088887cd99SNicolas Pitre {
30098887cd99SNicolas Pitre int old_period, old_runtime;
30108887cd99SNicolas Pitre static DEFINE_MUTEX(mutex);
30118887cd99SNicolas Pitre int ret;
30128887cd99SNicolas Pitre
30138887cd99SNicolas Pitre mutex_lock(&mutex);
30148887cd99SNicolas Pitre old_period = sysctl_sched_rt_period;
30158887cd99SNicolas Pitre old_runtime = sysctl_sched_rt_runtime;
30168887cd99SNicolas Pitre
301774fd1b8cSCyril Hrubis ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
30188887cd99SNicolas Pitre
30198887cd99SNicolas Pitre if (!ret && write) {
30208887cd99SNicolas Pitre ret = sched_rt_global_validate();
30218887cd99SNicolas Pitre if (ret)
30228887cd99SNicolas Pitre goto undo;
30238887cd99SNicolas Pitre
30248887cd99SNicolas Pitre ret = sched_dl_global_validate();
30258887cd99SNicolas Pitre if (ret)
30268887cd99SNicolas Pitre goto undo;
30278887cd99SNicolas Pitre
30288887cd99SNicolas Pitre ret = sched_rt_global_constraints();
30298887cd99SNicolas Pitre if (ret)
30308887cd99SNicolas Pitre goto undo;
30318887cd99SNicolas Pitre
30328887cd99SNicolas Pitre sched_rt_do_global();
30338887cd99SNicolas Pitre sched_dl_do_global();
30348887cd99SNicolas Pitre }
30358887cd99SNicolas Pitre if (0) {
30368887cd99SNicolas Pitre undo:
30378887cd99SNicolas Pitre sysctl_sched_rt_period = old_period;
30388887cd99SNicolas Pitre sysctl_sched_rt_runtime = old_runtime;
30398887cd99SNicolas Pitre }
30408887cd99SNicolas Pitre mutex_unlock(&mutex);
30418887cd99SNicolas Pitre
30428887cd99SNicolas Pitre return ret;
30438887cd99SNicolas Pitre }
30448887cd99SNicolas Pitre
3045dafd7a9dSZhen Ni static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
304632927393SChristoph Hellwig size_t *lenp, loff_t *ppos)
30478887cd99SNicolas Pitre {
30488887cd99SNicolas Pitre int ret;
30498887cd99SNicolas Pitre static DEFINE_MUTEX(mutex);
30508887cd99SNicolas Pitre
30518887cd99SNicolas Pitre mutex_lock(&mutex);
30528887cd99SNicolas Pitre ret = proc_dointvec(table, write, buffer, lenp, ppos);
30538887cd99SNicolas Pitre /*
30548887cd99SNicolas Pitre * Make sure that internally we keep jiffies.
30558887cd99SNicolas Pitre * Also, writing zero resets the timeslice to default:
30568887cd99SNicolas Pitre */
30578887cd99SNicolas Pitre if (!ret && write) {
30588887cd99SNicolas Pitre sched_rr_timeslice =
30598887cd99SNicolas Pitre sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
30608887cd99SNicolas Pitre msecs_to_jiffies(sysctl_sched_rr_timeslice);
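/*
 * Illustrative conversion (assuming HZ=250): writing 100 to
 * /proc/sys/kernel/sched_rr_timeslice_ms yields
 * msecs_to_jiffies(100) == 25 jiffies; writing 0 or a negative
 * value falls back to RR_TIMESLICE (100 msec worth of jiffies).
 */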
3061c1fc6484SCyril Hrubis
3062c1fc6484SCyril Hrubis if (sysctl_sched_rr_timeslice <= 0)
3063c1fc6484SCyril Hrubis sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
30648887cd99SNicolas Pitre }
30658887cd99SNicolas Pitre mutex_unlock(&mutex);
306697fb7a0aSIngo Molnar
30678887cd99SNicolas Pitre return ret;
30688887cd99SNicolas Pitre }
306928f152cdSBaisong Zhong #endif /* CONFIG_SYSCTL */
30708887cd99SNicolas Pitre
3071391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
3072391e43daSPeter Zijlstra void print_rt_stats(struct seq_file *m, int cpu)
3073391e43daSPeter Zijlstra {
3074391e43daSPeter Zijlstra rt_rq_iter_t iter;
3075391e43daSPeter Zijlstra struct rt_rq *rt_rq;
3076391e43daSPeter Zijlstra
3077391e43daSPeter Zijlstra rcu_read_lock();
3078391e43daSPeter Zijlstra for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
3079391e43daSPeter Zijlstra print_rt_rq(m, cpu, rt_rq);
3080391e43daSPeter Zijlstra rcu_read_unlock();
3081391e43daSPeter Zijlstra }
3082391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_DEBUG */
3083