xref: /openbmc/linux/kernel/sched/rt.c (revision 49bef33e4b87b743495627a529029156c6e09530)
// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#include "sched.h"

#include "pelt.h"

int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;
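
/*
 * Back-of-the-envelope check on the values above (a sketch, assuming the
 * usual definitions: RR_TIMESLICE is 100 msecs expressed in jiffies and
 * MAX_BW is (1ULL << (64 - BW_SHIFT)) - 1):
 *
 *      sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE
 *                                = (1000 / HZ) * (100 * HZ / 1000) = 100 ms
 *
 *      max_rt_runtime = 2^44 - 1 ns ~= 17592 s ~= 4.9 hours,
 *
 * which is the "more than 4 hours" the comment above refers to.
 */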

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        int idle = 0;
        int overrun;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                overrun = hrtimer_forward_now(timer, rt_b->rt_period);
                if (!overrun)
                        break;

                raw_spin_unlock(&rt_b->rt_runtime_lock);
                idle = do_sched_rt_period_timer(rt_b, overrun);
                raw_spin_lock(&rt_b->rt_runtime_lock);
        }
        if (idle)
                rt_b->rt_period_active = 0;
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
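
/*
 * Note on the loop above: hrtimer_forward_now() returns the number of
 * whole periods the timer is behind, so each pass replenishes runtime for
 * however many periods were missed and re-arms the timer; once it returns
 * 0 the next expiry lies in the future and the handler is done.
 */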

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL_HARD);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}
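
/*
 * Minimal usage sketch (this is how the root RT bandwidth is typically set
 * up from sched_init() in kernel/sched/core.c; with the default sysctls
 * this yields a 1 s period and 950 ms of runtime):
 *
 *      init_rt_bandwidth(&def_rt_bandwidth,
 *                        global_rt_period(), global_rt_runtime());
 */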

static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        raw_spin_lock(&rt_b->rt_runtime_lock);
        if (!rt_b->rt_period_active) {
                rt_b->rt_period_active = 1;
                /*
                 * SCHED_DEADLINE updates the bandwidth, as a runaway
                 * RT task alongside a DL task could hog a CPU. But DL does
                 * not reset the period. If a deadline task was running
                 * without an RT task running, it can cause RT tasks to
                 * throttle when they start up. Kick the timer right away
                 * to update the period.
                 */
                hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
                hrtimer_start_expires(&rt_b->rt_period_timer,
                                      HRTIMER_MODE_ABS_PINNED_HARD);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        do_start_rt_bandwidth(rt_b);
}

void init_rt_rq(struct rt_rq *rt_rq)
{
        struct rt_prio_array *array;
        int i;

        array = &rt_rq->active;
        for (i = 0; i < MAX_RT_PRIO; i++) {
                INIT_LIST_HEAD(array->queue + i);
                __clear_bit(i, array->bitmap);
        }
        /* delimiter for bitsearch: */
        __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
        rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
        rt_rq->highest_prio.next = MAX_RT_PRIO-1;
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
        /* We start in dequeued state, because no RT tasks are queued */
        rt_rq->rt_queued = 0;

        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
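
/*
 * The "delimiter for bitsearch" bit above means the bitmap spans
 * MAX_RT_PRIO + 1 bits with the last one permanently set, so
 * sched_find_first_bit() always finds a set bit: a result of MAX_RT_PRIO
 * is the sentinel for "no RT task queued at any priority".
 */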

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_se->rt_rq;

        return rt_rq->rq;
}

void unregister_rt_sched_group(struct task_group *tg)
{
        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);
}

void free_rt_sched_group(struct task_group *tg)
{
        int i;

        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
                if (tg->rt_se)
                        kfree(tg->rt_se[i]);
        }

        kfree(tg->rt_rq);
        kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                struct sched_rt_entity *rt_se, int cpu,
                struct sched_rt_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
        rt_rq->rt_nr_boosted = 0;
        rt_rq->rq = rq;
        rt_rq->tg = tg;

        tg->rt_rq[cpu] = rt_rq;
        tg->rt_se[cpu] = rt_se;

        if (!rt_se)
                return;

        if (!parent)
                rt_se->rt_rq = &rq->rt;
        else
                rt_se->rt_rq = parent->my_q;

        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct rt_rq *rt_rq;
        struct sched_rt_entity *rt_se;
        int i;

        tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
        tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
        if (!tg->rt_se)
                goto err;

        init_rt_bandwidth(&tg->rt_bandwidth,
                        ktime_to_ns(def_rt_bandwidth.rt_period), 0);

        for_each_possible_cpu(i) {
                rt_rq = kzalloc_node(sizeof(struct rt_rq),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_rq)
                        goto err;

                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
                        goto err_free_rq;

                init_rt_rq(rt_rq);
                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
                init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
        }

        return 1;

err_free_rq:
        kfree(rt_rq);
err:
        return 0;
}
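
/*
 * Calling-convention note (a sketch of how the group-scheduling code in
 * kernel/sched/core.c is expected to use this): alloc_rt_sched_group()
 * returns 1 on success and 0 on failure, and on failure the caller
 * unwinds through free_rt_sched_group(), which copes with the partially
 * filled per-CPU arrays allocated above.
 */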

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);

        return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct rq *rq = rq_of_rt_se(rt_se);

        return &rq->rt;
}

void unregister_rt_sched_group(struct task_group *tg) { }

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        return rq->online && rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         *
         * Matched by the barrier in pull_rt_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
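
/*
 * The barrier pairing above in sketch form; pull_rt_task() reads the two
 * fields in the opposite order:
 *
 *      rt_set_overload()               pull_rt_task()
 *        cpumask_set_cpu(rto_mask)       atomic_read(rto_count)
 *        smp_wmb()                       smp_rmb()
 *        atomic_inc(rto_count)           scan rto_mask
 *
 * A CPU that observes a non-zero rto_count is therefore guaranteed to
 * also observe the corresponding bit set in rto_mask.
 */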

static void update_rt_migration(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                if (!rt_rq->overloaded) {
                        rt_set_overload(rq_of_rt_rq(rt_rq));
                        rt_rq->overloaded = 1;
                }
        } else if (rt_rq->overloaded) {
                rt_clear_overload(rq_of_rt_rq(rt_rq));
                rt_rq->overloaded = 0;
        }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total++;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;

        update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total--;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;

        update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
        return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void rt_queue_push_tasks(struct rq *rq)
{
        if (!has_pushable_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
        queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
        plist_node_init(&p->pushable_tasks, p->prio);
        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the highest prio pushable task */
        if (p->prio < rq->rt.highest_prio.next)
                rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the new highest prio pushable task */
        if (has_pushable_tasks(rq)) {
                p = plist_first_entry(&rq->rt.pushable_tasks,
                                      struct task_struct, pushable_tasks);
                rq->rt.highest_prio.next = p->prio;
        } else {
                rq->rt.highest_prio.next = MAX_RT_PRIO-1;
        }
}
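
/*
 * The pushable list is a plist keyed on p->prio, so lower numeric values
 * (higher RT priority) sort first: with pushable tasks at prios 10, 20
 * and 30, plist_first_entry() above yields the prio-10 task, and
 * highest_prio.next tracks 10 until that task is removed.
 */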

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
        return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the
 * uclamp settings.
 *
 * This check is only important for heterogeneous systems where the
 * uclamp_min value is higher than the capacity of a @cpu. For
 * non-heterogeneous systems this function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
        unsigned int min_cap;
        unsigned int max_cap;
        unsigned int cpu_cap;

        /* Only heterogeneous systems can benefit from this check */
        if (!static_branch_unlikely(&sched_asym_cpucapacity))
                return true;

        min_cap = uclamp_eff_value(p, UCLAMP_MIN);
        max_cap = uclamp_eff_value(p, UCLAMP_MAX);

        cpu_cap = capacity_orig_of(cpu);

        return cpu_cap >= min(min_cap, max_cap);
}
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
        return true;
}
#endif
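
/*
 * Worked example (hypothetical capacities on an asymmetric system): a
 * task with uclamp_min = 512 and uclamp_max = 1024 waking on a little CPU
 * with capacity_orig_of(cpu) == 446 gives min(512, 1024) = 512 > 446, so
 * the task does not fit there and the wakeup path will look for a bigger
 * CPU.
 */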

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
        do {
                tg = list_entry_rcu(tg->list.next,
                        typeof(struct task_group), list);
        } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

        if (&tg->list == &task_groups)
                tg = NULL;

        return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}
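
/*
 * Iteration sketch: for_each_sched_rt_entity() walks from a task's
 * sched_rt_entity up the ->parent chain to the root group, so the
 * charging and (de)queueing paths below apply at every level of the
 * group hierarchy; the !CONFIG_RT_GROUP_SCHED variant further down
 * degenerates to visiting exactly one entity, the task's own.
 */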

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct rq *rq = rq_of_rt_rq(rt_rq);
        struct sched_rt_entity *rt_se;

        int cpu = cpu_of(rq);

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_rq->rt_nr_running) {
                if (!rt_se)
                        enqueue_top_rt_rq(rt_rq);
                else if (!on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, 0);

                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_curr(rq);
        }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se;
        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (!rt_se) {
                dequeue_top_rt_rq(rt_rq);
                /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
                cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
        } else if (on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (!rt_rq->rt_nr_running)
                return;

        enqueue_top_rt_rq(rt_rq);
        resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

        return (hrtimer_active(&rt_b->rt_period_timer) ||
                rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime; see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
        int i, weight;
        u64 rt_period;

        weight = cpumask_weight(rd->span);

        raw_spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;

                if (iter == rt_rq)
                        continue;

                raw_spin_lock(&iter->rt_runtime_lock);
                /*
                 * Either all rqs have inf runtime and there's nothing to steal
                 * or __disable_runtime() below sets a specific rq to inf to
                 * indicate it's been disabled and disallow stealing.
                 */
                if (iter->rt_runtime == RUNTIME_INF)
                        goto next;

                /*
                 * From runqueues with spare time, take 1/n part of their
                 * spare time, but no more than our period.
                 */
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        diff = div_u64((u64)diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
                        if (rt_rq->rt_runtime == rt_period) {
                                raw_spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
next:
                raw_spin_unlock(&iter->rt_runtime_lock);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}
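
/*
 * Worked example of the 1/n borrowing above (hypothetical numbers with
 * the default bandwidth): a 2-CPU root domain gives weight == 2, and our
 * rt_runtime is exhausted at 950 ms of a 1 s period. A neighbour with
 * rt_runtime = 950 ms and rt_time = 150 ms has 800 ms spare, so we could
 * take 800 / 2 = 400 ms; since that would push us past rt_period, the
 * transfer is capped at 1000 - 950 = 50 ms and the loop breaks with a
 * full period's worth of runtime.
 */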

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
        struct root_domain *rd = rq->rd;
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
                s64 want;
                int i;

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * Either we're all inf and nobody needs to borrow, or we're
                 * already disabled and thus have nothing to do, or we have
                 * exactly the right amount of runtime to take out.
                 */
                if (rt_rq->rt_runtime == RUNTIME_INF ||
                                rt_rq->rt_runtime == rt_b->rt_runtime)
                        goto balanced;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);

                /*
                 * Calculate the difference between what we started out with
                 * and what we currently have; that's the amount of runtime
                 * we lent and now have to reclaim.
                 */
                want = rt_b->rt_runtime - rt_rq->rt_runtime;

                /*
                 * Greedy reclaim, take back as much as we can.
                 */
                for_each_cpu(i, rd->span) {
                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                        s64 diff;

                        /*
                         * Can't reclaim from ourselves or disabled runqueues.
                         */
                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                continue;

                        raw_spin_lock(&iter->rt_runtime_lock);
                        if (want > 0) {
                                diff = min_t(s64, iter->rt_runtime, want);
                                iter->rt_runtime -= diff;
                                want -= diff;
                        } else {
                                iter->rt_runtime -= want;
                                want -= want;
                        }
                        raw_spin_unlock(&iter->rt_runtime_lock);

                        if (!want)
                                break;
                }

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We cannot be left wanting - that would mean some runtime
                 * leaked out of the system.
                 */
                BUG_ON(want);
balanced:
                /*
                 * Disable all the borrow logic by pretending we have inf
                 * runtime - in which case borrowing doesn't make sense.
                 */
                rt_rq->rt_runtime = RUNTIME_INF;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);

                /* Make rt_rq available for pick_next_task() */
                sched_rt_rq_enqueue(rt_rq);
        }
}

static void __enable_runtime(struct rq *rq)
{
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        /*
         * Reset each runqueue's bandwidth settings
         */
        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                rt_rq->rt_time = 0;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void balance_runtime(struct rt_rq *rt_rq)
{
        if (!sched_feat(RT_RUNTIME_SHARE))
                return;

        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                do_balance_runtime(rt_rq);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
        }
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
        int i, idle = 1, throttled = 0;
        const struct cpumask *span;

        span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
        /*
         * FIXME: isolated CPUs should really leave the root task group,
         * whether they are isolcpus or were isolated via cpusets, lest
         * the timer run on a CPU which does not service all runqueues,
         * potentially leaving other CPUs indefinitely throttled.  If
         * isolation is really required, the user will turn the throttle
         * off to kill the perturbations it causes anyway.  Meanwhile,
         * this maintains functionality for boot and/or troubleshooting.
         */
        if (rt_b == &root_task_group.rt_bandwidth)
                span = cpu_online_mask;
#endif
        for_each_cpu(i, span) {
                int enqueue = 0;
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);
                int skip;

                /*
                 * When span == cpu_online_mask, taking each rq->lock
                 * can be time-consuming. Try to avoid it when possible.
                 */
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
                        rt_rq->rt_runtime = rt_b->rt_runtime;
                skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                if (skip)
                        continue;

                raw_spin_rq_lock(rq);
                update_rq_clock(rq);

                if (rt_rq->rt_time) {
                        u64 runtime;

                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        if (rt_rq->rt_throttled)
                                balance_runtime(rt_rq);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;

                                /*
                                 * When we're idle and a woken (rt) task is
                                 * throttled, check_preempt_curr() will set
                                 * skip_update and the time between the wakeup
                                 * and this unthrottle will get accounted as
                                 * 'runtime'.
                                 */
                                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
                                        rq_clock_cancel_skipupdate(rq);
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                } else if (rt_rq->rt_nr_running) {
                        idle = 0;
                        if (!rt_rq_throttled(rt_rq))
                                enqueue = 1;
                }
                if (rt_rq->rt_throttled)
                        throttled = 1;

                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
                raw_spin_rq_unlock(rq);
        }

        if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
                return 1;

        return idle;
}
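
/*
 * Return-value sketch: returning 1 ("idle") tells sched_rt_period_timer()
 * above to let the hrtimer die (HRTIMER_NORESTART), either because no
 * runqueue is accruing rt_time or because bandwidth enforcement is
 * effectively off; returning 0 keeps the replenishment timer firing every
 * rt_period.
 */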

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio.curr;
#endif

        return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
        u64 runtime = sched_rt_runtime(rt_rq);

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        if (runtime >= sched_rt_period(rt_rq))
                return 0;

        balance_runtime(rt_rq);
        runtime = sched_rt_runtime(rt_rq);
        if (runtime == RUNTIME_INF)
                return 0;

        if (rt_rq->rt_time > runtime) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                /*
                 * Don't actually throttle groups that have no runtime assigned
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
                        rt_rq->rt_throttled = 1;
                        printk_deferred_once("sched: RT throttling activated\n");
                } else {
                        /*
                         * In case we did anyway, make it go away;
                         * replenishment is a joke, since it would replenish
                         * us with exactly 0 ns.
                         */
                        rt_rq->rt_time = 0;
                }

                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
                        return 1;
                }
        }

        return 0;
}
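
/*
 * Worked throttle example (default sysctls: sched_rt_period_us = 1000000,
 * sched_rt_runtime_us = 950000): a SCHED_FIFO task spinning on an
 * otherwise idle CPU accrues rt_time via update_curr_rt() below until it
 * crosses the 950 ms runtime; rt_throttled is then set, the rt_rq is
 * dequeued, and non-RT tasks get the remaining 50 ms of each period until
 * the period timer replenishes the budget.
 */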

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
        u64 delta_exec;
        u64 now;

        if (curr->sched_class != &rt_sched_class)
                return;

        now = rq_clock_task(rq);
        delta_exec = now - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0))
                return;

        schedstat_set(curr->stats.exec_max,
                      max(curr->stats.exec_max, delta_exec));

        trace_sched_stat_runtime(curr, delta_exec, 0);

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = now;
        cgroup_account_cputime(curr, delta_exec);

        if (!rt_bandwidth_enabled())
                return;

        for_each_sched_rt_entity(rt_se) {
                struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
                int exceeded;

                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        rt_rq->rt_time += delta_exec;
                        exceeded = sched_rt_runtime_exceeded(rt_rq);
                        if (exceeded)
                                resched_curr(rq);
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                        if (exceeded)
                                do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
                }
        }
}

static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        BUG_ON(&rq->rt != rt_rq);

        if (!rt_rq->rt_queued)
                return;

        BUG_ON(!rq->nr_running);

        sub_nr_running(rq, rt_rq->rt_nr_running);
        rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        BUG_ON(&rq->rt != rt_rq);

        if (rt_rq->rt_queued)
                return;

        if (rt_rq_throttled(rt_rq))
                return;

        if (rt_rq->rt_nr_running) {
                add_nr_running(rq, rt_rq->rt_nr_running);
                rt_rq->rt_queued = 1;
        }

        /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq, 0);
}
1092f4ebcbc0SKirill Tkhai 
1093391e43daSPeter Zijlstra #if defined CONFIG_SMP
1094391e43daSPeter Zijlstra 
1095391e43daSPeter Zijlstra static void
1096391e43daSPeter Zijlstra inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1097391e43daSPeter Zijlstra {
1098391e43daSPeter Zijlstra 	struct rq *rq = rq_of_rt_rq(rt_rq);
1099391e43daSPeter Zijlstra 
1100757dfcaaSKirill Tkhai #ifdef CONFIG_RT_GROUP_SCHED
1101757dfcaaSKirill Tkhai 	/*
1102757dfcaaSKirill Tkhai 	 * Change rq's cpupri only if rt_rq is the top queue.
1103757dfcaaSKirill Tkhai 	 */
1104757dfcaaSKirill Tkhai 	if (&rq->rt != rt_rq)
1105757dfcaaSKirill Tkhai 		return;
1106757dfcaaSKirill Tkhai #endif
1107391e43daSPeter Zijlstra 	if (rq->online && prio < prev_prio)
1108391e43daSPeter Zijlstra 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1109391e43daSPeter Zijlstra }
1110391e43daSPeter Zijlstra 
1111391e43daSPeter Zijlstra static void
1112391e43daSPeter Zijlstra dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1113391e43daSPeter Zijlstra {
1114391e43daSPeter Zijlstra 	struct rq *rq = rq_of_rt_rq(rt_rq);
1115391e43daSPeter Zijlstra 
1116757dfcaaSKirill Tkhai #ifdef CONFIG_RT_GROUP_SCHED
1117757dfcaaSKirill Tkhai 	/*
1118757dfcaaSKirill Tkhai 	 * Change rq's cpupri only if rt_rq is the top queue.
1119757dfcaaSKirill Tkhai 	 */
1120757dfcaaSKirill Tkhai 	if (&rq->rt != rt_rq)
1121757dfcaaSKirill Tkhai 		return;
1122757dfcaaSKirill Tkhai #endif
1123391e43daSPeter Zijlstra 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1124391e43daSPeter Zijlstra 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1125391e43daSPeter Zijlstra }
1126391e43daSPeter Zijlstra 
1127391e43daSPeter Zijlstra #else /* CONFIG_SMP */
1128391e43daSPeter Zijlstra 
1129391e43daSPeter Zijlstra static inline
1130391e43daSPeter Zijlstra void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1131391e43daSPeter Zijlstra static inline
1132391e43daSPeter Zijlstra void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1133391e43daSPeter Zijlstra 
1134391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
1135391e43daSPeter Zijlstra 
1136391e43daSPeter Zijlstra #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1137391e43daSPeter Zijlstra static void
1138391e43daSPeter Zijlstra inc_rt_prio(struct rt_rq *rt_rq, int prio)
1139391e43daSPeter Zijlstra {
1140391e43daSPeter Zijlstra 	int prev_prio = rt_rq->highest_prio.curr;
1141391e43daSPeter Zijlstra 
1142391e43daSPeter Zijlstra 	if (prio < prev_prio)
1143391e43daSPeter Zijlstra 		rt_rq->highest_prio.curr = prio;
1144391e43daSPeter Zijlstra 
1145391e43daSPeter Zijlstra 	inc_rt_prio_smp(rt_rq, prio, prev_prio);
1146391e43daSPeter Zijlstra }
1147391e43daSPeter Zijlstra 
1148391e43daSPeter Zijlstra static void
1149391e43daSPeter Zijlstra dec_rt_prio(struct rt_rq *rt_rq, int prio)
1150391e43daSPeter Zijlstra {
1151391e43daSPeter Zijlstra 	int prev_prio = rt_rq->highest_prio.curr;
1152391e43daSPeter Zijlstra 
1153391e43daSPeter Zijlstra 	if (rt_rq->rt_nr_running) {
1154391e43daSPeter Zijlstra 
1155391e43daSPeter Zijlstra 		WARN_ON(prio < prev_prio);
1156391e43daSPeter Zijlstra 
1157391e43daSPeter Zijlstra 		/*
1158391e43daSPeter Zijlstra 		 * This may have been our highest task, and therefore
1158391e43daSPeter Zijlstra 		 * This may have been our highest priority task, and therefore
1160391e43daSPeter Zijlstra 		 */
1161391e43daSPeter Zijlstra 		if (prio == prev_prio) {
1162391e43daSPeter Zijlstra 			struct rt_prio_array *array = &rt_rq->active;
1163391e43daSPeter Zijlstra 
1164391e43daSPeter Zijlstra 			rt_rq->highest_prio.curr =
1165391e43daSPeter Zijlstra 				sched_find_first_bit(array->bitmap);
1166391e43daSPeter Zijlstra 		}
1167391e43daSPeter Zijlstra 
1168934fc331SPeter Zijlstra 	} else {
1169934fc331SPeter Zijlstra 		rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
1170934fc331SPeter Zijlstra 	}
1171391e43daSPeter Zijlstra 
1172391e43daSPeter Zijlstra 	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1173391e43daSPeter Zijlstra }
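
/*
 * Worked example for dec_rt_prio() (hypothetical priorities): with
 * entities queued at prio 5 and prio 20, highest_prio.curr is 5.
 * Dequeueing the prio 5 entity hits the prio == prev_prio case, and
 * sched_find_first_bit() recomputes highest_prio.curr to 20; once the
 * rt_rq runs empty, the priority is parked at MAX_RT_PRIO-1.
 */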
1174391e43daSPeter Zijlstra 
1175391e43daSPeter Zijlstra #else
1176391e43daSPeter Zijlstra 
1177391e43daSPeter Zijlstra static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1178391e43daSPeter Zijlstra static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1179391e43daSPeter Zijlstra 
1180391e43daSPeter Zijlstra #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1181391e43daSPeter Zijlstra 
1182391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
1183391e43daSPeter Zijlstra 
1184391e43daSPeter Zijlstra static void
1185391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1186391e43daSPeter Zijlstra {
1187391e43daSPeter Zijlstra 	if (rt_se_boosted(rt_se))
1188391e43daSPeter Zijlstra 		rt_rq->rt_nr_boosted++;
1189391e43daSPeter Zijlstra 
1190391e43daSPeter Zijlstra 	if (rt_rq->tg)
1191391e43daSPeter Zijlstra 		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1192391e43daSPeter Zijlstra }
1193391e43daSPeter Zijlstra 
1194391e43daSPeter Zijlstra static void
1195391e43daSPeter Zijlstra dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1196391e43daSPeter Zijlstra {
1197391e43daSPeter Zijlstra 	if (rt_se_boosted(rt_se))
1198391e43daSPeter Zijlstra 		rt_rq->rt_nr_boosted--;
1199391e43daSPeter Zijlstra 
1200391e43daSPeter Zijlstra 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1201391e43daSPeter Zijlstra }
1202391e43daSPeter Zijlstra 
1203391e43daSPeter Zijlstra #else /* CONFIG_RT_GROUP_SCHED */
1204391e43daSPeter Zijlstra 
1205391e43daSPeter Zijlstra static void
1206391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1207391e43daSPeter Zijlstra {
1208391e43daSPeter Zijlstra 	start_rt_bandwidth(&def_rt_bandwidth);
1209391e43daSPeter Zijlstra }
1210391e43daSPeter Zijlstra 
1211391e43daSPeter Zijlstra static inline
1212391e43daSPeter Zijlstra void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1213391e43daSPeter Zijlstra 
1214391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */
1215391e43daSPeter Zijlstra 
1216391e43daSPeter Zijlstra static inline
121722abdef3SKirill Tkhai unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
121822abdef3SKirill Tkhai {
121922abdef3SKirill Tkhai 	struct rt_rq *group_rq = group_rt_rq(rt_se);
122022abdef3SKirill Tkhai 
122122abdef3SKirill Tkhai 	if (group_rq)
122222abdef3SKirill Tkhai 		return group_rq->rt_nr_running;
122322abdef3SKirill Tkhai 	else
122422abdef3SKirill Tkhai 		return 1;
122522abdef3SKirill Tkhai }
122622abdef3SKirill Tkhai 
122722abdef3SKirill Tkhai static inline
122801d36d0aSFrederic Weisbecker unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
122901d36d0aSFrederic Weisbecker {
123001d36d0aSFrederic Weisbecker 	struct rt_rq *group_rq = group_rt_rq(rt_se);
123101d36d0aSFrederic Weisbecker 	struct task_struct *tsk;
123201d36d0aSFrederic Weisbecker 
123301d36d0aSFrederic Weisbecker 	if (group_rq)
123401d36d0aSFrederic Weisbecker 		return group_rq->rr_nr_running;
123501d36d0aSFrederic Weisbecker 
123601d36d0aSFrederic Weisbecker 	tsk = rt_task_of(rt_se);
123701d36d0aSFrederic Weisbecker 
123801d36d0aSFrederic Weisbecker 	return (tsk->policy == SCHED_RR) ? 1 : 0;
123901d36d0aSFrederic Weisbecker }
124001d36d0aSFrederic Weisbecker 
124101d36d0aSFrederic Weisbecker static inline
1242391e43daSPeter Zijlstra void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1243391e43daSPeter Zijlstra {
1244391e43daSPeter Zijlstra 	int prio = rt_se_prio(rt_se);
1245391e43daSPeter Zijlstra 
1246391e43daSPeter Zijlstra 	WARN_ON(!rt_prio(prio));
124722abdef3SKirill Tkhai 	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
124801d36d0aSFrederic Weisbecker 	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1249391e43daSPeter Zijlstra 
1250391e43daSPeter Zijlstra 	inc_rt_prio(rt_rq, prio);
1251391e43daSPeter Zijlstra 	inc_rt_migration(rt_se, rt_rq);
1252391e43daSPeter Zijlstra 	inc_rt_group(rt_se, rt_rq);
1253391e43daSPeter Zijlstra }
1254391e43daSPeter Zijlstra 
1255391e43daSPeter Zijlstra static inline
1256391e43daSPeter Zijlstra void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1257391e43daSPeter Zijlstra {
1258391e43daSPeter Zijlstra 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1259391e43daSPeter Zijlstra 	WARN_ON(!rt_rq->rt_nr_running);
126022abdef3SKirill Tkhai 	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
126101d36d0aSFrederic Weisbecker 	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1262391e43daSPeter Zijlstra 
1263391e43daSPeter Zijlstra 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1264391e43daSPeter Zijlstra 	dec_rt_migration(rt_se, rt_rq);
1265391e43daSPeter Zijlstra 	dec_rt_group(rt_se, rt_rq);
1266391e43daSPeter Zijlstra }
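
/*
 * Accounting example (hypothetical numbers): enqueueing a group entity
 * whose group_rq holds three tasks bumps the parent's rt_nr_running by
 * three via rt_se_nr_running(), while a plain task entity contributes
 * exactly one; rr_nr_running is maintained the same way, counting only
 * SCHED_RR tasks.
 */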
1267391e43daSPeter Zijlstra 
1268ff77e468SPeter Zijlstra /*
1269ff77e468SPeter Zijlstra  * Change rt_se->run_list location unless SAVE && !MOVE
1270ff77e468SPeter Zijlstra  *
1271ff77e468SPeter Zijlstra  * assumes ENQUEUE/DEQUEUE flags match
1272ff77e468SPeter Zijlstra  */
1273ff77e468SPeter Zijlstra static inline bool move_entity(unsigned int flags)
1274ff77e468SPeter Zijlstra {
1275ff77e468SPeter Zijlstra 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1276ff77e468SPeter Zijlstra 		return false;
1277ff77e468SPeter Zijlstra 
1278ff77e468SPeter Zijlstra 	return true;
1279ff77e468SPeter Zijlstra }
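
/*
 * Illustrative flag table for move_entity(), assuming the DEQUEUE_*
 * values alias their ENQUEUE_* counterparts as required above:
 *
 *	flags				move_entity()
 *	0				true  (plain (de)queue)
 *	DEQUEUE_SAVE			false (save/restore in place)
 *	DEQUEUE_SAVE | DEQUEUE_MOVE	true  (save, but relocate)
 */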
1280ff77e468SPeter Zijlstra 
1281ff77e468SPeter Zijlstra static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1282ff77e468SPeter Zijlstra {
1283ff77e468SPeter Zijlstra 	list_del_init(&rt_se->run_list);
1284ff77e468SPeter Zijlstra 
1285ff77e468SPeter Zijlstra 	if (list_empty(array->queue + rt_se_prio(rt_se)))
1286ff77e468SPeter Zijlstra 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1287ff77e468SPeter Zijlstra 
1288ff77e468SPeter Zijlstra 	rt_se->on_list = 0;
1289ff77e468SPeter Zijlstra }
1290ff77e468SPeter Zijlstra 
129157a5c2daSYafang Shao static inline struct sched_statistics *
129257a5c2daSYafang Shao __schedstats_from_rt_se(struct sched_rt_entity *rt_se)
129357a5c2daSYafang Shao {
129457a5c2daSYafang Shao #ifdef CONFIG_RT_GROUP_SCHED
129557a5c2daSYafang Shao 	/* schedstats are not supported for rt group entities. */
129657a5c2daSYafang Shao 	if (!rt_entity_is_task(rt_se))
129757a5c2daSYafang Shao 		return NULL;
129857a5c2daSYafang Shao #endif
129957a5c2daSYafang Shao 
130057a5c2daSYafang Shao 	return &rt_task_of(rt_se)->stats;
130157a5c2daSYafang Shao }
130257a5c2daSYafang Shao 
130357a5c2daSYafang Shao static inline void
130457a5c2daSYafang Shao update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
130557a5c2daSYafang Shao {
130657a5c2daSYafang Shao 	struct sched_statistics *stats;
130757a5c2daSYafang Shao 	struct task_struct *p = NULL;
130857a5c2daSYafang Shao 
130957a5c2daSYafang Shao 	if (!schedstat_enabled())
131057a5c2daSYafang Shao 		return;
131157a5c2daSYafang Shao 
131257a5c2daSYafang Shao 	if (rt_entity_is_task(rt_se))
131357a5c2daSYafang Shao 		p = rt_task_of(rt_se);
131457a5c2daSYafang Shao 
131557a5c2daSYafang Shao 	stats = __schedstats_from_rt_se(rt_se);
131657a5c2daSYafang Shao 	if (!stats)
131757a5c2daSYafang Shao 		return;
131857a5c2daSYafang Shao 
131957a5c2daSYafang Shao 	__update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
132057a5c2daSYafang Shao }
132157a5c2daSYafang Shao 
132257a5c2daSYafang Shao static inline void
132357a5c2daSYafang Shao update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
132457a5c2daSYafang Shao {
132557a5c2daSYafang Shao 	struct sched_statistics *stats;
132657a5c2daSYafang Shao 	struct task_struct *p = NULL;
132757a5c2daSYafang Shao 
132857a5c2daSYafang Shao 	if (!schedstat_enabled())
132957a5c2daSYafang Shao 		return;
133057a5c2daSYafang Shao 
133157a5c2daSYafang Shao 	if (rt_entity_is_task(rt_se))
133257a5c2daSYafang Shao 		p = rt_task_of(rt_se);
133357a5c2daSYafang Shao 
133457a5c2daSYafang Shao 	stats = __schedstats_from_rt_se(rt_se);
133557a5c2daSYafang Shao 	if (!stats)
133657a5c2daSYafang Shao 		return;
133757a5c2daSYafang Shao 
133857a5c2daSYafang Shao 	__update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
133957a5c2daSYafang Shao }
134057a5c2daSYafang Shao 
134157a5c2daSYafang Shao static inline void
134257a5c2daSYafang Shao update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
134357a5c2daSYafang Shao 			int flags)
134457a5c2daSYafang Shao {
134557a5c2daSYafang Shao 	if (!schedstat_enabled())
134657a5c2daSYafang Shao 		return;
134757a5c2daSYafang Shao 
134857a5c2daSYafang Shao 	if (flags & ENQUEUE_WAKEUP)
134957a5c2daSYafang Shao 		update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
135057a5c2daSYafang Shao }
135157a5c2daSYafang Shao 
135257a5c2daSYafang Shao static inline void
135357a5c2daSYafang Shao update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
135457a5c2daSYafang Shao {
135557a5c2daSYafang Shao 	struct sched_statistics *stats;
135657a5c2daSYafang Shao 	struct task_struct *p = NULL;
135757a5c2daSYafang Shao 
135857a5c2daSYafang Shao 	if (!schedstat_enabled())
135957a5c2daSYafang Shao 		return;
136057a5c2daSYafang Shao 
136157a5c2daSYafang Shao 	if (rt_entity_is_task(rt_se))
136257a5c2daSYafang Shao 		p = rt_task_of(rt_se);
136357a5c2daSYafang Shao 
136457a5c2daSYafang Shao 	stats = __schedstats_from_rt_se(rt_se);
136557a5c2daSYafang Shao 	if (!stats)
136657a5c2daSYafang Shao 		return;
136757a5c2daSYafang Shao 
136857a5c2daSYafang Shao 	__update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
136957a5c2daSYafang Shao }
137057a5c2daSYafang Shao 
137157a5c2daSYafang Shao static inline void
137257a5c2daSYafang Shao update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
137357a5c2daSYafang Shao 			int flags)
137457a5c2daSYafang Shao {
137557a5c2daSYafang Shao 	struct task_struct *p = NULL;
137657a5c2daSYafang Shao 
137757a5c2daSYafang Shao 	if (!schedstat_enabled())
137857a5c2daSYafang Shao 		return;
137957a5c2daSYafang Shao 
138057a5c2daSYafang Shao 	if (rt_entity_is_task(rt_se))
138157a5c2daSYafang Shao 		p = rt_task_of(rt_se);
138257a5c2daSYafang Shao 
138357a5c2daSYafang Shao 	if ((flags & DEQUEUE_SLEEP) && p) {
138457a5c2daSYafang Shao 		unsigned int state;
138557a5c2daSYafang Shao 
138657a5c2daSYafang Shao 		state = READ_ONCE(p->__state);
138757a5c2daSYafang Shao 		if (state & TASK_INTERRUPTIBLE)
138857a5c2daSYafang Shao 			__schedstat_set(p->stats.sleep_start,
138957a5c2daSYafang Shao 					rq_clock(rq_of_rt_rq(rt_rq)));
139057a5c2daSYafang Shao 
139157a5c2daSYafang Shao 		if (state & TASK_UNINTERRUPTIBLE)
139257a5c2daSYafang Shao 			__schedstat_set(p->stats.block_start,
139357a5c2daSYafang Shao 					rq_clock(rq_of_rt_rq(rt_rq)));
139457a5c2daSYafang Shao 	}
139557a5c2daSYafang Shao }
139657a5c2daSYafang Shao 
1397ff77e468SPeter Zijlstra static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1398391e43daSPeter Zijlstra {
1399391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1400391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1401391e43daSPeter Zijlstra 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1402391e43daSPeter Zijlstra 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1403391e43daSPeter Zijlstra 
1404391e43daSPeter Zijlstra 	/*
1405391e43daSPeter Zijlstra 	 * Don't enqueue the group if it's throttled, or when it is empty.
1406391e43daSPeter Zijlstra 	 * The latter is a consequence of the former when a child group
1407391e43daSPeter Zijlstra 	 * gets throttled and the current group doesn't have any other
1408391e43daSPeter Zijlstra 	 * active members.
1409391e43daSPeter Zijlstra 	 */
1410ff77e468SPeter Zijlstra 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1411ff77e468SPeter Zijlstra 		if (rt_se->on_list)
1412ff77e468SPeter Zijlstra 			__delist_rt_entity(rt_se, array);
1413391e43daSPeter Zijlstra 		return;
1414ff77e468SPeter Zijlstra 	}
1415391e43daSPeter Zijlstra 
1416ff77e468SPeter Zijlstra 	if (move_entity(flags)) {
1417ff77e468SPeter Zijlstra 		WARN_ON_ONCE(rt_se->on_list);
1418ff77e468SPeter Zijlstra 		if (flags & ENQUEUE_HEAD)
1419391e43daSPeter Zijlstra 			list_add(&rt_se->run_list, queue);
1420391e43daSPeter Zijlstra 		else
1421391e43daSPeter Zijlstra 			list_add_tail(&rt_se->run_list, queue);
1422ff77e468SPeter Zijlstra 
1423391e43daSPeter Zijlstra 		__set_bit(rt_se_prio(rt_se), array->bitmap);
1424ff77e468SPeter Zijlstra 		rt_se->on_list = 1;
1425ff77e468SPeter Zijlstra 	}
1426ff77e468SPeter Zijlstra 	rt_se->on_rq = 1;
1427391e43daSPeter Zijlstra 
1428391e43daSPeter Zijlstra 	inc_rt_tasks(rt_se, rt_rq);
1429391e43daSPeter Zijlstra }
1430391e43daSPeter Zijlstra 
1431ff77e468SPeter Zijlstra static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1432391e43daSPeter Zijlstra {
1433391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1434391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1435391e43daSPeter Zijlstra 
1436ff77e468SPeter Zijlstra 	if (move_entity(flags)) {
1437ff77e468SPeter Zijlstra 		WARN_ON_ONCE(!rt_se->on_list);
1438ff77e468SPeter Zijlstra 		__delist_rt_entity(rt_se, array);
1439ff77e468SPeter Zijlstra 	}
1440ff77e468SPeter Zijlstra 	rt_se->on_rq = 0;
1441391e43daSPeter Zijlstra 
1442391e43daSPeter Zijlstra 	dec_rt_tasks(rt_se, rt_rq);
1443391e43daSPeter Zijlstra }
1444391e43daSPeter Zijlstra 
1445391e43daSPeter Zijlstra /*
1446391e43daSPeter Zijlstra  * Because the prio of an upper entry depends on the lower
1447391e43daSPeter Zijlstra  * entries, we must remove entries top-down.
1448391e43daSPeter Zijlstra  */
1449ff77e468SPeter Zijlstra static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1450391e43daSPeter Zijlstra {
1451391e43daSPeter Zijlstra 	struct sched_rt_entity *back = NULL;
1452391e43daSPeter Zijlstra 
1453391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1454391e43daSPeter Zijlstra 		rt_se->back = back;
1455391e43daSPeter Zijlstra 		back = rt_se;
1456391e43daSPeter Zijlstra 	}
1457391e43daSPeter Zijlstra 
1458f4ebcbc0SKirill Tkhai 	dequeue_top_rt_rq(rt_rq_of_se(back));
1459f4ebcbc0SKirill Tkhai 
1460391e43daSPeter Zijlstra 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1461391e43daSPeter Zijlstra 		if (on_rt_rq(rt_se))
1462ff77e468SPeter Zijlstra 			__dequeue_rt_entity(rt_se, flags);
1463391e43daSPeter Zijlstra 	}
1464391e43daSPeter Zijlstra }
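
/*
 * Walk-through (hypothetical two-level hierarchy): for a task T inside
 * group G, for_each_sched_rt_entity() visits T then G, so the back
 * pointers yield the dequeue order G -> T.  G must go first because
 * its queue position depends on the highest priority among its
 * children, which changes the moment T is removed.
 */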
1465391e43daSPeter Zijlstra 
1466ff77e468SPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1467391e43daSPeter Zijlstra {
1468f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_se(rt_se);
1469f4ebcbc0SKirill Tkhai 
147057a5c2daSYafang Shao 	update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);
147157a5c2daSYafang Shao 
1472ff77e468SPeter Zijlstra 	dequeue_rt_stack(rt_se, flags);
1473391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se)
1474ff77e468SPeter Zijlstra 		__enqueue_rt_entity(rt_se, flags);
1475f4ebcbc0SKirill Tkhai 	enqueue_top_rt_rq(&rq->rt);
1476391e43daSPeter Zijlstra }
1477391e43daSPeter Zijlstra 
1478ff77e468SPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1479391e43daSPeter Zijlstra {
1480f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_se(rt_se);
1481f4ebcbc0SKirill Tkhai 
148257a5c2daSYafang Shao 	update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);
148357a5c2daSYafang Shao 
1484ff77e468SPeter Zijlstra 	dequeue_rt_stack(rt_se, flags);
1485391e43daSPeter Zijlstra 
1486391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1487391e43daSPeter Zijlstra 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1488391e43daSPeter Zijlstra 
1489391e43daSPeter Zijlstra 		if (rt_rq && rt_rq->rt_nr_running)
1490ff77e468SPeter Zijlstra 			__enqueue_rt_entity(rt_se, flags);
1491391e43daSPeter Zijlstra 	}
1492f4ebcbc0SKirill Tkhai 	enqueue_top_rt_rq(&rq->rt);
1493391e43daSPeter Zijlstra }
1494391e43daSPeter Zijlstra 
1495391e43daSPeter Zijlstra /*
1496391e43daSPeter Zijlstra  * Adding/removing a task to/from a priority array:
1497391e43daSPeter Zijlstra  */
1498391e43daSPeter Zijlstra static void
1499391e43daSPeter Zijlstra enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1500391e43daSPeter Zijlstra {
1501391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1502391e43daSPeter Zijlstra 
1503391e43daSPeter Zijlstra 	if (flags & ENQUEUE_WAKEUP)
1504391e43daSPeter Zijlstra 		rt_se->timeout = 0;
1505391e43daSPeter Zijlstra 
150657a5c2daSYafang Shao 	check_schedstat_required();
150757a5c2daSYafang Shao 	update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);
150857a5c2daSYafang Shao 
1509ff77e468SPeter Zijlstra 	enqueue_rt_entity(rt_se, flags);
1510391e43daSPeter Zijlstra 
15114b53a341SIngo Molnar 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1512391e43daSPeter Zijlstra 		enqueue_pushable_task(rq, p);
1513391e43daSPeter Zijlstra }
1514391e43daSPeter Zijlstra 
1515391e43daSPeter Zijlstra static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1516391e43daSPeter Zijlstra {
1517391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1518391e43daSPeter Zijlstra 
1519391e43daSPeter Zijlstra 	update_curr_rt(rq);
1520ff77e468SPeter Zijlstra 	dequeue_rt_entity(rt_se, flags);
1521391e43daSPeter Zijlstra 
1522391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
1523391e43daSPeter Zijlstra }
1524391e43daSPeter Zijlstra 
1525391e43daSPeter Zijlstra /*
1526391e43daSPeter Zijlstra  * Put a task at the head or the tail of the run list without the overhead of
1527391e43daSPeter Zijlstra  * dequeue followed by enqueue.
1528391e43daSPeter Zijlstra  */
1529391e43daSPeter Zijlstra static void
1530391e43daSPeter Zijlstra requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1531391e43daSPeter Zijlstra {
1532391e43daSPeter Zijlstra 	if (on_rt_rq(rt_se)) {
1533391e43daSPeter Zijlstra 		struct rt_prio_array *array = &rt_rq->active;
1534391e43daSPeter Zijlstra 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1535391e43daSPeter Zijlstra 
1536391e43daSPeter Zijlstra 		if (head)
1537391e43daSPeter Zijlstra 			list_move(&rt_se->run_list, queue);
1538391e43daSPeter Zijlstra 		else
1539391e43daSPeter Zijlstra 			list_move_tail(&rt_se->run_list, queue);
1540391e43daSPeter Zijlstra 	}
1541391e43daSPeter Zijlstra }
1542391e43daSPeter Zijlstra 
1543391e43daSPeter Zijlstra static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1544391e43daSPeter Zijlstra {
1545391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1546391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
1547391e43daSPeter Zijlstra 
1548391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1549391e43daSPeter Zijlstra 		rt_rq = rt_rq_of_se(rt_se);
1550391e43daSPeter Zijlstra 		requeue_rt_entity(rt_rq, rt_se, head);
1551391e43daSPeter Zijlstra 	}
1552391e43daSPeter Zijlstra }
1553391e43daSPeter Zijlstra 
1554391e43daSPeter Zijlstra static void yield_task_rt(struct rq *rq)
1555391e43daSPeter Zijlstra {
1556391e43daSPeter Zijlstra 	requeue_task_rt(rq, rq->curr, 0);
1557391e43daSPeter Zijlstra }
1558391e43daSPeter Zijlstra 
1559391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1560391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task);
1561391e43daSPeter Zijlstra 
1562391e43daSPeter Zijlstra static int
15633aef1551SValentin Schneider select_task_rq_rt(struct task_struct *p, int cpu, int flags)
1564391e43daSPeter Zijlstra {
1565391e43daSPeter Zijlstra 	struct task_struct *curr;
1566391e43daSPeter Zijlstra 	struct rq *rq;
1567804d402fSQais Yousef 	bool test;
1568391e43daSPeter Zijlstra 
1569391e43daSPeter Zijlstra 	/* For anything but wake ups, just return the task_cpu */
15703aef1551SValentin Schneider 	if (!(flags & (WF_TTWU | WF_FORK)))
1571391e43daSPeter Zijlstra 		goto out;
1572391e43daSPeter Zijlstra 
1573391e43daSPeter Zijlstra 	rq = cpu_rq(cpu);
1574391e43daSPeter Zijlstra 
1575391e43daSPeter Zijlstra 	rcu_read_lock();
1576316c1608SJason Low 	curr = READ_ONCE(rq->curr); /* unlocked access */
1577391e43daSPeter Zijlstra 
1578391e43daSPeter Zijlstra 	/*
1579391e43daSPeter Zijlstra 	 * If the current task on @p's runqueue is an RT task, then
1580391e43daSPeter Zijlstra 	 * try to see if we can wake this RT task up on another
1581391e43daSPeter Zijlstra 	 * runqueue. Otherwise simply start this RT task
1582391e43daSPeter Zijlstra 	 * on its current runqueue.
1583391e43daSPeter Zijlstra 	 *
1584391e43daSPeter Zijlstra 	 * We want to avoid overloading runqueues. If the woken
1585391e43daSPeter Zijlstra 	 * task has a higher priority, then it will stay on this CPU
1586391e43daSPeter Zijlstra 	 * and the lower prio task should be moved to another CPU.
1587391e43daSPeter Zijlstra 	 * Even though this will probably make the lower prio task
1588391e43daSPeter Zijlstra 	 * lose its cache, we do not want to bounce a higher priority
1589391e43daSPeter Zijlstra 	 * task around just because it gave up its CPU, perhaps for a
1590391e43daSPeter Zijlstra 	 * lock?
1591391e43daSPeter Zijlstra 	 *
1592391e43daSPeter Zijlstra 	 * For equal prio tasks, we just let the scheduler sort it out.
1593391e43daSPeter Zijlstra 	 *
1594391e43daSPeter Zijlstra 	 * Otherwise, just let it ride on the affined RQ and the
1595391e43daSPeter Zijlstra 	 * post-schedule router will push the preempted task away
1596391e43daSPeter Zijlstra 	 *
1597391e43daSPeter Zijlstra 	 * This test is optimistic, if we get it wrong the load-balancer
1598391e43daSPeter Zijlstra 	 * will have to sort it out.
1599804d402fSQais Yousef 	 *
1600804d402fSQais Yousef 	 * We take into account the capacity of the CPU to ensure it fits the
1601804d402fSQais Yousef 	 * requirement of the task - which is only important on heterogeneous
1602804d402fSQais Yousef 	 * systems like big.LITTLE.
1603391e43daSPeter Zijlstra 	 */
1604804d402fSQais Yousef 	test = curr &&
1605804d402fSQais Yousef 	       unlikely(rt_task(curr)) &&
1606804d402fSQais Yousef 	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
1607804d402fSQais Yousef 
1608804d402fSQais Yousef 	if (test || !rt_task_fits_capacity(p, cpu)) {
1609391e43daSPeter Zijlstra 		int target = find_lowest_rq(p);
1610391e43daSPeter Zijlstra 
161180e3d87bSTim Chen 		/*
1612b28bc1e0SQais Yousef 		 * Bail out if we were forcing a migration to find a better
1613b28bc1e0SQais Yousef 		 * fitting CPU but our search failed.
1614b28bc1e0SQais Yousef 		 */
1615b28bc1e0SQais Yousef 		if (!test && target != -1 && !rt_task_fits_capacity(p, target))
1616b28bc1e0SQais Yousef 			goto out_unlock;
1617b28bc1e0SQais Yousef 
1618b28bc1e0SQais Yousef 		/*
161980e3d87bSTim Chen 		 * Don't bother moving it if the destination CPU is
162080e3d87bSTim Chen 		 * not running a lower priority task.
162180e3d87bSTim Chen 		 */
162280e3d87bSTim Chen 		if (target != -1 &&
162380e3d87bSTim Chen 		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1624391e43daSPeter Zijlstra 			cpu = target;
1625391e43daSPeter Zijlstra 	}
1626b28bc1e0SQais Yousef 
1627b28bc1e0SQais Yousef out_unlock:
1628391e43daSPeter Zijlstra 	rcu_read_unlock();
1629391e43daSPeter Zijlstra 
1630391e43daSPeter Zijlstra out:
1631391e43daSPeter Zijlstra 	return cpu;
1632391e43daSPeter Zijlstra }
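
/*
 * Example of the policy above (hypothetical values; a lower number is
 * a higher priority): p with prio 10 wakes on a CPU whose current task
 * is an RT task of prio 5, so curr->prio <= p->prio holds and "test"
 * is true.  find_lowest_rq() then looks for a CPU whose highest RT
 * priority is numerically above 10; if one exists, p starts there
 * instead of queueing behind the prio 5 task.
 */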
1633391e43daSPeter Zijlstra 
1634391e43daSPeter Zijlstra static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1635391e43daSPeter Zijlstra {
1636308a623aSWanpeng Li 	/*
1637308a623aSWanpeng Li 	 * Current can't be migrated, useless to reschedule,
1638308a623aSWanpeng Li 	 * let's hope p can move out.
1639308a623aSWanpeng Li 	 */
16404b53a341SIngo Molnar 	if (rq->curr->nr_cpus_allowed == 1 ||
1641a1bd02e1SQais Yousef 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1642391e43daSPeter Zijlstra 		return;
1643391e43daSPeter Zijlstra 
1644308a623aSWanpeng Li 	/*
1645308a623aSWanpeng Li 	 * p is migratable, so let's not schedule it and
1646308a623aSWanpeng Li 	 * see if it is pushed or pulled somewhere else.
1647308a623aSWanpeng Li 	 */
1648804d402fSQais Yousef 	if (p->nr_cpus_allowed != 1 &&
1649a1bd02e1SQais Yousef 	    cpupri_find(&rq->rd->cpupri, p, NULL))
1650391e43daSPeter Zijlstra 		return;
1651391e43daSPeter Zijlstra 
1652391e43daSPeter Zijlstra 	/*
165397fb7a0aSIngo Molnar 	 * There appear to be other CPUs that can accept
165497fb7a0aSIngo Molnar 	 * the current task but none can run 'p', so let's reschedule
165597fb7a0aSIngo Molnar 	 * to try and push the current task away:
1656391e43daSPeter Zijlstra 	 */
1657391e43daSPeter Zijlstra 	requeue_task_rt(rq, p, 1);
16588875125eSKirill Tkhai 	resched_curr(rq);
1659391e43daSPeter Zijlstra }
1660391e43daSPeter Zijlstra 
16616e2df058SPeter Zijlstra static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
16626e2df058SPeter Zijlstra {
16636e2df058SPeter Zijlstra 	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
16646e2df058SPeter Zijlstra 		/*
16656e2df058SPeter Zijlstra 		 * This is OK, because current is on_cpu, which avoids it being
16666e2df058SPeter Zijlstra 		 * picked for load-balance and preemption/IRQs are still
16676e2df058SPeter Zijlstra 		 * disabled avoiding further scheduler activity on it and we've
16686e2df058SPeter Zijlstra 		 * not yet started the picking loop.
16696e2df058SPeter Zijlstra 		 */
16706e2df058SPeter Zijlstra 		rq_unpin_lock(rq, rf);
16716e2df058SPeter Zijlstra 		pull_rt_task(rq);
16726e2df058SPeter Zijlstra 		rq_repin_lock(rq, rf);
16736e2df058SPeter Zijlstra 	}
16746e2df058SPeter Zijlstra 
16756e2df058SPeter Zijlstra 	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
16766e2df058SPeter Zijlstra }
1677391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
1678391e43daSPeter Zijlstra 
1679391e43daSPeter Zijlstra /*
1680391e43daSPeter Zijlstra  * Preempt the current task with a newly woken task if needed:
1681391e43daSPeter Zijlstra  */
1682391e43daSPeter Zijlstra static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1683391e43daSPeter Zijlstra {
1684391e43daSPeter Zijlstra 	if (p->prio < rq->curr->prio) {
16858875125eSKirill Tkhai 		resched_curr(rq);
1686391e43daSPeter Zijlstra 		return;
1687391e43daSPeter Zijlstra 	}
1688391e43daSPeter Zijlstra 
1689391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1690391e43daSPeter Zijlstra 	/*
1691391e43daSPeter Zijlstra 	 * If:
1692391e43daSPeter Zijlstra 	 *
1693391e43daSPeter Zijlstra 	 * - the newly woken task is of equal priority to the current task
1694391e43daSPeter Zijlstra 	 * - the newly woken task is non-migratable while current is migratable
1695391e43daSPeter Zijlstra 	 * - current will be preempted on the next reschedule
1696391e43daSPeter Zijlstra 	 *
1697391e43daSPeter Zijlstra 	 * we should check to see if current can readily move to a different
1698391e43daSPeter Zijlstra 	 * cpu.  If so, we will reschedule to allow the push logic to try
1699391e43daSPeter Zijlstra 	 * to move current somewhere else, making room for our non-migratable
1700391e43daSPeter Zijlstra 	 * task.
1701391e43daSPeter Zijlstra 	 */
1702391e43daSPeter Zijlstra 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1703391e43daSPeter Zijlstra 		check_preempt_equal_prio(rq, p);
1704391e43daSPeter Zijlstra #endif
1705391e43daSPeter Zijlstra }
1706391e43daSPeter Zijlstra 
1707a0e813f2SPeter Zijlstra static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1708ff1cdc94SMuchun Song {
170957a5c2daSYafang Shao 	struct sched_rt_entity *rt_se = &p->rt;
171057a5c2daSYafang Shao 	struct rt_rq *rt_rq = &rq->rt;
171157a5c2daSYafang Shao 
1712ff1cdc94SMuchun Song 	p->se.exec_start = rq_clock_task(rq);
171357a5c2daSYafang Shao 	if (on_rt_rq(&p->rt))
171457a5c2daSYafang Shao 		update_stats_wait_end_rt(rt_rq, rt_se);
1715ff1cdc94SMuchun Song 
1716ff1cdc94SMuchun Song 	/* The running task is never eligible for pushing */
1717ff1cdc94SMuchun Song 	dequeue_pushable_task(rq, p);
1718f95d4eaeSPeter Zijlstra 
1719a0e813f2SPeter Zijlstra 	if (!first)
1720a0e813f2SPeter Zijlstra 		return;
1721a0e813f2SPeter Zijlstra 
1722f95d4eaeSPeter Zijlstra 	/*
1723f95d4eaeSPeter Zijlstra 	 * If prev task was rt, put_prev_task() has already updated the
1724f95d4eaeSPeter Zijlstra 	 * utilization. We only care about the case where we start to
1725f95d4eaeSPeter Zijlstra 	 * schedule an rt task.
1726f95d4eaeSPeter Zijlstra 	 */
1727f95d4eaeSPeter Zijlstra 	if (rq->curr->sched_class != &rt_sched_class)
1728f95d4eaeSPeter Zijlstra 		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1729f95d4eaeSPeter Zijlstra 
1730f95d4eaeSPeter Zijlstra 	rt_queue_push_tasks(rq);
1731ff1cdc94SMuchun Song }
1732ff1cdc94SMuchun Song 
1733391e43daSPeter Zijlstra static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1734391e43daSPeter Zijlstra 						   struct rt_rq *rt_rq)
1735391e43daSPeter Zijlstra {
1736391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1737391e43daSPeter Zijlstra 	struct sched_rt_entity *next = NULL;
1738391e43daSPeter Zijlstra 	struct list_head *queue;
1739391e43daSPeter Zijlstra 	int idx;
1740391e43daSPeter Zijlstra 
1741391e43daSPeter Zijlstra 	idx = sched_find_first_bit(array->bitmap);
1742391e43daSPeter Zijlstra 	BUG_ON(idx >= MAX_RT_PRIO);
1743391e43daSPeter Zijlstra 
1744391e43daSPeter Zijlstra 	queue = array->queue + idx;
1745391e43daSPeter Zijlstra 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1746391e43daSPeter Zijlstra 
1747391e43daSPeter Zijlstra 	return next;
1748391e43daSPeter Zijlstra }
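
/*
 * The lookup above is O(1): one find-first-bit over the MAX_RT_PRIO
 * wide bitmap, then the head of that priority's list.  A minimal
 * user-space sketch of the bitmap half (illustrative only; the toy_*
 * names are hypothetical, not kernel API):
 */
#if 0	/* example, not compiled */
#define TOY_NR_PRIO	100
#define TOY_BITS	((int)(8 * sizeof(unsigned long)))

static int toy_find_first_bit(const unsigned long *bitmap)
{
	/* Scan word by word; the first set bit is the highest priority. */
	for (int w = 0; w * TOY_BITS < TOY_NR_PRIO; w++) {
		if (bitmap[w])
			return w * TOY_BITS + __builtin_ctzl(bitmap[w]);
	}
	return TOY_NR_PRIO;	/* empty: nothing runnable */
}
#endif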
1749391e43daSPeter Zijlstra 
1750391e43daSPeter Zijlstra static struct task_struct *_pick_next_task_rt(struct rq *rq)
1751391e43daSPeter Zijlstra {
1752391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se;
1753606dba2eSPeter Zijlstra 	struct rt_rq *rt_rq  = &rq->rt;
1754391e43daSPeter Zijlstra 
1755391e43daSPeter Zijlstra 	do {
1756391e43daSPeter Zijlstra 		rt_se = pick_next_rt_entity(rq, rt_rq);
1757391e43daSPeter Zijlstra 		BUG_ON(!rt_se);
1758391e43daSPeter Zijlstra 		rt_rq = group_rt_rq(rt_se);
1759391e43daSPeter Zijlstra 	} while (rt_rq);
1760391e43daSPeter Zijlstra 
1761ff1cdc94SMuchun Song 	return rt_task_of(rt_se);
1762391e43daSPeter Zijlstra }
1763391e43daSPeter Zijlstra 
176421f56ffeSPeter Zijlstra static struct task_struct *pick_task_rt(struct rq *rq)
1765391e43daSPeter Zijlstra {
1766606dba2eSPeter Zijlstra 	struct task_struct *p;
1767606dba2eSPeter Zijlstra 
17686e2df058SPeter Zijlstra 	if (!sched_rt_runnable(rq))
1769606dba2eSPeter Zijlstra 		return NULL;
1770606dba2eSPeter Zijlstra 
1771606dba2eSPeter Zijlstra 	p = _pick_next_task_rt(rq);
177221f56ffeSPeter Zijlstra 
177321f56ffeSPeter Zijlstra 	return p;
177421f56ffeSPeter Zijlstra }
177521f56ffeSPeter Zijlstra 
177621f56ffeSPeter Zijlstra static struct task_struct *pick_next_task_rt(struct rq *rq)
177721f56ffeSPeter Zijlstra {
177821f56ffeSPeter Zijlstra 	struct task_struct *p = pick_task_rt(rq);
177921f56ffeSPeter Zijlstra 
178021f56ffeSPeter Zijlstra 	if (p)
1781a0e813f2SPeter Zijlstra 		set_next_task_rt(rq, p, true);
178221f56ffeSPeter Zijlstra 
1783391e43daSPeter Zijlstra 	return p;
1784391e43daSPeter Zijlstra }
1785391e43daSPeter Zijlstra 
17866e2df058SPeter Zijlstra static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1787391e43daSPeter Zijlstra {
178857a5c2daSYafang Shao 	struct sched_rt_entity *rt_se = &p->rt;
178957a5c2daSYafang Shao 	struct rt_rq *rt_rq = &rq->rt;
179057a5c2daSYafang Shao 
179157a5c2daSYafang Shao 	if (on_rt_rq(&p->rt))
179257a5c2daSYafang Shao 		update_stats_wait_start_rt(rt_rq, rt_se);
179357a5c2daSYafang Shao 
1794391e43daSPeter Zijlstra 	update_curr_rt(rq);
1795391e43daSPeter Zijlstra 
179623127296SVincent Guittot 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1797371bf427SVincent Guittot 
1798391e43daSPeter Zijlstra 	/*
1799391e43daSPeter Zijlstra 	 * The previous task needs to be made eligible for pushing
1800391e43daSPeter Zijlstra 	 * if it is still active
1801391e43daSPeter Zijlstra 	 */
18024b53a341SIngo Molnar 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1803391e43daSPeter Zijlstra 		enqueue_pushable_task(rq, p);
1804391e43daSPeter Zijlstra }
1805391e43daSPeter Zijlstra 
1806391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1807391e43daSPeter Zijlstra 
1808391e43daSPeter Zijlstra /* Only try algorithms three times */
1809391e43daSPeter Zijlstra #define RT_MAX_TRIES 3
1810391e43daSPeter Zijlstra 
1811391e43daSPeter Zijlstra static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1812391e43daSPeter Zijlstra {
1813391e43daSPeter Zijlstra 	if (!task_running(rq, p) &&
181495158a89SPeter Zijlstra 	    cpumask_test_cpu(cpu, &p->cpus_mask))
1815391e43daSPeter Zijlstra 		return 1;
181697fb7a0aSIngo Molnar 
1817391e43daSPeter Zijlstra 	return 0;
1818391e43daSPeter Zijlstra }
1819391e43daSPeter Zijlstra 
1820e23ee747SKirill Tkhai /*
1821e23ee747SKirill Tkhai  * Return the rq's highest-priority pushable task that is suitable to be
182297fb7a0aSIngo Molnar  * executed on the CPU, or NULL otherwise
1823e23ee747SKirill Tkhai  */
1824e23ee747SKirill Tkhai static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1825391e43daSPeter Zijlstra {
1826e23ee747SKirill Tkhai 	struct plist_head *head = &rq->rt.pushable_tasks;
1827391e43daSPeter Zijlstra 	struct task_struct *p;
1828391e43daSPeter Zijlstra 
1829e23ee747SKirill Tkhai 	if (!has_pushable_tasks(rq))
1830e23ee747SKirill Tkhai 		return NULL;
1831391e43daSPeter Zijlstra 
1832e23ee747SKirill Tkhai 	plist_for_each_entry(p, head, pushable_tasks) {
1833e23ee747SKirill Tkhai 		if (pick_rt_task(rq, p, cpu))
1834e23ee747SKirill Tkhai 			return p;
1835391e43daSPeter Zijlstra 	}
1836391e43daSPeter Zijlstra 
1837e23ee747SKirill Tkhai 	return NULL;
1838391e43daSPeter Zijlstra }
1839391e43daSPeter Zijlstra 
1840391e43daSPeter Zijlstra static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1841391e43daSPeter Zijlstra 
1842391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task)
1843391e43daSPeter Zijlstra {
1844391e43daSPeter Zijlstra 	struct sched_domain *sd;
18454ba29684SChristoph Lameter 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1846391e43daSPeter Zijlstra 	int this_cpu = smp_processor_id();
1847391e43daSPeter Zijlstra 	int cpu      = task_cpu(task);
1848a1bd02e1SQais Yousef 	int ret;
1849391e43daSPeter Zijlstra 
1850391e43daSPeter Zijlstra 	/* Make sure the mask is initialized first */
1851391e43daSPeter Zijlstra 	if (unlikely(!lowest_mask))
1852391e43daSPeter Zijlstra 		return -1;
1853391e43daSPeter Zijlstra 
18544b53a341SIngo Molnar 	if (task->nr_cpus_allowed == 1)
1855391e43daSPeter Zijlstra 		return -1; /* No other targets possible */
1856391e43daSPeter Zijlstra 
1857a1bd02e1SQais Yousef 	/*
1858a1bd02e1SQais Yousef 	 * If we're on an asym system, ensure we consider the different capacities
1859a1bd02e1SQais Yousef 	 * of the CPUs when searching for the lowest_mask.
1860a1bd02e1SQais Yousef 	 */
1861a1bd02e1SQais Yousef 	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
1862a1bd02e1SQais Yousef 
1863a1bd02e1SQais Yousef 		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1864a1bd02e1SQais Yousef 					  task, lowest_mask,
1865a1bd02e1SQais Yousef 					  rt_task_fits_capacity);
1866a1bd02e1SQais Yousef 	} else {
1867a1bd02e1SQais Yousef 
1868a1bd02e1SQais Yousef 		ret = cpupri_find(&task_rq(task)->rd->cpupri,
1869a1bd02e1SQais Yousef 				  task, lowest_mask);
1870a1bd02e1SQais Yousef 	}
1871a1bd02e1SQais Yousef 
1872a1bd02e1SQais Yousef 	if (!ret)
1873391e43daSPeter Zijlstra 		return -1; /* No targets found */
1874391e43daSPeter Zijlstra 
1875391e43daSPeter Zijlstra 	/*
187697fb7a0aSIngo Molnar 	 * At this point we have built a mask of CPUs representing the
1877391e43daSPeter Zijlstra 	 * lowest priority tasks in the system.  Now we want to elect
1878391e43daSPeter Zijlstra 	 * the best one based on our affinity and topology.
1879391e43daSPeter Zijlstra 	 *
188097fb7a0aSIngo Molnar 	 * We prioritize the last CPU that the task executed on since
1881391e43daSPeter Zijlstra 	 * it is most likely cache-hot in that location.
1882391e43daSPeter Zijlstra 	 */
1883391e43daSPeter Zijlstra 	if (cpumask_test_cpu(cpu, lowest_mask))
1884391e43daSPeter Zijlstra 		return cpu;
1885391e43daSPeter Zijlstra 
1886391e43daSPeter Zijlstra 	/*
1887391e43daSPeter Zijlstra 	 * Otherwise, we consult the sched_domains span maps to figure
188897fb7a0aSIngo Molnar 	 * out which CPU is logically closest to our hot cache data.
1889391e43daSPeter Zijlstra 	 */
1890391e43daSPeter Zijlstra 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1891391e43daSPeter Zijlstra 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1892391e43daSPeter Zijlstra 
1893391e43daSPeter Zijlstra 	rcu_read_lock();
1894391e43daSPeter Zijlstra 	for_each_domain(cpu, sd) {
1895391e43daSPeter Zijlstra 		if (sd->flags & SD_WAKE_AFFINE) {
1896391e43daSPeter Zijlstra 			int best_cpu;
1897391e43daSPeter Zijlstra 
1898391e43daSPeter Zijlstra 			/*
1899391e43daSPeter Zijlstra 			 * "this_cpu" is cheaper to preempt than a
1900391e43daSPeter Zijlstra 			 * remote processor.
1901391e43daSPeter Zijlstra 			 */
1902391e43daSPeter Zijlstra 			if (this_cpu != -1 &&
1903391e43daSPeter Zijlstra 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1904391e43daSPeter Zijlstra 				rcu_read_unlock();
1905391e43daSPeter Zijlstra 				return this_cpu;
1906391e43daSPeter Zijlstra 			}
1907391e43daSPeter Zijlstra 
190814e292f8SPeter Zijlstra 			best_cpu = cpumask_any_and_distribute(lowest_mask,
1909391e43daSPeter Zijlstra 							      sched_domain_span(sd));
1910391e43daSPeter Zijlstra 			if (best_cpu < nr_cpu_ids) {
1911391e43daSPeter Zijlstra 				rcu_read_unlock();
1912391e43daSPeter Zijlstra 				return best_cpu;
1913391e43daSPeter Zijlstra 			}
1914391e43daSPeter Zijlstra 		}
1915391e43daSPeter Zijlstra 	}
1916391e43daSPeter Zijlstra 	rcu_read_unlock();
1917391e43daSPeter Zijlstra 
1918391e43daSPeter Zijlstra 	/*
1919391e43daSPeter Zijlstra 	 * And finally, if there were no matches within the domains
1920391e43daSPeter Zijlstra 	 * just give the caller *something* to work with from the compatible
1921391e43daSPeter Zijlstra 	 * locations.
1922391e43daSPeter Zijlstra 	 */
1923391e43daSPeter Zijlstra 	if (this_cpu != -1)
1924391e43daSPeter Zijlstra 		return this_cpu;
1925391e43daSPeter Zijlstra 
192614e292f8SPeter Zijlstra 	cpu = cpumask_any_distribute(lowest_mask);
1927391e43daSPeter Zijlstra 	if (cpu < nr_cpu_ids)
1928391e43daSPeter Zijlstra 		return cpu;
192997fb7a0aSIngo Molnar 
1930391e43daSPeter Zijlstra 	return -1;
1931391e43daSPeter Zijlstra }
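
/*
 * Selection order implemented above, in decreasing preference:
 *   1. the task's current CPU, when it is in lowest_mask (cache-hot);
 *   2. this_cpu, when it is in lowest_mask and shares an
 *      SD_WAKE_AFFINE domain with the task's CPU;
 *   3. any lowest_mask CPU inside such a domain span;
 *   4. this_cpu, if it is in lowest_mask at all;
 *   5. any lowest_mask CPU; otherwise
 *   6. -1 when no eligible CPU exists.
 */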
1932391e43daSPeter Zijlstra 
1933391e43daSPeter Zijlstra /* Will lock the rq it finds */
1934391e43daSPeter Zijlstra static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1935391e43daSPeter Zijlstra {
1936391e43daSPeter Zijlstra 	struct rq *lowest_rq = NULL;
1937391e43daSPeter Zijlstra 	int tries;
1938391e43daSPeter Zijlstra 	int cpu;
1939391e43daSPeter Zijlstra 
1940391e43daSPeter Zijlstra 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1941391e43daSPeter Zijlstra 		cpu = find_lowest_rq(task);
1942391e43daSPeter Zijlstra 
1943391e43daSPeter Zijlstra 		if ((cpu == -1) || (cpu == rq->cpu))
1944391e43daSPeter Zijlstra 			break;
1945391e43daSPeter Zijlstra 
1946391e43daSPeter Zijlstra 		lowest_rq = cpu_rq(cpu);
1947391e43daSPeter Zijlstra 
194880e3d87bSTim Chen 		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
194980e3d87bSTim Chen 			/*
195080e3d87bSTim Chen 			 * Target rq has tasks of equal or higher priority,
195180e3d87bSTim Chen 			 * retrying does not release any lock and is unlikely
195280e3d87bSTim Chen 			 * to yield a different result.
195380e3d87bSTim Chen 			 */
195480e3d87bSTim Chen 			lowest_rq = NULL;
195580e3d87bSTim Chen 			break;
195680e3d87bSTim Chen 		}
195780e3d87bSTim Chen 
1958391e43daSPeter Zijlstra 		/* if the prio of this runqueue changed, try again */
1959391e43daSPeter Zijlstra 		if (double_lock_balance(rq, lowest_rq)) {
1960391e43daSPeter Zijlstra 			/*
1961391e43daSPeter Zijlstra 			 * We had to unlock the run queue. In the
1962391e43daSPeter Zijlstra 			 * meantime, the task could have migrated
1963391e43daSPeter Zijlstra 			 * already or had its affinity changed.
1964391e43daSPeter Zijlstra 			 * Also make sure that it wasn't scheduled on its rq.
1965391e43daSPeter Zijlstra 			 */
1966391e43daSPeter Zijlstra 			if (unlikely(task_rq(task) != rq ||
196795158a89SPeter Zijlstra 				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
1968391e43daSPeter Zijlstra 				     task_running(rq, task) ||
196913b5ab02SXunlei Pang 				     !rt_task(task) ||
1970da0c1e65SKirill Tkhai 				     !task_on_rq_queued(task))) {
1971391e43daSPeter Zijlstra 
19727f1b4393SPeter Zijlstra 				double_unlock_balance(rq, lowest_rq);
1973391e43daSPeter Zijlstra 				lowest_rq = NULL;
1974391e43daSPeter Zijlstra 				break;
1975391e43daSPeter Zijlstra 			}
1976391e43daSPeter Zijlstra 		}
1977391e43daSPeter Zijlstra 
1978391e43daSPeter Zijlstra 		/* If this rq is still suitable use it. */
1979391e43daSPeter Zijlstra 		if (lowest_rq->rt.highest_prio.curr > task->prio)
1980391e43daSPeter Zijlstra 			break;
1981391e43daSPeter Zijlstra 
1982391e43daSPeter Zijlstra 		/* try again */
1983391e43daSPeter Zijlstra 		double_unlock_balance(rq, lowest_rq);
1984391e43daSPeter Zijlstra 		lowest_rq = NULL;
1985391e43daSPeter Zijlstra 	}
1986391e43daSPeter Zijlstra 
1987391e43daSPeter Zijlstra 	return lowest_rq;
1988391e43daSPeter Zijlstra }
1989391e43daSPeter Zijlstra 
1990391e43daSPeter Zijlstra static struct task_struct *pick_next_pushable_task(struct rq *rq)
1991391e43daSPeter Zijlstra {
1992391e43daSPeter Zijlstra 	struct task_struct *p;
1993391e43daSPeter Zijlstra 
1994391e43daSPeter Zijlstra 	if (!has_pushable_tasks(rq))
1995391e43daSPeter Zijlstra 		return NULL;
1996391e43daSPeter Zijlstra 
1997391e43daSPeter Zijlstra 	p = plist_first_entry(&rq->rt.pushable_tasks,
1998391e43daSPeter Zijlstra 			      struct task_struct, pushable_tasks);
1999391e43daSPeter Zijlstra 
2000391e43daSPeter Zijlstra 	BUG_ON(rq->cpu != task_cpu(p));
2001391e43daSPeter Zijlstra 	BUG_ON(task_current(rq, p));
20024b53a341SIngo Molnar 	BUG_ON(p->nr_cpus_allowed <= 1);
2003391e43daSPeter Zijlstra 
2004da0c1e65SKirill Tkhai 	BUG_ON(!task_on_rq_queued(p));
2005391e43daSPeter Zijlstra 	BUG_ON(!rt_task(p));
2006391e43daSPeter Zijlstra 
2007391e43daSPeter Zijlstra 	return p;
2008391e43daSPeter Zijlstra }
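
/*
 * Note that rq->rt.pushable_tasks is a plist kept sorted by priority
 * as tasks are enqueued, so plist_first_entry() returns the highest
 * priority pushable task in O(1).
 */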
2009391e43daSPeter Zijlstra 
2010391e43daSPeter Zijlstra /*
2011391e43daSPeter Zijlstra  * If the current CPU has more than one RT task, see if the
2012391e43daSPeter Zijlstra  * non-running task can migrate over to a CPU that is running a task
2013391e43daSPeter Zijlstra  * of lesser priority.
2014391e43daSPeter Zijlstra  */
2015a7c81556SPeter Zijlstra static int push_rt_task(struct rq *rq, bool pull)
2016391e43daSPeter Zijlstra {
2017391e43daSPeter Zijlstra 	struct task_struct *next_task;
2018391e43daSPeter Zijlstra 	struct rq *lowest_rq;
2019391e43daSPeter Zijlstra 	int ret = 0;
2020391e43daSPeter Zijlstra 
2021391e43daSPeter Zijlstra 	if (!rq->rt.overloaded)
2022391e43daSPeter Zijlstra 		return 0;
2023391e43daSPeter Zijlstra 
2024391e43daSPeter Zijlstra 	next_task = pick_next_pushable_task(rq);
2025391e43daSPeter Zijlstra 	if (!next_task)
2026391e43daSPeter Zijlstra 		return 0;
2027391e43daSPeter Zijlstra 
2028391e43daSPeter Zijlstra retry:
2029*49bef33eSValentin Schneider 	/*
2030*49bef33eSValentin Schneider 	 * It's possible that the next_task slipped in with a
2031*49bef33eSValentin Schneider 	 * higher priority than current. If that's the case,
2032*49bef33eSValentin Schneider 	 * just reschedule current.
2033*49bef33eSValentin Schneider 	 */
2034*49bef33eSValentin Schneider 	if (unlikely(next_task->prio < rq->curr->prio)) {
2035*49bef33eSValentin Schneider 		resched_curr(rq);
2036*49bef33eSValentin Schneider 		return 0;
2037*49bef33eSValentin Schneider 	}
2038*49bef33eSValentin Schneider 
2039a7c81556SPeter Zijlstra 	if (is_migration_disabled(next_task)) {
2040a7c81556SPeter Zijlstra 		struct task_struct *push_task = NULL;
2041a7c81556SPeter Zijlstra 		int cpu;
2042a7c81556SPeter Zijlstra 
2043a7c81556SPeter Zijlstra 		if (!pull || rq->push_busy)
2044a7c81556SPeter Zijlstra 			return 0;
2045a7c81556SPeter Zijlstra 
2046*49bef33eSValentin Schneider 		/*
2047*49bef33eSValentin Schneider 		 * Invoking find_lowest_rq() on anything but an RT task doesn't
2048*49bef33eSValentin Schneider 		 * make sense. Per the above priority check, curr has to
2049*49bef33eSValentin Schneider 		 * be of higher priority than next_task, so no need to
2050*49bef33eSValentin Schneider 		 * reschedule when bailing out.
2051*49bef33eSValentin Schneider 		 *
2052*49bef33eSValentin Schneider 		 * Note that the stoppers are masqueraded as SCHED_FIFO
2053*49bef33eSValentin Schneider 		 * (cf. sched_set_stop_task()), so we can't rely on rt_task().
2054*49bef33eSValentin Schneider 		 */
2055*49bef33eSValentin Schneider 		if (rq->curr->sched_class != &rt_sched_class)
2056*49bef33eSValentin Schneider 			return 0;
2057*49bef33eSValentin Schneider 
2058a7c81556SPeter Zijlstra 		cpu = find_lowest_rq(rq->curr);
2059a7c81556SPeter Zijlstra 		if (cpu == -1 || cpu == rq->cpu)
2060a7c81556SPeter Zijlstra 			return 0;
2061a7c81556SPeter Zijlstra 
2062a7c81556SPeter Zijlstra 		/*
2063a7c81556SPeter Zijlstra 		 * Given we found a CPU with lower priority than @next_task,
2064a7c81556SPeter Zijlstra 		 * it should be running. However, we cannot migrate it
2065a7c81556SPeter Zijlstra 		 * to this other CPU, instead attempt to push the current
2066a7c81556SPeter Zijlstra 		 * running task on this CPU away.
2067a7c81556SPeter Zijlstra 		 */
2068a7c81556SPeter Zijlstra 		push_task = get_push_task(rq);
2069a7c81556SPeter Zijlstra 		if (push_task) {
20705cb9eaa3SPeter Zijlstra 			raw_spin_rq_unlock(rq);
2071a7c81556SPeter Zijlstra 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2072a7c81556SPeter Zijlstra 					    push_task, &rq->push_work);
20735cb9eaa3SPeter Zijlstra 			raw_spin_rq_lock(rq);
2074a7c81556SPeter Zijlstra 		}
2075a7c81556SPeter Zijlstra 
2076a7c81556SPeter Zijlstra 		return 0;
2077a7c81556SPeter Zijlstra 	}
2078a7c81556SPeter Zijlstra 
20799ebc6053SYangtao Li 	if (WARN_ON(next_task == rq->curr))
2080391e43daSPeter Zijlstra 		return 0;
2081391e43daSPeter Zijlstra 
2082391e43daSPeter Zijlstra 	/* We might release rq lock */
2083391e43daSPeter Zijlstra 	get_task_struct(next_task);
2084391e43daSPeter Zijlstra 
2085391e43daSPeter Zijlstra 	/* find_lock_lowest_rq locks the rq if found */
2086391e43daSPeter Zijlstra 	lowest_rq = find_lock_lowest_rq(next_task, rq);
2087391e43daSPeter Zijlstra 	if (!lowest_rq) {
2088391e43daSPeter Zijlstra 		struct task_struct *task;
2089391e43daSPeter Zijlstra 		/*
2090391e43daSPeter Zijlstra 		 * find_lock_lowest_rq releases rq->lock
2091391e43daSPeter Zijlstra 		 * so it is possible that next_task has migrated.
2092391e43daSPeter Zijlstra 		 *
2093391e43daSPeter Zijlstra 		 * We need to make sure that the task is still on the same
2094391e43daSPeter Zijlstra 		 * run-queue and is also still the next task eligible for
2095391e43daSPeter Zijlstra 		 * pushing.
2096391e43daSPeter Zijlstra 		 */
2097391e43daSPeter Zijlstra 		task = pick_next_pushable_task(rq);
2098de16b91eSByungchul Park 		if (task == next_task) {
2099391e43daSPeter Zijlstra 			/*
2100391e43daSPeter Zijlstra 			 * The task hasn't migrated, and is still the next
2101391e43daSPeter Zijlstra 			 * eligible task, but we failed to find a run-queue
2102391e43daSPeter Zijlstra 			 * to push it to.  Do not retry in this case, since
210397fb7a0aSIngo Molnar 			 * other CPUs will pull from us when ready.
2104391e43daSPeter Zijlstra 			 */
2105391e43daSPeter Zijlstra 			goto out;
2106391e43daSPeter Zijlstra 		}
2107391e43daSPeter Zijlstra 
2108391e43daSPeter Zijlstra 		if (!task)
2109391e43daSPeter Zijlstra 			/* No more tasks, just exit */
2110391e43daSPeter Zijlstra 			goto out;
2111391e43daSPeter Zijlstra 
2112391e43daSPeter Zijlstra 		/*
2113391e43daSPeter Zijlstra 		 * Something has shifted, try again.
2114391e43daSPeter Zijlstra 		 */
2115391e43daSPeter Zijlstra 		put_task_struct(next_task);
2116391e43daSPeter Zijlstra 		next_task = task;
2117391e43daSPeter Zijlstra 		goto retry;
2118391e43daSPeter Zijlstra 	}
2119391e43daSPeter Zijlstra 
2120391e43daSPeter Zijlstra 	deactivate_task(rq, next_task, 0);
2121391e43daSPeter Zijlstra 	set_task_cpu(next_task, lowest_rq->cpu);
2122391e43daSPeter Zijlstra 	activate_task(lowest_rq, next_task, 0);
2123a7c81556SPeter Zijlstra 	resched_curr(lowest_rq);
2124391e43daSPeter Zijlstra 	ret = 1;
2125391e43daSPeter Zijlstra 
2126391e43daSPeter Zijlstra 	double_unlock_balance(rq, lowest_rq);
2127391e43daSPeter Zijlstra out:
2128391e43daSPeter Zijlstra 	put_task_struct(next_task);
2129391e43daSPeter Zijlstra 
2130391e43daSPeter Zijlstra 	return ret;
2131391e43daSPeter Zijlstra }
2132391e43daSPeter Zijlstra 
2133391e43daSPeter Zijlstra static void push_rt_tasks(struct rq *rq)
2134391e43daSPeter Zijlstra {
2135391e43daSPeter Zijlstra 	/* push_rt_task() will return true if it moved an RT task */
2136a7c81556SPeter Zijlstra 	while (push_rt_task(rq, false))
2137391e43daSPeter Zijlstra 		;
2138391e43daSPeter Zijlstra }
2139391e43daSPeter Zijlstra 
2140b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
2141b6366f04SSteven Rostedt 
21423e777f99SSteven Rostedt (VMware) /*
21433e777f99SSteven Rostedt (VMware)  * When a high priority task schedules out from a CPU and a lower priority
21443e777f99SSteven Rostedt (VMware)  * task is scheduled in, a check is made to see if there are any RT tasks
21453e777f99SSteven Rostedt (VMware)  * on other CPUs that are waiting to run because a higher priority RT task
21463e777f99SSteven Rostedt (VMware)  * is currently running on its CPU. In this case, the CPU with multiple RT
21473e777f99SSteven Rostedt (VMware)  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
21483e777f99SSteven Rostedt (VMware)  * up that may be able to run one of its non-running queued RT tasks.
21493e777f99SSteven Rostedt (VMware)  *
21504bdced5cSSteven Rostedt (Red Hat)  * All CPUs with overloaded RT tasks need to be notified as there is currently
21514bdced5cSSteven Rostedt (Red Hat)  * no way to know which of these CPUs have the highest priority task waiting
21524bdced5cSSteven Rostedt (Red Hat)  * to run. Instead of trying to take a spinlock on each of these CPUs,
21534bdced5cSSteven Rostedt (Red Hat)  * which has been shown to cause large latency when done on machines with
21544bdced5cSSteven Rostedt (Red Hat)  * many CPUs, an IPI is sent to the CPUs to have them push off the
21554bdced5cSSteven Rostedt (Red Hat)  * overloaded RT tasks waiting to run.
21563e777f99SSteven Rostedt (VMware)  *
21574bdced5cSSteven Rostedt (Red Hat)  * Just sending an IPI to each of the CPUs is also an issue, as on
21584bdced5cSSteven Rostedt (Red Hat)  * machines with a large CPU count this can cause an IPI storm on a CPU,
21594bdced5cSSteven Rostedt (Red Hat)  * especially if it's the only CPU with multiple RT tasks queued, and a
21604bdced5cSSteven Rostedt (Red Hat)  * large number of CPUs schedule a lower priority task at the same time.
21613e777f99SSteven Rostedt (VMware)  *
21624bdced5cSSteven Rostedt (Red Hat)  * Each root domain has its own irq work function that can iterate over
21634bdced5cSSteven Rostedt (Red Hat)  * all CPUs with RT overloaded tasks. Since all RT overloaded CPUs
21643b03706fSIngo Molnar  * must be checked, whether one or many CPUs are lowering their
21654bdced5cSSteven Rostedt (Red Hat)  * priority, there's a single irq work iterator that will try to
21664bdced5cSSteven Rostedt (Red Hat)  * push off the RT tasks that are waiting to run.
21673e777f99SSteven Rostedt (VMware)  *
21684bdced5cSSteven Rostedt (Red Hat)  * When a CPU schedules a lower priority task, it will kick off the
21694bdced5cSSteven Rostedt (Red Hat)  * irq work iterator that will jump to each CPU with overloaded RT tasks.
21704bdced5cSSteven Rostedt (Red Hat)  * As it only takes the first CPU that schedules a lower priority task
21714bdced5cSSteven Rostedt (Red Hat)  * to start the process, the rto_loop_start variable is atomically flipped
21724bdced5cSSteven Rostedt (Red Hat)  * from zero to one, and only the CPU that wins that race takes the rto_lock.
21734bdced5cSSteven Rostedt (Red Hat)  * This prevents high contention on the lock as the process handles all
21744bdced5cSSteven Rostedt (Red Hat)  * CPUs scheduling lower priority tasks.
21753e777f99SSteven Rostedt (VMware)  *
21764bdced5cSSteven Rostedt (Red Hat)  * All CPUs that are scheduling a lower priority task will increment the
21774bdced5cSSteven Rostedt (Red Hat)  * rto_loop_next variable. This will make sure that the irq work iterator
21784bdced5cSSteven Rostedt (Red Hat)  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
21794bdced5cSSteven Rostedt (Red Hat)  * priority task, even if the iterator is in the middle of a scan. Incrementing
21804bdced5cSSteven Rostedt (Red Hat)  * the rto_loop_next will cause the iterator to perform another scan.
21813e777f99SSteven Rostedt (VMware)  *
21823e777f99SSteven Rostedt (VMware)  */
2183ad0f1d9dSSteven Rostedt (VMware) static int rto_next_cpu(struct root_domain *rd)
2184b6366f04SSteven Rostedt {
21854bdced5cSSteven Rostedt (Red Hat) 	int next;
2186b6366f04SSteven Rostedt 	int cpu;
2187b6366f04SSteven Rostedt 
2188b6366f04SSteven Rostedt 	/*
21894bdced5cSSteven Rostedt (Red Hat) 	 * When starting the IPI RT pushing, the rto_cpu is set to -1,
21904bdced5cSSteven Rostedt (Red Hat) 	 * When starting the IPI RT pushing, the rto_cpu is set to -1, and
21914bdced5cSSteven Rostedt (Red Hat) 	 * rto_next_cpu() will simply return the first CPU found in
21924bdced5cSSteven Rostedt (Red Hat) 	 *
219397fb7a0aSIngo Molnar 	 * If rto_next_cpu() is called with rto_cpu is a valid CPU, it
21944bdced5cSSteven Rostedt (Red Hat) 	 * will return the next CPU found in the rto_mask.
21954bdced5cSSteven Rostedt (Red Hat) 	 *
21964bdced5cSSteven Rostedt (Red Hat) 	 * If there are no more CPUs left in the rto_mask, then a check is made
21974bdced5cSSteven Rostedt (Red Hat) 	 * against rto_loop and rto_loop_next. rto_loop is only updated with
21984bdced5cSSteven Rostedt (Red Hat) 	 * the rto_lock held, but any CPU may increment the rto_loop_next
21994bdced5cSSteven Rostedt (Red Hat) 	 * without any locking.
2200b6366f04SSteven Rostedt 	 */
22014bdced5cSSteven Rostedt (Red Hat) 	for (;;) {
22024bdced5cSSteven Rostedt (Red Hat) 
22034bdced5cSSteven Rostedt (Red Hat) 		/* When rto_cpu is -1 this acts like cpumask_first() */
22044bdced5cSSteven Rostedt (Red Hat) 		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
22054bdced5cSSteven Rostedt (Red Hat) 
22064bdced5cSSteven Rostedt (Red Hat) 		rd->rto_cpu = cpu;
22074bdced5cSSteven Rostedt (Red Hat) 
22084bdced5cSSteven Rostedt (Red Hat) 		if (cpu < nr_cpu_ids)
22094bdced5cSSteven Rostedt (Red Hat) 			return cpu;
22104bdced5cSSteven Rostedt (Red Hat) 
22114bdced5cSSteven Rostedt (Red Hat) 		rd->rto_cpu = -1;
22124bdced5cSSteven Rostedt (Red Hat) 
22134bdced5cSSteven Rostedt (Red Hat) 		/*
22144bdced5cSSteven Rostedt (Red Hat) 		 * ACQUIRE ensures we see the @rto_mask changes
22154bdced5cSSteven Rostedt (Red Hat) 		 * made prior to the @next value observed.
22164bdced5cSSteven Rostedt (Red Hat) 		 *
22174bdced5cSSteven Rostedt (Red Hat) 		 * Matches WMB in rt_set_overload().
22184bdced5cSSteven Rostedt (Red Hat) 		 */
22194bdced5cSSteven Rostedt (Red Hat) 		next = atomic_read_acquire(&rd->rto_loop_next);
22204bdced5cSSteven Rostedt (Red Hat) 
22214bdced5cSSteven Rostedt (Red Hat) 		if (rd->rto_loop == next)
22224bdced5cSSteven Rostedt (Red Hat) 			break;
22234bdced5cSSteven Rostedt (Red Hat) 
22244bdced5cSSteven Rostedt (Red Hat) 		rd->rto_loop = next;
2225b6366f04SSteven Rostedt 	}
2226b6366f04SSteven Rostedt 
22274bdced5cSSteven Rostedt (Red Hat) 	return -1;
22284bdced5cSSteven Rostedt (Red Hat) }
2229b6366f04SSteven Rostedt 
22304bdced5cSSteven Rostedt (Red Hat) static inline bool rto_start_trylock(atomic_t *v)
22314bdced5cSSteven Rostedt (Red Hat) {
22324bdced5cSSteven Rostedt (Red Hat) 	return !atomic_cmpxchg_acquire(v, 0, 1);
22334bdced5cSSteven Rostedt (Red Hat) }
22344bdced5cSSteven Rostedt (Red Hat) 
22354bdced5cSSteven Rostedt (Red Hat) static inline void rto_start_unlock(atomic_t *v)
22364bdced5cSSteven Rostedt (Red Hat) {
22374bdced5cSSteven Rostedt (Red Hat) 	atomic_set_release(v, 0);
22384bdced5cSSteven Rostedt (Red Hat) }
22394bdced5cSSteven Rostedt (Red Hat) 
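/*
 * A minimal sketch (illustrative only; the function name is
 * hypothetical) of how the two helpers above pair up as a
 * test-and-set style lock:
 */
#if 0
static void example_rto_start(struct root_domain *rd)
{
	if (!rto_start_trylock(&rd->rto_loop_start))
		return;		/* another CPU already owns the loop */

	/* ... pick the first CPU and queue the push irq work ... */

	rto_start_unlock(&rd->rto_loop_start);
}
#endif
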
22404bdced5cSSteven Rostedt (Red Hat) static void tell_cpu_to_push(struct rq *rq)
22414bdced5cSSteven Rostedt (Red Hat) {
22424bdced5cSSteven Rostedt (Red Hat) 	int cpu = -1;
22434bdced5cSSteven Rostedt (Red Hat) 
22444bdced5cSSteven Rostedt (Red Hat) 	/* Keep the loop going if the IPI is currently active */
22454bdced5cSSteven Rostedt (Red Hat) 	atomic_inc(&rq->rd->rto_loop_next);
22464bdced5cSSteven Rostedt (Red Hat) 
22474bdced5cSSteven Rostedt (Red Hat) 	/* Only one CPU can initiate a loop at a time */
22484bdced5cSSteven Rostedt (Red Hat) 	if (!rto_start_trylock(&rq->rd->rto_loop_start))
2249b6366f04SSteven Rostedt 		return;
2250b6366f04SSteven Rostedt 
22514bdced5cSSteven Rostedt (Red Hat) 	raw_spin_lock(&rq->rd->rto_lock);
2252b6366f04SSteven Rostedt 
22534bdced5cSSteven Rostedt (Red Hat) 	/*
225497fb7a0aSIngo Molnar 	 * The rto_cpu is updated under the lock; if it has a valid CPU
22554bdced5cSSteven Rostedt (Red Hat) 	 * then the IPI is still running and will continue due to the
22564bdced5cSSteven Rostedt (Red Hat) 	 * update to rto_loop_next, and nothing needs to be done here.
22574bdced5cSSteven Rostedt (Red Hat) 	 * Otherwise it is finishing up and an IPI needs to be sent.
22584bdced5cSSteven Rostedt (Red Hat) 	 */
22594bdced5cSSteven Rostedt (Red Hat) 	if (rq->rd->rto_cpu < 0)
2260ad0f1d9dSSteven Rostedt (VMware) 		cpu = rto_next_cpu(rq->rd);
22614bdced5cSSteven Rostedt (Red Hat) 
22624bdced5cSSteven Rostedt (Red Hat) 	raw_spin_unlock(&rq->rd->rto_lock);
22634bdced5cSSteven Rostedt (Red Hat) 
22644bdced5cSSteven Rostedt (Red Hat) 	rto_start_unlock(&rq->rd->rto_loop_start);
22654bdced5cSSteven Rostedt (Red Hat) 
2266364f5665SSteven Rostedt (VMware) 	if (cpu >= 0) {
2267364f5665SSteven Rostedt (VMware) 		/* Make sure the rd does not get freed while pushing */
2268364f5665SSteven Rostedt (VMware) 		sched_get_rd(rq->rd);
22694bdced5cSSteven Rostedt (Red Hat) 		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2270b6366f04SSteven Rostedt 	}
2271364f5665SSteven Rostedt (VMware) }
2272b6366f04SSteven Rostedt 
2273b6366f04SSteven Rostedt /* Called from hardirq context */
22744bdced5cSSteven Rostedt (Red Hat) void rto_push_irq_work_func(struct irq_work *work)
2275b6366f04SSteven Rostedt {
2276ad0f1d9dSSteven Rostedt (VMware) 	struct root_domain *rd =
2277ad0f1d9dSSteven Rostedt (VMware) 		container_of(work, struct root_domain, rto_push_work);
22784bdced5cSSteven Rostedt (Red Hat) 	struct rq *rq;
2279b6366f04SSteven Rostedt 	int cpu;
2280b6366f04SSteven Rostedt 
22814bdced5cSSteven Rostedt (Red Hat) 	rq = this_rq();
2282b6366f04SSteven Rostedt 
22834bdced5cSSteven Rostedt (Red Hat) 	/*
22844bdced5cSSteven Rostedt (Red Hat) 	 * We do not need to grab the lock to check for has_pushable_tasks.
22854bdced5cSSteven Rostedt (Red Hat) 	 * When it gets updated, a check is made whether a push is possible.
22864bdced5cSSteven Rostedt (Red Hat) 	 */
2287b6366f04SSteven Rostedt 	if (has_pushable_tasks(rq)) {
22885cb9eaa3SPeter Zijlstra 		raw_spin_rq_lock(rq);
2289a7c81556SPeter Zijlstra 		while (push_rt_task(rq, true))
2290a7c81556SPeter Zijlstra 			;
22915cb9eaa3SPeter Zijlstra 		raw_spin_rq_unlock(rq);
2292b6366f04SSteven Rostedt 	}
2293b6366f04SSteven Rostedt 
2294ad0f1d9dSSteven Rostedt (VMware) 	raw_spin_lock(&rd->rto_lock);
22954bdced5cSSteven Rostedt (Red Hat) 
2296b6366f04SSteven Rostedt 	/* Pass the IPI to the next rt overloaded queue */
2297ad0f1d9dSSteven Rostedt (VMware) 	cpu = rto_next_cpu(rd);
2298b6366f04SSteven Rostedt 
2299ad0f1d9dSSteven Rostedt (VMware) 	raw_spin_unlock(&rd->rto_lock);
2300b6366f04SSteven Rostedt 
2301364f5665SSteven Rostedt (VMware) 	if (cpu < 0) {
2302364f5665SSteven Rostedt (VMware) 		sched_put_rd(rd);
2303b6366f04SSteven Rostedt 		return;
2304364f5665SSteven Rostedt (VMware) 	}
2305b6366f04SSteven Rostedt 
2306b6366f04SSteven Rostedt 	/* Try the next RT overloaded CPU */
2307ad0f1d9dSSteven Rostedt (VMware) 	irq_work_queue_on(&rd->rto_push_work, cpu);
2308b6366f04SSteven Rostedt }
2309b6366f04SSteven Rostedt #endif /* HAVE_RT_PUSH_IPI */
2310b6366f04SSteven Rostedt 
23118046d680SPeter Zijlstra static void pull_rt_task(struct rq *this_rq)
2312391e43daSPeter Zijlstra {
23138046d680SPeter Zijlstra 	int this_cpu = this_rq->cpu, cpu;
23148046d680SPeter Zijlstra 	bool resched = false;
2315a7c81556SPeter Zijlstra 	struct task_struct *p, *push_task;
2316391e43daSPeter Zijlstra 	struct rq *src_rq;
2317f73c52a5SSteven Rostedt 	int rt_overload_count = rt_overloaded(this_rq);
2318391e43daSPeter Zijlstra 
2319f73c52a5SSteven Rostedt 	if (likely(!rt_overload_count))
23208046d680SPeter Zijlstra 		return;
2321391e43daSPeter Zijlstra 
23227c3f2ab7SPeter Zijlstra 	/*
23237c3f2ab7SPeter Zijlstra 	 * Match the barrier from rt_set_overload(); this guarantees that if we
23247c3f2ab7SPeter Zijlstra 	 * see overloaded we must also see the rto_mask bit.
23257c3f2ab7SPeter Zijlstra 	 */
23267c3f2ab7SPeter Zijlstra 	smp_rmb();
23277c3f2ab7SPeter Zijlstra 
2328f73c52a5SSteven Rostedt 	/* If we are the only overloaded CPU do nothing */
2329f73c52a5SSteven Rostedt 	if (rt_overload_count == 1 &&
2330f73c52a5SSteven Rostedt 	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2331f73c52a5SSteven Rostedt 		return;
2332f73c52a5SSteven Rostedt 
2333b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
2334b6366f04SSteven Rostedt 	if (sched_feat(RT_PUSH_IPI)) {
2335b6366f04SSteven Rostedt 		tell_cpu_to_push(this_rq);
23368046d680SPeter Zijlstra 		return;
2337b6366f04SSteven Rostedt 	}
2338b6366f04SSteven Rostedt #endif
2339b6366f04SSteven Rostedt 
2340391e43daSPeter Zijlstra 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
2341391e43daSPeter Zijlstra 		if (this_cpu == cpu)
2342391e43daSPeter Zijlstra 			continue;
2343391e43daSPeter Zijlstra 
2344391e43daSPeter Zijlstra 		src_rq = cpu_rq(cpu);
2345391e43daSPeter Zijlstra 
2346391e43daSPeter Zijlstra 		/*
2347391e43daSPeter Zijlstra 		 * Don't bother taking the src_rq->lock if the next highest
2348391e43daSPeter Zijlstra 		 * task is known to be lower-priority than our current task.
2349391e43daSPeter Zijlstra 		 * This may look racy, but if this value is about to go
2350391e43daSPeter Zijlstra 		 * logically higher, the src_rq will push this task away.
2351391e43daSPeter Zijlstra 		 * And if it's going logically lower, we do not care.
2352391e43daSPeter Zijlstra 		 */
2353391e43daSPeter Zijlstra 		if (src_rq->rt.highest_prio.next >=
2354391e43daSPeter Zijlstra 		    this_rq->rt.highest_prio.curr)
2355391e43daSPeter Zijlstra 			continue;
2356391e43daSPeter Zijlstra 
2357391e43daSPeter Zijlstra 		/*
2358391e43daSPeter Zijlstra 		 * We can potentially drop this_rq's lock in
2359391e43daSPeter Zijlstra 		 * double_lock_balance, and another CPU could
2360391e43daSPeter Zijlstra 		 * alter this_rq
2361391e43daSPeter Zijlstra 		 */
2362a7c81556SPeter Zijlstra 		push_task = NULL;
2363391e43daSPeter Zijlstra 		double_lock_balance(this_rq, src_rq);
2364391e43daSPeter Zijlstra 
2365391e43daSPeter Zijlstra 		/*
2366e23ee747SKirill Tkhai 		 * We can only pull a task that is pushable
2367e23ee747SKirill Tkhai 		 * on its rq, and no others.
2368391e43daSPeter Zijlstra 		 */
2369e23ee747SKirill Tkhai 		p = pick_highest_pushable_task(src_rq, this_cpu);
2370391e43daSPeter Zijlstra 
2371391e43daSPeter Zijlstra 		/*
2372391e43daSPeter Zijlstra 		 * Do we have an RT task that preempts
2373391e43daSPeter Zijlstra 		 * the to-be-scheduled task?
2374391e43daSPeter Zijlstra 		 */
2375391e43daSPeter Zijlstra 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2376391e43daSPeter Zijlstra 			WARN_ON(p == src_rq->curr);
2377da0c1e65SKirill Tkhai 			WARN_ON(!task_on_rq_queued(p));
2378391e43daSPeter Zijlstra 
2379391e43daSPeter Zijlstra 			/*
2380391e43daSPeter Zijlstra 			 * There's a chance that p is higher in priority
238197fb7a0aSIngo Molnar 			 * than what's currently running on its CPU.
23823b03706fSIngo Molnar 			 * This can happen when p is waking up and hasn't
2383391e43daSPeter Zijlstra 			 * yet had a chance to schedule. We only pull
2384391e43daSPeter Zijlstra 			 * p if it is lower in priority than the
2385391e43daSPeter Zijlstra 			 * current task on the run queue.
2386391e43daSPeter Zijlstra 			 */
2387391e43daSPeter Zijlstra 			if (p->prio < src_rq->curr->prio)
2388391e43daSPeter Zijlstra 				goto skip;
2389391e43daSPeter Zijlstra 
2390a7c81556SPeter Zijlstra 			if (is_migration_disabled(p)) {
2391a7c81556SPeter Zijlstra 				push_task = get_push_task(src_rq);
2392a7c81556SPeter Zijlstra 			} else {
2393391e43daSPeter Zijlstra 				deactivate_task(src_rq, p, 0);
2394391e43daSPeter Zijlstra 				set_task_cpu(p, this_cpu);
2395391e43daSPeter Zijlstra 				activate_task(this_rq, p, 0);
2396a7c81556SPeter Zijlstra 				resched = true;
2397a7c81556SPeter Zijlstra 			}
2398391e43daSPeter Zijlstra 			/*
2399391e43daSPeter Zijlstra 			 * We continue with the search, just in
2400391e43daSPeter Zijlstra 			 * case there's an even higher prio task
2401391e43daSPeter Zijlstra 			 * in another runqueue. (low likelihood
2402391e43daSPeter Zijlstra 			 * but possible)
2403391e43daSPeter Zijlstra 			 */
2404391e43daSPeter Zijlstra 		}
2405391e43daSPeter Zijlstra skip:
2406391e43daSPeter Zijlstra 		double_unlock_balance(this_rq, src_rq);
2407a7c81556SPeter Zijlstra 
2408a7c81556SPeter Zijlstra 		if (push_task) {
24095cb9eaa3SPeter Zijlstra 			raw_spin_rq_unlock(this_rq);
2410a7c81556SPeter Zijlstra 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2411a7c81556SPeter Zijlstra 					    push_task, &src_rq->push_work);
24125cb9eaa3SPeter Zijlstra 			raw_spin_rq_lock(this_rq);
2413a7c81556SPeter Zijlstra 		}
2414391e43daSPeter Zijlstra 	}
2415391e43daSPeter Zijlstra 
24168046d680SPeter Zijlstra 	if (resched)
24178046d680SPeter Zijlstra 		resched_curr(this_rq);
2418391e43daSPeter Zijlstra }
2419391e43daSPeter Zijlstra 
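/*
 * Note (illustrative): when the best candidate found above is
 * migration disabled it cannot be pulled; instead get_push_task()
 * takes a reference on the source rq's current task and push_cpu_stop()
 * is queued on that CPU, so the source CPU pushes its own running task
 * away and lets the pinned candidate run locally.
 */
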
2420391e43daSPeter Zijlstra /*
2421391e43daSPeter Zijlstra  * If we are not running and we are not going to reschedule soon, we should
2422391e43daSPeter Zijlstra  * try to push tasks away now
2423391e43daSPeter Zijlstra  */
2424391e43daSPeter Zijlstra static void task_woken_rt(struct rq *rq, struct task_struct *p)
2425391e43daSPeter Zijlstra {
2426804d402fSQais Yousef 	bool need_to_push = !task_running(rq, p) &&
2427391e43daSPeter Zijlstra 			    !test_tsk_need_resched(rq->curr) &&
24284b53a341SIngo Molnar 			    p->nr_cpus_allowed > 1 &&
24291baca4ceSJuri Lelli 			    (dl_task(rq->curr) || rt_task(rq->curr)) &&
24304b53a341SIngo Molnar 			    (rq->curr->nr_cpus_allowed < 2 ||
2431804d402fSQais Yousef 			     rq->curr->prio <= p->prio);
2432804d402fSQais Yousef 
2433d94a9df4SQais Yousef 	if (need_to_push)
2434391e43daSPeter Zijlstra 		push_rt_tasks(rq);
2435391e43daSPeter Zijlstra }
2436391e43daSPeter Zijlstra 
2437391e43daSPeter Zijlstra /* Assumes rq->lock is held */
2438391e43daSPeter Zijlstra static void rq_online_rt(struct rq *rq)
2439391e43daSPeter Zijlstra {
2440391e43daSPeter Zijlstra 	if (rq->rt.overloaded)
2441391e43daSPeter Zijlstra 		rt_set_overload(rq);
2442391e43daSPeter Zijlstra 
2443391e43daSPeter Zijlstra 	__enable_runtime(rq);
2444391e43daSPeter Zijlstra 
2445391e43daSPeter Zijlstra 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2446391e43daSPeter Zijlstra }
2447391e43daSPeter Zijlstra 
2448391e43daSPeter Zijlstra /* Assumes rq->lock is held */
2449391e43daSPeter Zijlstra static void rq_offline_rt(struct rq *rq)
2450391e43daSPeter Zijlstra {
2451391e43daSPeter Zijlstra 	if (rq->rt.overloaded)
2452391e43daSPeter Zijlstra 		rt_clear_overload(rq);
2453391e43daSPeter Zijlstra 
2454391e43daSPeter Zijlstra 	__disable_runtime(rq);
2455391e43daSPeter Zijlstra 
2456391e43daSPeter Zijlstra 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2457391e43daSPeter Zijlstra }
2458391e43daSPeter Zijlstra 
2459391e43daSPeter Zijlstra /*
2460391e43daSPeter Zijlstra  * When switching from the rt queue, we bring ourselves to a position
2461391e43daSPeter Zijlstra  * that we might want to pull RT tasks from other runqueues.
2462391e43daSPeter Zijlstra  */
2463391e43daSPeter Zijlstra static void switched_from_rt(struct rq *rq, struct task_struct *p)
2464391e43daSPeter Zijlstra {
2465391e43daSPeter Zijlstra 	/*
2466391e43daSPeter Zijlstra 	 * If there are other RT tasks then we will reschedule
2467391e43daSPeter Zijlstra 	 * and the scheduling of the other RT tasks will handle
2468391e43daSPeter Zijlstra 	 * the balancing. But if we are the last RT task
2469391e43daSPeter Zijlstra 	 * we may need to handle the pulling of RT tasks
2470391e43daSPeter Zijlstra 	 * now.
2471391e43daSPeter Zijlstra 	 */
2472da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
24731158ddb5SKirill Tkhai 		return;
24741158ddb5SKirill Tkhai 
247502d8ec94SIngo Molnar 	rt_queue_pull_task(rq);
2476391e43daSPeter Zijlstra }
2477391e43daSPeter Zijlstra 
247811c785b7SLi Zefan void __init init_sched_rt_class(void)
2479391e43daSPeter Zijlstra {
2480391e43daSPeter Zijlstra 	unsigned int i;
2481391e43daSPeter Zijlstra 
2482391e43daSPeter Zijlstra 	for_each_possible_cpu(i) {
2483391e43daSPeter Zijlstra 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2484391e43daSPeter Zijlstra 					GFP_KERNEL, cpu_to_node(i));
2485391e43daSPeter Zijlstra 	}
2486391e43daSPeter Zijlstra }
2487391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2488391e43daSPeter Zijlstra 
2489391e43daSPeter Zijlstra /*
2490391e43daSPeter Zijlstra  * When switching a task to RT, we may overload the runqueue
2491391e43daSPeter Zijlstra  * with RT tasks. In this case we try to push them off to
2492391e43daSPeter Zijlstra  * other runqueues.
2493391e43daSPeter Zijlstra  */
2494391e43daSPeter Zijlstra static void switched_to_rt(struct rq *rq, struct task_struct *p)
2495391e43daSPeter Zijlstra {
2496391e43daSPeter Zijlstra 	/*
2497fecfcbc2SVincent Donnefort 	 * If we are running, update the avg_rt tracking, as the running time
2498fecfcbc2SVincent Donnefort 	 * will from now on be accounted into the latter.
2499fecfcbc2SVincent Donnefort 	 */
2500fecfcbc2SVincent Donnefort 	if (task_current(rq, p)) {
2501fecfcbc2SVincent Donnefort 		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2502fecfcbc2SVincent Donnefort 		return;
2503fecfcbc2SVincent Donnefort 	}
2504fecfcbc2SVincent Donnefort 
2505fecfcbc2SVincent Donnefort 	/*
2506fecfcbc2SVincent Donnefort 	 * If we are not running we may need to preempt the current
2507fecfcbc2SVincent Donnefort 	 * running task. If that current running task is also an RT task
2508391e43daSPeter Zijlstra 	 * then see if we can move to another run queue.
2509391e43daSPeter Zijlstra 	 */
2510fecfcbc2SVincent Donnefort 	if (task_on_rq_queued(p)) {
2511391e43daSPeter Zijlstra #ifdef CONFIG_SMP
2512d94a9df4SQais Yousef 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
251302d8ec94SIngo Molnar 			rt_queue_push_tasks(rq);
2514619bd4a7SSebastian Andrzej Siewior #endif /* CONFIG_SMP */
25152fe25826SPaul E. McKenney 		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
25168875125eSKirill Tkhai 			resched_curr(rq);
2517391e43daSPeter Zijlstra 	}
2518391e43daSPeter Zijlstra }
2519391e43daSPeter Zijlstra 
2520391e43daSPeter Zijlstra /*
2521391e43daSPeter Zijlstra  * Priority of the task has changed. This may cause
2522391e43daSPeter Zijlstra  * us to initiate a push or pull.
2523391e43daSPeter Zijlstra  */
2524391e43daSPeter Zijlstra static void
2525391e43daSPeter Zijlstra prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2526391e43daSPeter Zijlstra {
2527da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p))
2528391e43daSPeter Zijlstra 		return;
2529391e43daSPeter Zijlstra 
253065bcf072SHui Su 	if (task_current(rq, p)) {
2531391e43daSPeter Zijlstra #ifdef CONFIG_SMP
2532391e43daSPeter Zijlstra 		/*
2533391e43daSPeter Zijlstra 		 * If our priority decreases while running, we
2534391e43daSPeter Zijlstra 		 * may need to pull tasks to this runqueue.
2535391e43daSPeter Zijlstra 		 */
2536391e43daSPeter Zijlstra 		if (oldprio < p->prio)
253702d8ec94SIngo Molnar 			rt_queue_pull_task(rq);
2538fd7a4bedSPeter Zijlstra 
2539391e43daSPeter Zijlstra 		/*
2540391e43daSPeter Zijlstra 		 * If there's a higher priority task waiting to run
2541fd7a4bedSPeter Zijlstra 		 * then reschedule.
2542391e43daSPeter Zijlstra 		 */
2543fd7a4bedSPeter Zijlstra 		if (p->prio > rq->rt.highest_prio.curr)
25448875125eSKirill Tkhai 			resched_curr(rq);
2545391e43daSPeter Zijlstra #else
2546391e43daSPeter Zijlstra 		/* For UP simply resched on drop of prio */
2547391e43daSPeter Zijlstra 		if (oldprio < p->prio)
25488875125eSKirill Tkhai 			resched_curr(rq);
2549391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2550391e43daSPeter Zijlstra 	} else {
2551391e43daSPeter Zijlstra 		/*
2552391e43daSPeter Zijlstra 		 * This task is not running, but if its
2553391e43daSPeter Zijlstra 		 * priority is higher than that of the current
2554391e43daSPeter Zijlstra 		 * running task, then reschedule.
2555391e43daSPeter Zijlstra 		 */
2556391e43daSPeter Zijlstra 		if (p->prio < rq->curr->prio)
25578875125eSKirill Tkhai 			resched_curr(rq);
2558391e43daSPeter Zijlstra 	}
2559391e43daSPeter Zijlstra }
2560391e43daSPeter Zijlstra 
2561b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS
2562391e43daSPeter Zijlstra static void watchdog(struct rq *rq, struct task_struct *p)
2563391e43daSPeter Zijlstra {
2564391e43daSPeter Zijlstra 	unsigned long soft, hard;
2565391e43daSPeter Zijlstra 
2566391e43daSPeter Zijlstra 	/* max may change after cur was read; this will be fixed next tick */
2567391e43daSPeter Zijlstra 	soft = task_rlimit(p, RLIMIT_RTTIME);
2568391e43daSPeter Zijlstra 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
2569391e43daSPeter Zijlstra 
2570391e43daSPeter Zijlstra 	if (soft != RLIM_INFINITY) {
2571391e43daSPeter Zijlstra 		unsigned long next;
2572391e43daSPeter Zijlstra 
257357d2aa00SYing Xue 		if (p->rt.watchdog_stamp != jiffies) {
2574391e43daSPeter Zijlstra 			p->rt.timeout++;
257557d2aa00SYing Xue 			p->rt.watchdog_stamp = jiffies;
257657d2aa00SYing Xue 		}
257757d2aa00SYing Xue 
2578391e43daSPeter Zijlstra 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
25793a245c0fSThomas Gleixner 		if (p->rt.timeout > next) {
25803a245c0fSThomas Gleixner 			posix_cputimers_rt_watchdog(&p->posix_cputimers,
25813a245c0fSThomas Gleixner 						    p->se.sum_exec_runtime);
25823a245c0fSThomas Gleixner 		}
2583391e43daSPeter Zijlstra 	}
2584391e43daSPeter Zijlstra }
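
/*
 * Worked example (illustrative, assuming HZ=1000): with a soft
 * RLIMIT_RTTIME of 2000000us, one tick is USEC_PER_SEC/HZ = 1000us,
 * so next = DIV_ROUND_UP(2000000, 1000) = 2000; once p->rt.timeout
 * counts past 2000 ticks, posix_cputimers_rt_watchdog() is armed with
 * the task's sum_exec_runtime.
 */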
2585b18b6a9cSNicolas Pitre #else
2586b18b6a9cSNicolas Pitre static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2587b18b6a9cSNicolas Pitre #endif
2588391e43daSPeter Zijlstra 
2589d84b3131SFrederic Weisbecker /*
2590d84b3131SFrederic Weisbecker  * scheduler tick hitting a task of our scheduling class.
2591d84b3131SFrederic Weisbecker  *
2592d84b3131SFrederic Weisbecker  * NOTE: This function can be called remotely by the tick offload that
2593d84b3131SFrederic Weisbecker  * goes along full dynticks. Therefore no local assumption can be made
2594d84b3131SFrederic Weisbecker  * and everything must be accessed through the @rq and @curr passed in
2595d84b3131SFrederic Weisbecker  * parameters.
2596d84b3131SFrederic Weisbecker  */
2597391e43daSPeter Zijlstra static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2598391e43daSPeter Zijlstra {
2599454c7999SColin Cross 	struct sched_rt_entity *rt_se = &p->rt;
2600454c7999SColin Cross 
2601391e43daSPeter Zijlstra 	update_curr_rt(rq);
260223127296SVincent Guittot 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2603391e43daSPeter Zijlstra 
2604391e43daSPeter Zijlstra 	watchdog(rq, p);
2605391e43daSPeter Zijlstra 
2606391e43daSPeter Zijlstra 	/*
2607391e43daSPeter Zijlstra 	 * RR tasks need a special form of timeslice management.
2608391e43daSPeter Zijlstra 	 * FIFO tasks have no timeslices.
2609391e43daSPeter Zijlstra 	 */
2610391e43daSPeter Zijlstra 	if (p->policy != SCHED_RR)
2611391e43daSPeter Zijlstra 		return;
2612391e43daSPeter Zijlstra 
2613391e43daSPeter Zijlstra 	if (--p->rt.time_slice)
2614391e43daSPeter Zijlstra 		return;
2615391e43daSPeter Zijlstra 
2616ce0dbbbbSClark Williams 	p->rt.time_slice = sched_rr_timeslice;
2617391e43daSPeter Zijlstra 
2618391e43daSPeter Zijlstra 	/*
2619e9aa39bbSLi Bin 	 * Requeue to the end of the queue if we (and all of our ancestors)
2620e9aa39bbSLi Bin 	 * are not the only element on the queue.
2621391e43daSPeter Zijlstra 	 */
2622454c7999SColin Cross 	for_each_sched_rt_entity(rt_se) {
2623454c7999SColin Cross 		if (rt_se->run_list.prev != rt_se->run_list.next) {
2624391e43daSPeter Zijlstra 			requeue_task_rt(rq, p, 0);
26258aa6f0ebSKirill Tkhai 			resched_curr(rq);
2626454c7999SColin Cross 			return;
2627454c7999SColin Cross 		}
2628391e43daSPeter Zijlstra 	}
2629391e43daSPeter Zijlstra }
2630391e43daSPeter Zijlstra 
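/*
 * Worked example (illustrative, assuming HZ=100): the default
 * RR_TIMESLICE is 100ms, i.e. sched_rr_timeslice = 10 jiffies, so a
 * SCHED_RR task with runnable peers at its priority is requeued to the
 * tail after ten ticks, while a SCHED_FIFO task returns early above
 * and is never requeued on tick.
 */
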
2631391e43daSPeter Zijlstra static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2632391e43daSPeter Zijlstra {
2633391e43daSPeter Zijlstra 	/*
2634391e43daSPeter Zijlstra 	 * Time slice is 0 for SCHED_FIFO tasks
2635391e43daSPeter Zijlstra 	 */
2636391e43daSPeter Zijlstra 	if (task->policy == SCHED_RR)
2637ce0dbbbbSClark Williams 		return sched_rr_timeslice;
2638391e43daSPeter Zijlstra 	else
2639391e43daSPeter Zijlstra 		return 0;
2640391e43daSPeter Zijlstra }
2641391e43daSPeter Zijlstra 
264243c31ac0SPeter Zijlstra DEFINE_SCHED_CLASS(rt) = {
264343c31ac0SPeter Zijlstra 
2644391e43daSPeter Zijlstra 	.enqueue_task		= enqueue_task_rt,
2645391e43daSPeter Zijlstra 	.dequeue_task		= dequeue_task_rt,
2646391e43daSPeter Zijlstra 	.yield_task		= yield_task_rt,
2647391e43daSPeter Zijlstra 
2648391e43daSPeter Zijlstra 	.check_preempt_curr	= check_preempt_curr_rt,
2649391e43daSPeter Zijlstra 
2650391e43daSPeter Zijlstra 	.pick_next_task		= pick_next_task_rt,
2651391e43daSPeter Zijlstra 	.put_prev_task		= put_prev_task_rt,
265203b7fad1SPeter Zijlstra 	.set_next_task          = set_next_task_rt,
2653391e43daSPeter Zijlstra 
2654391e43daSPeter Zijlstra #ifdef CONFIG_SMP
26556e2df058SPeter Zijlstra 	.balance		= balance_rt,
265621f56ffeSPeter Zijlstra 	.pick_task		= pick_task_rt,
2657391e43daSPeter Zijlstra 	.select_task_rq		= select_task_rq_rt,
26586c37067eSPeter Zijlstra 	.set_cpus_allowed       = set_cpus_allowed_common,
2659391e43daSPeter Zijlstra 	.rq_online              = rq_online_rt,
2660391e43daSPeter Zijlstra 	.rq_offline             = rq_offline_rt,
2661391e43daSPeter Zijlstra 	.task_woken		= task_woken_rt,
2662391e43daSPeter Zijlstra 	.switched_from		= switched_from_rt,
2663a7c81556SPeter Zijlstra 	.find_lock_rq		= find_lock_lowest_rq,
2664391e43daSPeter Zijlstra #endif
2665391e43daSPeter Zijlstra 
2666391e43daSPeter Zijlstra 	.task_tick		= task_tick_rt,
2667391e43daSPeter Zijlstra 
2668391e43daSPeter Zijlstra 	.get_rr_interval	= get_rr_interval_rt,
2669391e43daSPeter Zijlstra 
2670391e43daSPeter Zijlstra 	.prio_changed		= prio_changed_rt,
2671391e43daSPeter Zijlstra 	.switched_to		= switched_to_rt,
26726e998916SStanislaw Gruszka 
26736e998916SStanislaw Gruszka 	.update_curr		= update_curr_rt,
2674982d9cdcSPatrick Bellasi 
2675982d9cdcSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK
2676982d9cdcSPatrick Bellasi 	.uclamp_enabled		= 1,
2677982d9cdcSPatrick Bellasi #endif
2678391e43daSPeter Zijlstra };
2679391e43daSPeter Zijlstra 
26808887cd99SNicolas Pitre #ifdef CONFIG_RT_GROUP_SCHED
26818887cd99SNicolas Pitre /*
26828887cd99SNicolas Pitre  * Ensure that the real time constraints are schedulable.
26838887cd99SNicolas Pitre  */
26848887cd99SNicolas Pitre static DEFINE_MUTEX(rt_constraints_mutex);
26858887cd99SNicolas Pitre 
26868887cd99SNicolas Pitre static inline int tg_has_rt_tasks(struct task_group *tg)
26878887cd99SNicolas Pitre {
2688b4fb015eSKonstantin Khlebnikov 	struct task_struct *task;
2689b4fb015eSKonstantin Khlebnikov 	struct css_task_iter it;
2690b4fb015eSKonstantin Khlebnikov 	int ret = 0;
26918887cd99SNicolas Pitre 
26928887cd99SNicolas Pitre 	/*
26938887cd99SNicolas Pitre 	 * Autogroups do not have RT tasks; see autogroup_create().
26948887cd99SNicolas Pitre 	 */
26958887cd99SNicolas Pitre 	if (task_group_is_autogroup(tg))
26968887cd99SNicolas Pitre 		return 0;
26978887cd99SNicolas Pitre 
2698b4fb015eSKonstantin Khlebnikov 	css_task_iter_start(&tg->css, 0, &it);
2699b4fb015eSKonstantin Khlebnikov 	while (!ret && (task = css_task_iter_next(&it)))
2700b4fb015eSKonstantin Khlebnikov 		ret |= rt_task(task);
2701b4fb015eSKonstantin Khlebnikov 	css_task_iter_end(&it);
27028887cd99SNicolas Pitre 
2703b4fb015eSKonstantin Khlebnikov 	return ret;
27048887cd99SNicolas Pitre }
27058887cd99SNicolas Pitre 
27068887cd99SNicolas Pitre struct rt_schedulable_data {
27078887cd99SNicolas Pitre 	struct task_group *tg;
27088887cd99SNicolas Pitre 	u64 rt_period;
27098887cd99SNicolas Pitre 	u64 rt_runtime;
27108887cd99SNicolas Pitre };
27118887cd99SNicolas Pitre 
27128887cd99SNicolas Pitre static int tg_rt_schedulable(struct task_group *tg, void *data)
27138887cd99SNicolas Pitre {
27148887cd99SNicolas Pitre 	struct rt_schedulable_data *d = data;
27158887cd99SNicolas Pitre 	struct task_group *child;
27168887cd99SNicolas Pitre 	unsigned long total, sum = 0;
27178887cd99SNicolas Pitre 	u64 period, runtime;
27188887cd99SNicolas Pitre 
27198887cd99SNicolas Pitre 	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
27208887cd99SNicolas Pitre 	runtime = tg->rt_bandwidth.rt_runtime;
27218887cd99SNicolas Pitre 
27228887cd99SNicolas Pitre 	if (tg == d->tg) {
27238887cd99SNicolas Pitre 		period = d->rt_period;
27248887cd99SNicolas Pitre 		runtime = d->rt_runtime;
27258887cd99SNicolas Pitre 	}
27268887cd99SNicolas Pitre 
27278887cd99SNicolas Pitre 	/*
27288887cd99SNicolas Pitre 	 * Cannot have more runtime than the period.
27298887cd99SNicolas Pitre 	 */
27308887cd99SNicolas Pitre 	if (runtime > period && runtime != RUNTIME_INF)
27318887cd99SNicolas Pitre 		return -EINVAL;
27328887cd99SNicolas Pitre 
27338887cd99SNicolas Pitre 	/*
2734b4fb015eSKonstantin Khlebnikov 	 * Ensure we don't starve existing RT tasks if runtime turns zero.
27358887cd99SNicolas Pitre 	 */
2736b4fb015eSKonstantin Khlebnikov 	if (rt_bandwidth_enabled() && !runtime &&
2737b4fb015eSKonstantin Khlebnikov 	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
27388887cd99SNicolas Pitre 		return -EBUSY;
27398887cd99SNicolas Pitre 
27408887cd99SNicolas Pitre 	total = to_ratio(period, runtime);
27418887cd99SNicolas Pitre 
27428887cd99SNicolas Pitre 	/*
27438887cd99SNicolas Pitre 	 * Nobody can have more than the global setting allows.
27448887cd99SNicolas Pitre 	 */
27458887cd99SNicolas Pitre 	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
27468887cd99SNicolas Pitre 		return -EINVAL;
27478887cd99SNicolas Pitre 
27488887cd99SNicolas Pitre 	/*
27498887cd99SNicolas Pitre 	 * The sum of our children's runtime should not exceed our own.
27508887cd99SNicolas Pitre 	 */
27518887cd99SNicolas Pitre 	list_for_each_entry_rcu(child, &tg->children, siblings) {
27528887cd99SNicolas Pitre 		period = ktime_to_ns(child->rt_bandwidth.rt_period);
27538887cd99SNicolas Pitre 		runtime = child->rt_bandwidth.rt_runtime;
27548887cd99SNicolas Pitre 
27558887cd99SNicolas Pitre 		if (child == d->tg) {
27568887cd99SNicolas Pitre 			period = d->rt_period;
27578887cd99SNicolas Pitre 			runtime = d->rt_runtime;
27588887cd99SNicolas Pitre 		}
27598887cd99SNicolas Pitre 
27608887cd99SNicolas Pitre 		sum += to_ratio(period, runtime);
27618887cd99SNicolas Pitre 	}
27628887cd99SNicolas Pitre 
27638887cd99SNicolas Pitre 	if (sum > total)
27648887cd99SNicolas Pitre 		return -EINVAL;
27658887cd99SNicolas Pitre 
27668887cd99SNicolas Pitre 	return 0;
27678887cd99SNicolas Pitre }
27688887cd99SNicolas Pitre 
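/*
 * Worked example (illustrative): with the global defaults of a 1s
 * period and 950000us runtime, to_ratio() caps total utilization at
 * 95%.  A group set to runtime=500000us over period=1000000us has a
 * 50% ratio; the walk above rejects the change if that exceeds the
 * global cap, or if the summed ratios of the group's children exceed
 * the group's own 50%.
 */
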
27698887cd99SNicolas Pitre static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
27708887cd99SNicolas Pitre {
27718887cd99SNicolas Pitre 	int ret;
27728887cd99SNicolas Pitre 
27738887cd99SNicolas Pitre 	struct rt_schedulable_data data = {
27748887cd99SNicolas Pitre 		.tg = tg,
27758887cd99SNicolas Pitre 		.rt_period = period,
27768887cd99SNicolas Pitre 		.rt_runtime = runtime,
27778887cd99SNicolas Pitre 	};
27788887cd99SNicolas Pitre 
27798887cd99SNicolas Pitre 	rcu_read_lock();
27808887cd99SNicolas Pitre 	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
27818887cd99SNicolas Pitre 	rcu_read_unlock();
27828887cd99SNicolas Pitre 
27838887cd99SNicolas Pitre 	return ret;
27848887cd99SNicolas Pitre }
27858887cd99SNicolas Pitre 
27868887cd99SNicolas Pitre static int tg_set_rt_bandwidth(struct task_group *tg,
27878887cd99SNicolas Pitre 		u64 rt_period, u64 rt_runtime)
27888887cd99SNicolas Pitre {
27898887cd99SNicolas Pitre 	int i, err = 0;
27908887cd99SNicolas Pitre 
27918887cd99SNicolas Pitre 	/*
27928887cd99SNicolas Pitre 	 * Disallowing the root group RT runtime is BAD; it would prevent the
27938887cd99SNicolas Pitre 	 * kernel from creating (and/or operating) RT threads.
27948887cd99SNicolas Pitre 	 */
27958887cd99SNicolas Pitre 	if (tg == &root_task_group && rt_runtime == 0)
27968887cd99SNicolas Pitre 		return -EINVAL;
27978887cd99SNicolas Pitre 
27988887cd99SNicolas Pitre 	/* A zero period doesn't make any sense. */
27998887cd99SNicolas Pitre 	if (rt_period == 0)
28008887cd99SNicolas Pitre 		return -EINVAL;
28018887cd99SNicolas Pitre 
2802d505b8afSHuaixin Chang 	/*
2803d505b8afSHuaixin Chang 	 * Bound the quota to defend against overflow during the bandwidth shift.
2804d505b8afSHuaixin Chang 	 */
2805d505b8afSHuaixin Chang 	if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2806d505b8afSHuaixin Chang 		return -EINVAL;
2807d505b8afSHuaixin Chang 
28088887cd99SNicolas Pitre 	mutex_lock(&rt_constraints_mutex);
28098887cd99SNicolas Pitre 	err = __rt_schedulable(tg, rt_period, rt_runtime);
28108887cd99SNicolas Pitre 	if (err)
28118887cd99SNicolas Pitre 		goto unlock;
28128887cd99SNicolas Pitre 
28138887cd99SNicolas Pitre 	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
28148887cd99SNicolas Pitre 	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
28158887cd99SNicolas Pitre 	tg->rt_bandwidth.rt_runtime = rt_runtime;
28168887cd99SNicolas Pitre 
28178887cd99SNicolas Pitre 	for_each_possible_cpu(i) {
28188887cd99SNicolas Pitre 		struct rt_rq *rt_rq = tg->rt_rq[i];
28198887cd99SNicolas Pitre 
28208887cd99SNicolas Pitre 		raw_spin_lock(&rt_rq->rt_runtime_lock);
28218887cd99SNicolas Pitre 		rt_rq->rt_runtime = rt_runtime;
28228887cd99SNicolas Pitre 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
28238887cd99SNicolas Pitre 	}
28248887cd99SNicolas Pitre 	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
28258887cd99SNicolas Pitre unlock:
28268887cd99SNicolas Pitre 	mutex_unlock(&rt_constraints_mutex);
28278887cd99SNicolas Pitre 
28288887cd99SNicolas Pitre 	return err;
28298887cd99SNicolas Pitre }
28308887cd99SNicolas Pitre 
28318887cd99SNicolas Pitre int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
28328887cd99SNicolas Pitre {
28338887cd99SNicolas Pitre 	u64 rt_runtime, rt_period;
28348887cd99SNicolas Pitre 
28358887cd99SNicolas Pitre 	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
28368887cd99SNicolas Pitre 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
28378887cd99SNicolas Pitre 	if (rt_runtime_us < 0)
28388887cd99SNicolas Pitre 		rt_runtime = RUNTIME_INF;
28391a010e29SKonstantin Khlebnikov 	else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
28401a010e29SKonstantin Khlebnikov 		return -EINVAL;
28418887cd99SNicolas Pitre 
28428887cd99SNicolas Pitre 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
28438887cd99SNicolas Pitre }
28448887cd99SNicolas Pitre 
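/*
 * Usage example (illustrative, cgroup v1 cpu controller): writes to
 * cpu.rt_runtime_us land here, e.g.
 *
 *   echo 500000 > /sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us
 *
 * grants RT tasks in "mygroup" 500ms of runtime per period, and
 * writing -1 maps to RUNTIME_INF (no limit).
 */
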
28458887cd99SNicolas Pitre long sched_group_rt_runtime(struct task_group *tg)
28468887cd99SNicolas Pitre {
28478887cd99SNicolas Pitre 	u64 rt_runtime_us;
28488887cd99SNicolas Pitre 
28498887cd99SNicolas Pitre 	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
28508887cd99SNicolas Pitre 		return -1;
28518887cd99SNicolas Pitre 
28528887cd99SNicolas Pitre 	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
28538887cd99SNicolas Pitre 	do_div(rt_runtime_us, NSEC_PER_USEC);
28548887cd99SNicolas Pitre 	return rt_runtime_us;
28558887cd99SNicolas Pitre }
28568887cd99SNicolas Pitre 
28578887cd99SNicolas Pitre int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
28588887cd99SNicolas Pitre {
28598887cd99SNicolas Pitre 	u64 rt_runtime, rt_period;
28608887cd99SNicolas Pitre 
28611a010e29SKonstantin Khlebnikov 	if (rt_period_us > U64_MAX / NSEC_PER_USEC)
28621a010e29SKonstantin Khlebnikov 		return -EINVAL;
28631a010e29SKonstantin Khlebnikov 
28648887cd99SNicolas Pitre 	rt_period = rt_period_us * NSEC_PER_USEC;
28658887cd99SNicolas Pitre 	rt_runtime = tg->rt_bandwidth.rt_runtime;
28668887cd99SNicolas Pitre 
28678887cd99SNicolas Pitre 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
28688887cd99SNicolas Pitre }
28698887cd99SNicolas Pitre 
28708887cd99SNicolas Pitre long sched_group_rt_period(struct task_group *tg)
28718887cd99SNicolas Pitre {
28728887cd99SNicolas Pitre 	u64 rt_period_us;
28738887cd99SNicolas Pitre 
28748887cd99SNicolas Pitre 	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
28758887cd99SNicolas Pitre 	do_div(rt_period_us, NSEC_PER_USEC);
28768887cd99SNicolas Pitre 	return rt_period_us;
28778887cd99SNicolas Pitre }
28788887cd99SNicolas Pitre 
28798887cd99SNicolas Pitre static int sched_rt_global_constraints(void)
28808887cd99SNicolas Pitre {
28818887cd99SNicolas Pitre 	int ret = 0;
28828887cd99SNicolas Pitre 
28838887cd99SNicolas Pitre 	mutex_lock(&rt_constraints_mutex);
28848887cd99SNicolas Pitre 	ret = __rt_schedulable(NULL, 0, 0);
28858887cd99SNicolas Pitre 	mutex_unlock(&rt_constraints_mutex);
28868887cd99SNicolas Pitre 
28878887cd99SNicolas Pitre 	return ret;
28888887cd99SNicolas Pitre }
28898887cd99SNicolas Pitre 
28908887cd99SNicolas Pitre int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
28918887cd99SNicolas Pitre {
28928887cd99SNicolas Pitre 	/* Don't accept realtime tasks when there is no way for them to run */
28938887cd99SNicolas Pitre 	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
28948887cd99SNicolas Pitre 		return 0;
28958887cd99SNicolas Pitre 
28968887cd99SNicolas Pitre 	return 1;
28978887cd99SNicolas Pitre }
28988887cd99SNicolas Pitre 
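/*
 * Illustrative consequence: moving a SCHED_FIFO or SCHED_RR task into
 * a group whose cpu.rt_runtime_us is 0 is refused by the cgroup attach
 * path, since the task would never be able to run there.
 */
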
28998887cd99SNicolas Pitre #else /* !CONFIG_RT_GROUP_SCHED */
29008887cd99SNicolas Pitre static int sched_rt_global_constraints(void)
29018887cd99SNicolas Pitre {
29028887cd99SNicolas Pitre 	unsigned long flags;
29038887cd99SNicolas Pitre 	int i;
29048887cd99SNicolas Pitre 
29058887cd99SNicolas Pitre 	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
29068887cd99SNicolas Pitre 	for_each_possible_cpu(i) {
29078887cd99SNicolas Pitre 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
29088887cd99SNicolas Pitre 
29098887cd99SNicolas Pitre 		raw_spin_lock(&rt_rq->rt_runtime_lock);
29108887cd99SNicolas Pitre 		rt_rq->rt_runtime = global_rt_runtime();
29118887cd99SNicolas Pitre 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
29128887cd99SNicolas Pitre 	}
29138887cd99SNicolas Pitre 	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
29148887cd99SNicolas Pitre 
29158887cd99SNicolas Pitre 	return 0;
29168887cd99SNicolas Pitre }
29178887cd99SNicolas Pitre #endif /* CONFIG_RT_GROUP_SCHED */
29188887cd99SNicolas Pitre 
29198887cd99SNicolas Pitre static int sched_rt_global_validate(void)
29208887cd99SNicolas Pitre {
29218887cd99SNicolas Pitre 	if (sysctl_sched_rt_period <= 0)
29228887cd99SNicolas Pitre 		return -EINVAL;
29238887cd99SNicolas Pitre 
29248887cd99SNicolas Pitre 	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2925d505b8afSHuaixin Chang 		((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2926d505b8afSHuaixin Chang 		 ((u64)sysctl_sched_rt_runtime *
2927d505b8afSHuaixin Chang 			NSEC_PER_USEC > max_rt_runtime)))
29288887cd99SNicolas Pitre 		return -EINVAL;
29298887cd99SNicolas Pitre 
29308887cd99SNicolas Pitre 	return 0;
29318887cd99SNicolas Pitre }
29328887cd99SNicolas Pitre 
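/*
 * Worked example (illustrative): the defaults of
 * sysctl_sched_rt_period = 1000000us and sysctl_sched_rt_runtime =
 * 950000us pass the checks above; a runtime larger than the period,
 * or one whose nanosecond value exceeds max_rt_runtime, fails with
 * -EINVAL.
 */
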
29338887cd99SNicolas Pitre static void sched_rt_do_global(void)
29348887cd99SNicolas Pitre {
29359b58e976SLi Hua 	unsigned long flags;
29369b58e976SLi Hua 
29379b58e976SLi Hua 	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
29388887cd99SNicolas Pitre 	def_rt_bandwidth.rt_runtime = global_rt_runtime();
29398887cd99SNicolas Pitre 	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
29409b58e976SLi Hua 	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
29418887cd99SNicolas Pitre }
29428887cd99SNicolas Pitre 
294332927393SChristoph Hellwig int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
294432927393SChristoph Hellwig 		size_t *lenp, loff_t *ppos)
29458887cd99SNicolas Pitre {
29468887cd99SNicolas Pitre 	int old_period, old_runtime;
29478887cd99SNicolas Pitre 	static DEFINE_MUTEX(mutex);
29488887cd99SNicolas Pitre 	int ret;
29498887cd99SNicolas Pitre 
29508887cd99SNicolas Pitre 	mutex_lock(&mutex);
29518887cd99SNicolas Pitre 	old_period = sysctl_sched_rt_period;
29528887cd99SNicolas Pitre 	old_runtime = sysctl_sched_rt_runtime;
29538887cd99SNicolas Pitre 
29548887cd99SNicolas Pitre 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
29558887cd99SNicolas Pitre 
29568887cd99SNicolas Pitre 	if (!ret && write) {
29578887cd99SNicolas Pitre 		ret = sched_rt_global_validate();
29588887cd99SNicolas Pitre 		if (ret)
29598887cd99SNicolas Pitre 			goto undo;
29608887cd99SNicolas Pitre 
29618887cd99SNicolas Pitre 		ret = sched_dl_global_validate();
29628887cd99SNicolas Pitre 		if (ret)
29638887cd99SNicolas Pitre 			goto undo;
29648887cd99SNicolas Pitre 
29658887cd99SNicolas Pitre 		ret = sched_rt_global_constraints();
29668887cd99SNicolas Pitre 		if (ret)
29678887cd99SNicolas Pitre 			goto undo;
29688887cd99SNicolas Pitre 
29698887cd99SNicolas Pitre 		sched_rt_do_global();
29708887cd99SNicolas Pitre 		sched_dl_do_global();
29718887cd99SNicolas Pitre 	}
29728887cd99SNicolas Pitre 	if (0) {
29738887cd99SNicolas Pitre undo:
29748887cd99SNicolas Pitre 		sysctl_sched_rt_period = old_period;
29758887cd99SNicolas Pitre 		sysctl_sched_rt_runtime = old_runtime;
29768887cd99SNicolas Pitre 	}
29778887cd99SNicolas Pitre 	mutex_unlock(&mutex);
29788887cd99SNicolas Pitre 
29798887cd99SNicolas Pitre 	return ret;
29808887cd99SNicolas Pitre }
29818887cd99SNicolas Pitre 
298232927393SChristoph Hellwig int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
298332927393SChristoph Hellwig 		size_t *lenp, loff_t *ppos)
29848887cd99SNicolas Pitre {
29858887cd99SNicolas Pitre 	int ret;
29868887cd99SNicolas Pitre 	static DEFINE_MUTEX(mutex);
29878887cd99SNicolas Pitre 
29888887cd99SNicolas Pitre 	mutex_lock(&mutex);
29898887cd99SNicolas Pitre 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
29908887cd99SNicolas Pitre 	/*
29918887cd99SNicolas Pitre 	 * Make sure that internally we keep jiffies.
29928887cd99SNicolas Pitre 	 * Also, writing zero resets the timeslice to default:
29938887cd99SNicolas Pitre 	 */
29948887cd99SNicolas Pitre 	if (!ret && write) {
29958887cd99SNicolas Pitre 		sched_rr_timeslice =
29968887cd99SNicolas Pitre 			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
29978887cd99SNicolas Pitre 			msecs_to_jiffies(sysctl_sched_rr_timeslice);
29988887cd99SNicolas Pitre 	}
29998887cd99SNicolas Pitre 	mutex_unlock(&mutex);
300097fb7a0aSIngo Molnar 
30018887cd99SNicolas Pitre 	return ret;
30028887cd99SNicolas Pitre }
30038887cd99SNicolas Pitre 
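/*
 * Usage example (illustrative, assuming HZ=250): writing 100 to
 * /proc/sys/kernel/sched_rr_timeslice_ms sets sched_rr_timeslice to
 * msecs_to_jiffies(100) = 25 jiffies; writing a value <= 0 restores
 * the RR_TIMESLICE default.
 */
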
3004391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
3005391e43daSPeter Zijlstra void print_rt_stats(struct seq_file *m, int cpu)
3006391e43daSPeter Zijlstra {
3007391e43daSPeter Zijlstra 	rt_rq_iter_t iter;
3008391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
3009391e43daSPeter Zijlstra 
3010391e43daSPeter Zijlstra 	rcu_read_lock();
3011391e43daSPeter Zijlstra 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
3012391e43daSPeter Zijlstra 		print_rt_rq(m, cpu, rt_rq);
3013391e43daSPeter Zijlstra 	rcu_read_unlock();
3014391e43daSPeter Zijlstra }
3015391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_DEBUG */
3016