xref: /openbmc/linux/kernel/sched/rt.c (revision 4cfafd3082afc707653aeb82e9f8e7b596fbbfd6)
1391e43daSPeter Zijlstra /*
2391e43daSPeter Zijlstra  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3391e43daSPeter Zijlstra  * policies)
4391e43daSPeter Zijlstra  */
5391e43daSPeter Zijlstra 
6391e43daSPeter Zijlstra #include "sched.h"
7391e43daSPeter Zijlstra 
8391e43daSPeter Zijlstra #include <linux/slab.h>
9b6366f04SSteven Rostedt #include <linux/irq_work.h>
10391e43daSPeter Zijlstra 
11ce0dbbbbSClark Williams int sched_rr_timeslice = RR_TIMESLICE;
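/*
 * Illustrative userspace sketch (not part of this file): the quantum
 * that sched_rr_timeslice controls can be read back for the calling
 * thread with the POSIX sched_rr_get_interval() syscall.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)	/* pid 0: calling thread */
		printf("RR timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}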
12ce0dbbbbSClark Williams 
13391e43daSPeter Zijlstra static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
14391e43daSPeter Zijlstra 
15391e43daSPeter Zijlstra struct rt_bandwidth def_rt_bandwidth;
16391e43daSPeter Zijlstra 
17391e43daSPeter Zijlstra static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
18391e43daSPeter Zijlstra {
19391e43daSPeter Zijlstra 	struct rt_bandwidth *rt_b =
20391e43daSPeter Zijlstra 		container_of(timer, struct rt_bandwidth, rt_period_timer);
21391e43daSPeter Zijlstra 	int idle = 0;
2277a4d1a1SPeter Zijlstra 	int overrun;
23391e43daSPeter Zijlstra 
2477a4d1a1SPeter Zijlstra 	raw_spin_lock(&rt_b->rt_runtime_lock);
25391e43daSPeter Zijlstra 	for (;;) {
2677a4d1a1SPeter Zijlstra 		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
27391e43daSPeter Zijlstra 		if (!overrun)
28391e43daSPeter Zijlstra 			break;
29391e43daSPeter Zijlstra 
3077a4d1a1SPeter Zijlstra 		raw_spin_unlock(&rt_b->rt_runtime_lock);
31391e43daSPeter Zijlstra 		idle = do_sched_rt_period_timer(rt_b, overrun);
3277a4d1a1SPeter Zijlstra 		raw_spin_lock(&rt_b->rt_runtime_lock);
33391e43daSPeter Zijlstra 	}
34*4cfafd30SPeter Zijlstra 	if (idle)
35*4cfafd30SPeter Zijlstra 		rt_b->rt_period_active = 0;
3677a4d1a1SPeter Zijlstra 	raw_spin_unlock(&rt_b->rt_runtime_lock);
37391e43daSPeter Zijlstra 
38391e43daSPeter Zijlstra 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
39391e43daSPeter Zijlstra }
40391e43daSPeter Zijlstra 
41391e43daSPeter Zijlstra void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
42391e43daSPeter Zijlstra {
43391e43daSPeter Zijlstra 	rt_b->rt_period = ns_to_ktime(period);
44391e43daSPeter Zijlstra 	rt_b->rt_runtime = runtime;
45391e43daSPeter Zijlstra 
46391e43daSPeter Zijlstra 	raw_spin_lock_init(&rt_b->rt_runtime_lock);
47391e43daSPeter Zijlstra 
48391e43daSPeter Zijlstra 	hrtimer_init(&rt_b->rt_period_timer,
49391e43daSPeter Zijlstra 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
50391e43daSPeter Zijlstra 	rt_b->rt_period_timer.function = sched_rt_period_timer;
51391e43daSPeter Zijlstra }
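/*
 * For the root group, the period/runtime pair passed in here comes
 * from the sched_rt_period_us and sched_rt_runtime_us sysctls
 * (defaults 1000000 and 950000: RT tasks may use at most 95% of each
 * one-second period). A minimal userspace reader of those knobs,
 * purely for illustration:
 */
#include <stdio.h>

static long read_knob(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("RT bandwidth: %ld us of every %ld us\n",
	       read_knob("/proc/sys/kernel/sched_rt_runtime_us"),
	       read_knob("/proc/sys/kernel/sched_rt_period_us"));
	return 0;
}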
52391e43daSPeter Zijlstra 
53391e43daSPeter Zijlstra static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
54391e43daSPeter Zijlstra {
55391e43daSPeter Zijlstra 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
56391e43daSPeter Zijlstra 		return;
57391e43daSPeter Zijlstra 
58391e43daSPeter Zijlstra 	raw_spin_lock(&rt_b->rt_runtime_lock);
59*4cfafd30SPeter Zijlstra 	if (!rt_b->rt_period_active) {
60*4cfafd30SPeter Zijlstra 		rt_b->rt_period_active = 1;
61*4cfafd30SPeter Zijlstra 		hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
62*4cfafd30SPeter Zijlstra 		hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
63*4cfafd30SPeter Zijlstra 	}
64391e43daSPeter Zijlstra 	raw_spin_unlock(&rt_b->rt_runtime_lock);
65391e43daSPeter Zijlstra }
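/*
 * rt_period_active makes the start idempotent: many enqueues can race
 * here, but only the first arms the period timer; the timer callback
 * above clears the flag again once it goes idle. A hedged userspace
 * model of the same start-once idiom, using a mutex where the kernel
 * uses rt_runtime_lock (demo_* names invented; link with -lpthread):
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static bool demo_period_active;

static void demo_start_timer(void)
{
	pthread_mutex_lock(&demo_lock);
	if (!demo_period_active) {
		demo_period_active = true;
		printf("timer armed\n");	/* printed exactly once */
	}
	pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
	demo_start_timer();
	demo_start_timer();	/* no-op: period already active */
	return 0;
}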
66391e43daSPeter Zijlstra 
67b6366f04SSteven Rostedt #ifdef CONFIG_SMP
68b6366f04SSteven Rostedt static void push_irq_work_func(struct irq_work *work);
69b6366f04SSteven Rostedt #endif
70b6366f04SSteven Rostedt 
7107c54f7aSAbel Vesa void init_rt_rq(struct rt_rq *rt_rq)
72391e43daSPeter Zijlstra {
73391e43daSPeter Zijlstra 	struct rt_prio_array *array;
74391e43daSPeter Zijlstra 	int i;
75391e43daSPeter Zijlstra 
76391e43daSPeter Zijlstra 	array = &rt_rq->active;
77391e43daSPeter Zijlstra 	for (i = 0; i < MAX_RT_PRIO; i++) {
78391e43daSPeter Zijlstra 		INIT_LIST_HEAD(array->queue + i);
79391e43daSPeter Zijlstra 		__clear_bit(i, array->bitmap);
80391e43daSPeter Zijlstra 	}
81391e43daSPeter Zijlstra 	/* delimiter for bitsearch: */
82391e43daSPeter Zijlstra 	__set_bit(MAX_RT_PRIO, array->bitmap);
83391e43daSPeter Zijlstra 
84391e43daSPeter Zijlstra #if defined CONFIG_SMP
85391e43daSPeter Zijlstra 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
86391e43daSPeter Zijlstra 	rt_rq->highest_prio.next = MAX_RT_PRIO;
87391e43daSPeter Zijlstra 	rt_rq->rt_nr_migratory = 0;
88391e43daSPeter Zijlstra 	rt_rq->overloaded = 0;
89391e43daSPeter Zijlstra 	plist_head_init(&rt_rq->pushable_tasks);
90b6366f04SSteven Rostedt 
91b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
92b6366f04SSteven Rostedt 	rt_rq->push_flags = 0;
93b6366f04SSteven Rostedt 	rt_rq->push_cpu = nr_cpu_ids;
94b6366f04SSteven Rostedt 	raw_spin_lock_init(&rt_rq->push_lock);
95b6366f04SSteven Rostedt 	init_irq_work(&rt_rq->push_work, push_irq_work_func);
96391e43daSPeter Zijlstra #endif
97b6366f04SSteven Rostedt #endif /* CONFIG_SMP */
98f4ebcbc0SKirill Tkhai 	/* We start in dequeued state, because no RT tasks are queued */
99f4ebcbc0SKirill Tkhai 	rt_rq->rt_queued = 0;
100391e43daSPeter Zijlstra 
101391e43daSPeter Zijlstra 	rt_rq->rt_time = 0;
102391e43daSPeter Zijlstra 	rt_rq->rt_throttled = 0;
103391e43daSPeter Zijlstra 	rt_rq->rt_runtime = 0;
104391e43daSPeter Zijlstra 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
105391e43daSPeter Zijlstra }
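/*
 * Why the delimiter bit set above matters: the priority scan
 * (sched_find_first_bit()) can then run unconditionally -- an empty
 * array yields MAX_RT_PRIO instead of "not found". Self-contained
 * model with MAX_RT_PRIO shrunk to 8 for illustration:
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_MAX_RT_PRIO 8

static int demo_find_first(unsigned int bitmap)
{
	int i;

	for (i = 0; i <= DEMO_MAX_RT_PRIO; i++)
		if (bitmap & (1u << i))
			return i;
	return -1;	/* unreachable once the delimiter bit is set */
}

int main(void)
{
	unsigned int bitmap = 1u << DEMO_MAX_RT_PRIO;	/* delimiter only */

	assert(demo_find_first(bitmap) == DEMO_MAX_RT_PRIO);	/* "empty" */
	bitmap |= 1u << 3;			/* queue a prio-3 entity */
	assert(demo_find_first(bitmap) == 3);	/* highest prio found first */
	printf("ok\n");
	return 0;
}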
106391e43daSPeter Zijlstra 
107391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
108391e43daSPeter Zijlstra static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
109391e43daSPeter Zijlstra {
110391e43daSPeter Zijlstra 	hrtimer_cancel(&rt_b->rt_period_timer);
111391e43daSPeter Zijlstra }
112391e43daSPeter Zijlstra 
113391e43daSPeter Zijlstra #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
114391e43daSPeter Zijlstra 
115391e43daSPeter Zijlstra static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
116391e43daSPeter Zijlstra {
117391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
118391e43daSPeter Zijlstra 	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
119391e43daSPeter Zijlstra #endif
120391e43daSPeter Zijlstra 	return container_of(rt_se, struct task_struct, rt);
121391e43daSPeter Zijlstra }
122391e43daSPeter Zijlstra 
123391e43daSPeter Zijlstra static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
124391e43daSPeter Zijlstra {
125391e43daSPeter Zijlstra 	return rt_rq->rq;
126391e43daSPeter Zijlstra }
127391e43daSPeter Zijlstra 
128391e43daSPeter Zijlstra static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
129391e43daSPeter Zijlstra {
130391e43daSPeter Zijlstra 	return rt_se->rt_rq;
131391e43daSPeter Zijlstra }
132391e43daSPeter Zijlstra 
133653d07a6SKirill Tkhai static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
134653d07a6SKirill Tkhai {
135653d07a6SKirill Tkhai 	struct rt_rq *rt_rq = rt_se->rt_rq;
136653d07a6SKirill Tkhai 
137653d07a6SKirill Tkhai 	return rt_rq->rq;
138653d07a6SKirill Tkhai }
139653d07a6SKirill Tkhai 
140391e43daSPeter Zijlstra void free_rt_sched_group(struct task_group *tg)
141391e43daSPeter Zijlstra {
142391e43daSPeter Zijlstra 	int i;
143391e43daSPeter Zijlstra 
144391e43daSPeter Zijlstra 	if (tg->rt_se)
145391e43daSPeter Zijlstra 		destroy_rt_bandwidth(&tg->rt_bandwidth);
146391e43daSPeter Zijlstra 
147391e43daSPeter Zijlstra 	for_each_possible_cpu(i) {
148391e43daSPeter Zijlstra 		if (tg->rt_rq)
149391e43daSPeter Zijlstra 			kfree(tg->rt_rq[i]);
150391e43daSPeter Zijlstra 		if (tg->rt_se)
151391e43daSPeter Zijlstra 			kfree(tg->rt_se[i]);
152391e43daSPeter Zijlstra 	}
153391e43daSPeter Zijlstra 
154391e43daSPeter Zijlstra 	kfree(tg->rt_rq);
155391e43daSPeter Zijlstra 	kfree(tg->rt_se);
156391e43daSPeter Zijlstra }
157391e43daSPeter Zijlstra 
158391e43daSPeter Zijlstra void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
159391e43daSPeter Zijlstra 		struct sched_rt_entity *rt_se, int cpu,
160391e43daSPeter Zijlstra 		struct sched_rt_entity *parent)
161391e43daSPeter Zijlstra {
162391e43daSPeter Zijlstra 	struct rq *rq = cpu_rq(cpu);
163391e43daSPeter Zijlstra 
164391e43daSPeter Zijlstra 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
165391e43daSPeter Zijlstra 	rt_rq->rt_nr_boosted = 0;
166391e43daSPeter Zijlstra 	rt_rq->rq = rq;
167391e43daSPeter Zijlstra 	rt_rq->tg = tg;
168391e43daSPeter Zijlstra 
169391e43daSPeter Zijlstra 	tg->rt_rq[cpu] = rt_rq;
170391e43daSPeter Zijlstra 	tg->rt_se[cpu] = rt_se;
171391e43daSPeter Zijlstra 
172391e43daSPeter Zijlstra 	if (!rt_se)
173391e43daSPeter Zijlstra 		return;
174391e43daSPeter Zijlstra 
175391e43daSPeter Zijlstra 	if (!parent)
176391e43daSPeter Zijlstra 		rt_se->rt_rq = &rq->rt;
177391e43daSPeter Zijlstra 	else
178391e43daSPeter Zijlstra 		rt_se->rt_rq = parent->my_q;
179391e43daSPeter Zijlstra 
180391e43daSPeter Zijlstra 	rt_se->my_q = rt_rq;
181391e43daSPeter Zijlstra 	rt_se->parent = parent;
182391e43daSPeter Zijlstra 	INIT_LIST_HEAD(&rt_se->run_list);
183391e43daSPeter Zijlstra }
184391e43daSPeter Zijlstra 
185391e43daSPeter Zijlstra int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
186391e43daSPeter Zijlstra {
187391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
188391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se;
189391e43daSPeter Zijlstra 	int i;
190391e43daSPeter Zijlstra 
191391e43daSPeter Zijlstra 	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
192391e43daSPeter Zijlstra 	if (!tg->rt_rq)
193391e43daSPeter Zijlstra 		goto err;
194391e43daSPeter Zijlstra 	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
195391e43daSPeter Zijlstra 	if (!tg->rt_se)
196391e43daSPeter Zijlstra 		goto err;
197391e43daSPeter Zijlstra 
198391e43daSPeter Zijlstra 	init_rt_bandwidth(&tg->rt_bandwidth,
199391e43daSPeter Zijlstra 			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
200391e43daSPeter Zijlstra 
201391e43daSPeter Zijlstra 	for_each_possible_cpu(i) {
202391e43daSPeter Zijlstra 		rt_rq = kzalloc_node(sizeof(struct rt_rq),
203391e43daSPeter Zijlstra 				     GFP_KERNEL, cpu_to_node(i));
204391e43daSPeter Zijlstra 		if (!rt_rq)
205391e43daSPeter Zijlstra 			goto err;
206391e43daSPeter Zijlstra 
207391e43daSPeter Zijlstra 		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
208391e43daSPeter Zijlstra 				     GFP_KERNEL, cpu_to_node(i));
209391e43daSPeter Zijlstra 		if (!rt_se)
210391e43daSPeter Zijlstra 			goto err_free_rq;
211391e43daSPeter Zijlstra 
21207c54f7aSAbel Vesa 		init_rt_rq(rt_rq);
213391e43daSPeter Zijlstra 		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
214391e43daSPeter Zijlstra 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
215391e43daSPeter Zijlstra 	}
216391e43daSPeter Zijlstra 
217391e43daSPeter Zijlstra 	return 1;
218391e43daSPeter Zijlstra 
219391e43daSPeter Zijlstra err_free_rq:
220391e43daSPeter Zijlstra 	kfree(rt_rq);
221391e43daSPeter Zijlstra err:
222391e43daSPeter Zijlstra 	return 0;
223391e43daSPeter Zijlstra }
224391e43daSPeter Zijlstra 
225391e43daSPeter Zijlstra #else /* CONFIG_RT_GROUP_SCHED */
226391e43daSPeter Zijlstra 
227391e43daSPeter Zijlstra #define rt_entity_is_task(rt_se) (1)
228391e43daSPeter Zijlstra 
229391e43daSPeter Zijlstra static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
230391e43daSPeter Zijlstra {
231391e43daSPeter Zijlstra 	return container_of(rt_se, struct task_struct, rt);
232391e43daSPeter Zijlstra }
233391e43daSPeter Zijlstra 
234391e43daSPeter Zijlstra static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
235391e43daSPeter Zijlstra {
236391e43daSPeter Zijlstra 	return container_of(rt_rq, struct rq, rt);
237391e43daSPeter Zijlstra }
238391e43daSPeter Zijlstra 
239653d07a6SKirill Tkhai static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
240391e43daSPeter Zijlstra {
241391e43daSPeter Zijlstra 	struct task_struct *p = rt_task_of(rt_se);
242653d07a6SKirill Tkhai 
243653d07a6SKirill Tkhai 	return task_rq(p);
244653d07a6SKirill Tkhai }
245653d07a6SKirill Tkhai 
246653d07a6SKirill Tkhai static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
247653d07a6SKirill Tkhai {
248653d07a6SKirill Tkhai 	struct rq *rq = rq_of_rt_se(rt_se);
249391e43daSPeter Zijlstra 
250391e43daSPeter Zijlstra 	return &rq->rt;
251391e43daSPeter Zijlstra }
252391e43daSPeter Zijlstra 
253391e43daSPeter Zijlstra void free_rt_sched_group(struct task_group *tg) { }
254391e43daSPeter Zijlstra 
255391e43daSPeter Zijlstra int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
256391e43daSPeter Zijlstra {
257391e43daSPeter Zijlstra 	return 1;
258391e43daSPeter Zijlstra }
259391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */
260391e43daSPeter Zijlstra 
261391e43daSPeter Zijlstra #ifdef CONFIG_SMP
262391e43daSPeter Zijlstra 
26338033c37SPeter Zijlstra static int pull_rt_task(struct rq *this_rq);
26438033c37SPeter Zijlstra 
265dc877341SPeter Zijlstra static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
266dc877341SPeter Zijlstra {
267dc877341SPeter Zijlstra 	/* Try to pull RT tasks here if we lower this rq's prio */
268dc877341SPeter Zijlstra 	return rq->rt.highest_prio.curr > prev->prio;
269dc877341SPeter Zijlstra }
270dc877341SPeter Zijlstra 
271391e43daSPeter Zijlstra static inline int rt_overloaded(struct rq *rq)
272391e43daSPeter Zijlstra {
273391e43daSPeter Zijlstra 	return atomic_read(&rq->rd->rto_count);
274391e43daSPeter Zijlstra }
275391e43daSPeter Zijlstra 
276391e43daSPeter Zijlstra static inline void rt_set_overload(struct rq *rq)
277391e43daSPeter Zijlstra {
278391e43daSPeter Zijlstra 	if (!rq->online)
279391e43daSPeter Zijlstra 		return;
280391e43daSPeter Zijlstra 
281391e43daSPeter Zijlstra 	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
282391e43daSPeter Zijlstra 	/*
283391e43daSPeter Zijlstra 	 * Make sure the mask is visible before we set
284391e43daSPeter Zijlstra 	 * the overload count. That is checked to determine
285391e43daSPeter Zijlstra 	 * if we should look at the mask. It would be a shame
286391e43daSPeter Zijlstra 	 * if we looked at the mask, but the mask was not
287391e43daSPeter Zijlstra 	 * updated yet.
2887c3f2ab7SPeter Zijlstra 	 *
2897c3f2ab7SPeter Zijlstra 	 * Matched by the barrier in pull_rt_task().
290391e43daSPeter Zijlstra 	 */
2917c3f2ab7SPeter Zijlstra 	smp_wmb();
292391e43daSPeter Zijlstra 	atomic_inc(&rq->rd->rto_count);
293391e43daSPeter Zijlstra }
294391e43daSPeter Zijlstra 
295391e43daSPeter Zijlstra static inline void rt_clear_overload(struct rq *rq)
296391e43daSPeter Zijlstra {
297391e43daSPeter Zijlstra 	if (!rq->online)
298391e43daSPeter Zijlstra 		return;
299391e43daSPeter Zijlstra 
300391e43daSPeter Zijlstra 	/* the order here really doesn't matter */
301391e43daSPeter Zijlstra 	atomic_dec(&rq->rd->rto_count);
302391e43daSPeter Zijlstra 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
303391e43daSPeter Zijlstra }
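/*
 * The smp_wmb() in rt_set_overload() pairs with a read barrier on the
 * pull side: writers publish rto_mask before bumping rto_count, and
 * readers check rto_count before trusting rto_mask. A self-contained
 * C11 model of that publish/observe ordering -- illustrative only,
 * the kernel uses its own barrier primitives, not <stdatomic.h>:
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned long demo_rto_mask;	/* stands in for the cpumask */
static atomic_int demo_rto_count;

static void demo_set_overload(int cpu)
{
	atomic_fetch_or_explicit(&demo_rto_mask, 1UL << cpu,
				 memory_order_relaxed);
	/* release: the mask update is visible before the count is */
	atomic_fetch_add_explicit(&demo_rto_count, 1, memory_order_release);
}

static bool demo_any_overload(unsigned long *mask)
{
	if (!atomic_load_explicit(&demo_rto_count, memory_order_acquire))
		return false;
	/* the acquire above orders this load after the count check */
	*mask = atomic_load_explicit(&demo_rto_mask, memory_order_relaxed);
	return true;
}

int main(void)
{
	unsigned long mask;

	demo_set_overload(2);
	return demo_any_overload(&mask) && mask == 4UL ? 0 : 1;
}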
304391e43daSPeter Zijlstra 
305391e43daSPeter Zijlstra static void update_rt_migration(struct rt_rq *rt_rq)
306391e43daSPeter Zijlstra {
307391e43daSPeter Zijlstra 	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
308391e43daSPeter Zijlstra 		if (!rt_rq->overloaded) {
309391e43daSPeter Zijlstra 			rt_set_overload(rq_of_rt_rq(rt_rq));
310391e43daSPeter Zijlstra 			rt_rq->overloaded = 1;
311391e43daSPeter Zijlstra 		}
312391e43daSPeter Zijlstra 	} else if (rt_rq->overloaded) {
313391e43daSPeter Zijlstra 		rt_clear_overload(rq_of_rt_rq(rt_rq));
314391e43daSPeter Zijlstra 		rt_rq->overloaded = 0;
315391e43daSPeter Zijlstra 	}
316391e43daSPeter Zijlstra }
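/*
 * The predicate above in isolation: a runqueue advertises overload
 * only when it holds more than one RT task and at least one of them
 * may migrate. A tiny self-checking model (demo_* names invented):
 */
#include <assert.h>

static int demo_overloaded(unsigned int nr_total, unsigned int nr_migratory)
{
	return nr_migratory && nr_total > 1;
}

int main(void)
{
	assert(!demo_overloaded(1, 1));	/* lone task: nothing to push away */
	assert(!demo_overloaded(2, 0));	/* two tasks, all pinned: can't move */
	assert(demo_overloaded(2, 1));	/* pushable surplus exists */
	return 0;
}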
317391e43daSPeter Zijlstra 
318391e43daSPeter Zijlstra static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
319391e43daSPeter Zijlstra {
32029baa747SPeter Zijlstra 	struct task_struct *p;
32129baa747SPeter Zijlstra 
322391e43daSPeter Zijlstra 	if (!rt_entity_is_task(rt_se))
323391e43daSPeter Zijlstra 		return;
324391e43daSPeter Zijlstra 
32529baa747SPeter Zijlstra 	p = rt_task_of(rt_se);
326391e43daSPeter Zijlstra 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
327391e43daSPeter Zijlstra 
328391e43daSPeter Zijlstra 	rt_rq->rt_nr_total++;
32929baa747SPeter Zijlstra 	if (p->nr_cpus_allowed > 1)
330391e43daSPeter Zijlstra 		rt_rq->rt_nr_migratory++;
331391e43daSPeter Zijlstra 
332391e43daSPeter Zijlstra 	update_rt_migration(rt_rq);
333391e43daSPeter Zijlstra }
334391e43daSPeter Zijlstra 
335391e43daSPeter Zijlstra static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
336391e43daSPeter Zijlstra {
33729baa747SPeter Zijlstra 	struct task_struct *p;
33829baa747SPeter Zijlstra 
339391e43daSPeter Zijlstra 	if (!rt_entity_is_task(rt_se))
340391e43daSPeter Zijlstra 		return;
341391e43daSPeter Zijlstra 
34229baa747SPeter Zijlstra 	p = rt_task_of(rt_se);
343391e43daSPeter Zijlstra 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
344391e43daSPeter Zijlstra 
345391e43daSPeter Zijlstra 	rt_rq->rt_nr_total--;
34629baa747SPeter Zijlstra 	if (p->nr_cpus_allowed > 1)
347391e43daSPeter Zijlstra 		rt_rq->rt_nr_migratory--;
348391e43daSPeter Zijlstra 
349391e43daSPeter Zijlstra 	update_rt_migration(rt_rq);
350391e43daSPeter Zijlstra }
351391e43daSPeter Zijlstra 
352391e43daSPeter Zijlstra static inline int has_pushable_tasks(struct rq *rq)
353391e43daSPeter Zijlstra {
354391e43daSPeter Zijlstra 	return !plist_head_empty(&rq->rt.pushable_tasks);
355391e43daSPeter Zijlstra }
356391e43daSPeter Zijlstra 
357dc877341SPeter Zijlstra static inline void set_post_schedule(struct rq *rq)
358dc877341SPeter Zijlstra {
359dc877341SPeter Zijlstra 	/*
360dc877341SPeter Zijlstra 	 * We detect this state here so that we can avoid taking the RQ
361dc877341SPeter Zijlstra 	 * lock again later if there is no need to push
362dc877341SPeter Zijlstra 	 */
363dc877341SPeter Zijlstra 	rq->post_schedule = has_pushable_tasks(rq);
364dc877341SPeter Zijlstra }
365dc877341SPeter Zijlstra 
366391e43daSPeter Zijlstra static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
367391e43daSPeter Zijlstra {
368391e43daSPeter Zijlstra 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
369391e43daSPeter Zijlstra 	plist_node_init(&p->pushable_tasks, p->prio);
370391e43daSPeter Zijlstra 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
371391e43daSPeter Zijlstra 
372391e43daSPeter Zijlstra 	/* Update the highest prio pushable task */
373391e43daSPeter Zijlstra 	if (p->prio < rq->rt.highest_prio.next)
374391e43daSPeter Zijlstra 		rq->rt.highest_prio.next = p->prio;
375391e43daSPeter Zijlstra }
376391e43daSPeter Zijlstra 
377391e43daSPeter Zijlstra static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
378391e43daSPeter Zijlstra {
379391e43daSPeter Zijlstra 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
380391e43daSPeter Zijlstra 
381391e43daSPeter Zijlstra 	/* Update the new highest prio pushable task */
382391e43daSPeter Zijlstra 	if (has_pushable_tasks(rq)) {
383391e43daSPeter Zijlstra 		p = plist_first_entry(&rq->rt.pushable_tasks,
384391e43daSPeter Zijlstra 				      struct task_struct, pushable_tasks);
385391e43daSPeter Zijlstra 		rq->rt.highest_prio.next = p->prio;
386391e43daSPeter Zijlstra 	} else
387391e43daSPeter Zijlstra 		rq->rt.highest_prio.next = MAX_RT_PRIO;
388391e43daSPeter Zijlstra }
389391e43daSPeter Zijlstra 
390391e43daSPeter Zijlstra #else
391391e43daSPeter Zijlstra 
392391e43daSPeter Zijlstra static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
393391e43daSPeter Zijlstra {
394391e43daSPeter Zijlstra }
395391e43daSPeter Zijlstra 
396391e43daSPeter Zijlstra static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
397391e43daSPeter Zijlstra {
398391e43daSPeter Zijlstra }
399391e43daSPeter Zijlstra 
400391e43daSPeter Zijlstra static inline
401391e43daSPeter Zijlstra void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
402391e43daSPeter Zijlstra {
403391e43daSPeter Zijlstra }
404391e43daSPeter Zijlstra 
405391e43daSPeter Zijlstra static inline
406391e43daSPeter Zijlstra void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
407391e43daSPeter Zijlstra {
408391e43daSPeter Zijlstra }
409391e43daSPeter Zijlstra 
410dc877341SPeter Zijlstra static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
411dc877341SPeter Zijlstra {
412dc877341SPeter Zijlstra 	return false;
413dc877341SPeter Zijlstra }
414dc877341SPeter Zijlstra 
415dc877341SPeter Zijlstra static inline int pull_rt_task(struct rq *this_rq)
416dc877341SPeter Zijlstra {
417dc877341SPeter Zijlstra 	return 0;
418dc877341SPeter Zijlstra }
419dc877341SPeter Zijlstra 
420dc877341SPeter Zijlstra static inline void set_post_schedule(struct rq *rq)
421dc877341SPeter Zijlstra {
422dc877341SPeter Zijlstra }
423391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
424391e43daSPeter Zijlstra 
425f4ebcbc0SKirill Tkhai static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
426f4ebcbc0SKirill Tkhai static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
427f4ebcbc0SKirill Tkhai 
428391e43daSPeter Zijlstra static inline int on_rt_rq(struct sched_rt_entity *rt_se)
429391e43daSPeter Zijlstra {
430391e43daSPeter Zijlstra 	return !list_empty(&rt_se->run_list);
431391e43daSPeter Zijlstra }
432391e43daSPeter Zijlstra 
433391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
434391e43daSPeter Zijlstra 
435391e43daSPeter Zijlstra static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
436391e43daSPeter Zijlstra {
437391e43daSPeter Zijlstra 	if (!rt_rq->tg)
438391e43daSPeter Zijlstra 		return RUNTIME_INF;
439391e43daSPeter Zijlstra 
440391e43daSPeter Zijlstra 	return rt_rq->rt_runtime;
441391e43daSPeter Zijlstra }
442391e43daSPeter Zijlstra 
443391e43daSPeter Zijlstra static inline u64 sched_rt_period(struct rt_rq *rt_rq)
444391e43daSPeter Zijlstra {
445391e43daSPeter Zijlstra 	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
446391e43daSPeter Zijlstra }
447391e43daSPeter Zijlstra 
448391e43daSPeter Zijlstra typedef struct task_group *rt_rq_iter_t;
449391e43daSPeter Zijlstra 
450391e43daSPeter Zijlstra static inline struct task_group *next_task_group(struct task_group *tg)
451391e43daSPeter Zijlstra {
452391e43daSPeter Zijlstra 	do {
453391e43daSPeter Zijlstra 		tg = list_entry_rcu(tg->list.next,
454391e43daSPeter Zijlstra 			typeof(struct task_group), list);
455391e43daSPeter Zijlstra 	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
456391e43daSPeter Zijlstra 
457391e43daSPeter Zijlstra 	if (&tg->list == &task_groups)
458391e43daSPeter Zijlstra 		tg = NULL;
459391e43daSPeter Zijlstra 
460391e43daSPeter Zijlstra 	return tg;
461391e43daSPeter Zijlstra }
462391e43daSPeter Zijlstra 
463391e43daSPeter Zijlstra #define for_each_rt_rq(rt_rq, iter, rq)					\
464391e43daSPeter Zijlstra 	for (iter = container_of(&task_groups, typeof(*iter), list);	\
465391e43daSPeter Zijlstra 		(iter = next_task_group(iter)) &&			\
466391e43daSPeter Zijlstra 		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
467391e43daSPeter Zijlstra 
468391e43daSPeter Zijlstra #define for_each_sched_rt_entity(rt_se) \
469391e43daSPeter Zijlstra 	for (; rt_se; rt_se = rt_se->parent)
470391e43daSPeter Zijlstra 
471391e43daSPeter Zijlstra static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
472391e43daSPeter Zijlstra {
473391e43daSPeter Zijlstra 	return rt_se->my_q;
474391e43daSPeter Zijlstra }
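/*
 * With RT_GROUP_SCHED, for_each_sched_rt_entity() climbs from a
 * task's entity through every enclosing group up to the root. A
 * stand-alone model of that parent walk (struct and names simplified
 * for illustration):
 */
#include <stdio.h>

struct demo_rt_entity {
	const char *name;
	struct demo_rt_entity *parent;
};

int main(void)
{
	struct demo_rt_entity root  = { "root",  NULL   };
	struct demo_rt_entity group = { "group", &root  };
	struct demo_rt_entity task  = { "task",  &group };
	struct demo_rt_entity *rt_se;

	for (rt_se = &task; rt_se; rt_se = rt_se->parent)
		printf("%s\n", rt_se->name);	/* task, group, root */
	return 0;
}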
475391e43daSPeter Zijlstra 
476391e43daSPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
477391e43daSPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
478391e43daSPeter Zijlstra 
479391e43daSPeter Zijlstra static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
480391e43daSPeter Zijlstra {
481391e43daSPeter Zijlstra 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
4828875125eSKirill Tkhai 	struct rq *rq = rq_of_rt_rq(rt_rq);
483391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se;
484391e43daSPeter Zijlstra 
4858875125eSKirill Tkhai 	int cpu = cpu_of(rq);
486391e43daSPeter Zijlstra 
487391e43daSPeter Zijlstra 	rt_se = rt_rq->tg->rt_se[cpu];
488391e43daSPeter Zijlstra 
489391e43daSPeter Zijlstra 	if (rt_rq->rt_nr_running) {
490f4ebcbc0SKirill Tkhai 		if (!rt_se)
491f4ebcbc0SKirill Tkhai 			enqueue_top_rt_rq(rt_rq);
492f4ebcbc0SKirill Tkhai 		else if (!on_rt_rq(rt_se))
493391e43daSPeter Zijlstra 			enqueue_rt_entity(rt_se, false);
494f4ebcbc0SKirill Tkhai 
495391e43daSPeter Zijlstra 		if (rt_rq->highest_prio.curr < curr->prio)
4968875125eSKirill Tkhai 			resched_curr(rq);
497391e43daSPeter Zijlstra 	}
498391e43daSPeter Zijlstra }
499391e43daSPeter Zijlstra 
500391e43daSPeter Zijlstra static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
501391e43daSPeter Zijlstra {
502391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se;
503391e43daSPeter Zijlstra 	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
504391e43daSPeter Zijlstra 
505391e43daSPeter Zijlstra 	rt_se = rt_rq->tg->rt_se[cpu];
506391e43daSPeter Zijlstra 
507f4ebcbc0SKirill Tkhai 	if (!rt_se)
508f4ebcbc0SKirill Tkhai 		dequeue_top_rt_rq(rt_rq);
509f4ebcbc0SKirill Tkhai 	else if (on_rt_rq(rt_se))
510391e43daSPeter Zijlstra 		dequeue_rt_entity(rt_se);
511391e43daSPeter Zijlstra }
512391e43daSPeter Zijlstra 
51346383648SKirill Tkhai static inline int rt_rq_throttled(struct rt_rq *rt_rq)
51446383648SKirill Tkhai {
51546383648SKirill Tkhai 	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
51646383648SKirill Tkhai }
51746383648SKirill Tkhai 
518391e43daSPeter Zijlstra static int rt_se_boosted(struct sched_rt_entity *rt_se)
519391e43daSPeter Zijlstra {
520391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
521391e43daSPeter Zijlstra 	struct task_struct *p;
522391e43daSPeter Zijlstra 
523391e43daSPeter Zijlstra 	if (rt_rq)
524391e43daSPeter Zijlstra 		return !!rt_rq->rt_nr_boosted;
525391e43daSPeter Zijlstra 
526391e43daSPeter Zijlstra 	p = rt_task_of(rt_se);
527391e43daSPeter Zijlstra 	return p->prio != p->normal_prio;
528391e43daSPeter Zijlstra }
529391e43daSPeter Zijlstra 
530391e43daSPeter Zijlstra #ifdef CONFIG_SMP
531391e43daSPeter Zijlstra static inline const struct cpumask *sched_rt_period_mask(void)
532391e43daSPeter Zijlstra {
533424c93feSNathan Zimmer 	return this_rq()->rd->span;
534391e43daSPeter Zijlstra }
535391e43daSPeter Zijlstra #else
536391e43daSPeter Zijlstra static inline const struct cpumask *sched_rt_period_mask(void)
537391e43daSPeter Zijlstra {
538391e43daSPeter Zijlstra 	return cpu_online_mask;
539391e43daSPeter Zijlstra }
540391e43daSPeter Zijlstra #endif
541391e43daSPeter Zijlstra 
542391e43daSPeter Zijlstra static inline
543391e43daSPeter Zijlstra struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
544391e43daSPeter Zijlstra {
545391e43daSPeter Zijlstra 	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
546391e43daSPeter Zijlstra }
547391e43daSPeter Zijlstra 
548391e43daSPeter Zijlstra static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
549391e43daSPeter Zijlstra {
550391e43daSPeter Zijlstra 	return &rt_rq->tg->rt_bandwidth;
551391e43daSPeter Zijlstra }
552391e43daSPeter Zijlstra 
553391e43daSPeter Zijlstra #else /* !CONFIG_RT_GROUP_SCHED */
554391e43daSPeter Zijlstra 
555391e43daSPeter Zijlstra static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
556391e43daSPeter Zijlstra {
557391e43daSPeter Zijlstra 	return rt_rq->rt_runtime;
558391e43daSPeter Zijlstra }
559391e43daSPeter Zijlstra 
560391e43daSPeter Zijlstra static inline u64 sched_rt_period(struct rt_rq *rt_rq)
561391e43daSPeter Zijlstra {
562391e43daSPeter Zijlstra 	return ktime_to_ns(def_rt_bandwidth.rt_period);
563391e43daSPeter Zijlstra }
564391e43daSPeter Zijlstra 
565391e43daSPeter Zijlstra typedef struct rt_rq *rt_rq_iter_t;
566391e43daSPeter Zijlstra 
567391e43daSPeter Zijlstra #define for_each_rt_rq(rt_rq, iter, rq) \
568391e43daSPeter Zijlstra 	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
569391e43daSPeter Zijlstra 
570391e43daSPeter Zijlstra #define for_each_sched_rt_entity(rt_se) \
571391e43daSPeter Zijlstra 	for (; rt_se; rt_se = NULL)
572391e43daSPeter Zijlstra 
573391e43daSPeter Zijlstra static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
574391e43daSPeter Zijlstra {
575391e43daSPeter Zijlstra 	return NULL;
576391e43daSPeter Zijlstra }
577391e43daSPeter Zijlstra 
578391e43daSPeter Zijlstra static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
579391e43daSPeter Zijlstra {
580f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_rq(rt_rq);
581f4ebcbc0SKirill Tkhai 
582f4ebcbc0SKirill Tkhai 	if (!rt_rq->rt_nr_running)
583f4ebcbc0SKirill Tkhai 		return;
584f4ebcbc0SKirill Tkhai 
585f4ebcbc0SKirill Tkhai 	enqueue_top_rt_rq(rt_rq);
5868875125eSKirill Tkhai 	resched_curr(rq);
587391e43daSPeter Zijlstra }
588391e43daSPeter Zijlstra 
589391e43daSPeter Zijlstra static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
590391e43daSPeter Zijlstra {
591f4ebcbc0SKirill Tkhai 	dequeue_top_rt_rq(rt_rq);
592391e43daSPeter Zijlstra }
593391e43daSPeter Zijlstra 
59446383648SKirill Tkhai static inline int rt_rq_throttled(struct rt_rq *rt_rq)
59546383648SKirill Tkhai {
59646383648SKirill Tkhai 	return rt_rq->rt_throttled;
59746383648SKirill Tkhai }
59846383648SKirill Tkhai 
599391e43daSPeter Zijlstra static inline const struct cpumask *sched_rt_period_mask(void)
600391e43daSPeter Zijlstra {
601391e43daSPeter Zijlstra 	return cpu_online_mask;
602391e43daSPeter Zijlstra }
603391e43daSPeter Zijlstra 
604391e43daSPeter Zijlstra static inline
605391e43daSPeter Zijlstra struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
606391e43daSPeter Zijlstra {
607391e43daSPeter Zijlstra 	return &cpu_rq(cpu)->rt;
608391e43daSPeter Zijlstra }
609391e43daSPeter Zijlstra 
610391e43daSPeter Zijlstra static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
611391e43daSPeter Zijlstra {
612391e43daSPeter Zijlstra 	return &def_rt_bandwidth;
613391e43daSPeter Zijlstra }
614391e43daSPeter Zijlstra 
615391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */
616391e43daSPeter Zijlstra 
617faa59937SJuri Lelli bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
618faa59937SJuri Lelli {
619faa59937SJuri Lelli 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
620faa59937SJuri Lelli 
621faa59937SJuri Lelli 	return (hrtimer_active(&rt_b->rt_period_timer) ||
622faa59937SJuri Lelli 		rt_rq->rt_time < rt_b->rt_runtime);
623faa59937SJuri Lelli }
624faa59937SJuri Lelli 
625391e43daSPeter Zijlstra #ifdef CONFIG_SMP
626391e43daSPeter Zijlstra /*
627391e43daSPeter Zijlstra  * We ran out of runtime, see if we can borrow some from our neighbours.
628391e43daSPeter Zijlstra  */
629391e43daSPeter Zijlstra static int do_balance_runtime(struct rt_rq *rt_rq)
630391e43daSPeter Zijlstra {
631391e43daSPeter Zijlstra 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
632aa7f6730SShawn Bohrer 	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
633391e43daSPeter Zijlstra 	int i, weight, more = 0;
634391e43daSPeter Zijlstra 	u64 rt_period;
635391e43daSPeter Zijlstra 
636391e43daSPeter Zijlstra 	weight = cpumask_weight(rd->span);
637391e43daSPeter Zijlstra 
638391e43daSPeter Zijlstra 	raw_spin_lock(&rt_b->rt_runtime_lock);
639391e43daSPeter Zijlstra 	rt_period = ktime_to_ns(rt_b->rt_period);
640391e43daSPeter Zijlstra 	for_each_cpu(i, rd->span) {
641391e43daSPeter Zijlstra 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
642391e43daSPeter Zijlstra 		s64 diff;
643391e43daSPeter Zijlstra 
644391e43daSPeter Zijlstra 		if (iter == rt_rq)
645391e43daSPeter Zijlstra 			continue;
646391e43daSPeter Zijlstra 
647391e43daSPeter Zijlstra 		raw_spin_lock(&iter->rt_runtime_lock);
648391e43daSPeter Zijlstra 		/*
649391e43daSPeter Zijlstra 		 * Either all rqs have inf runtime and there's nothing to steal
650391e43daSPeter Zijlstra 		 * or __disable_runtime() below sets a specific rq to inf to
651391e43daSPeter Zijlstra 		 * indicate it's been disabled and disallow stealing.
652391e43daSPeter Zijlstra 		 */
653391e43daSPeter Zijlstra 		if (iter->rt_runtime == RUNTIME_INF)
654391e43daSPeter Zijlstra 			goto next;
655391e43daSPeter Zijlstra 
656391e43daSPeter Zijlstra 		/*
657391e43daSPeter Zijlstra 		 * From runqueues with spare time, take 1/n part of their
658391e43daSPeter Zijlstra 		 * spare time, but no more than our period.
659391e43daSPeter Zijlstra 		 */
660391e43daSPeter Zijlstra 		diff = iter->rt_runtime - iter->rt_time;
661391e43daSPeter Zijlstra 		if (diff > 0) {
662391e43daSPeter Zijlstra 			diff = div_u64((u64)diff, weight);
663391e43daSPeter Zijlstra 			if (rt_rq->rt_runtime + diff > rt_period)
664391e43daSPeter Zijlstra 				diff = rt_period - rt_rq->rt_runtime;
665391e43daSPeter Zijlstra 			iter->rt_runtime -= diff;
666391e43daSPeter Zijlstra 			rt_rq->rt_runtime += diff;
667391e43daSPeter Zijlstra 			more = 1;
668391e43daSPeter Zijlstra 			if (rt_rq->rt_runtime == rt_period) {
669391e43daSPeter Zijlstra 				raw_spin_unlock(&iter->rt_runtime_lock);
670391e43daSPeter Zijlstra 				break;
671391e43daSPeter Zijlstra 			}
672391e43daSPeter Zijlstra 		}
673391e43daSPeter Zijlstra next:
674391e43daSPeter Zijlstra 		raw_spin_unlock(&iter->rt_runtime_lock);
675391e43daSPeter Zijlstra 	}
676391e43daSPeter Zijlstra 	raw_spin_unlock(&rt_b->rt_runtime_lock);
677391e43daSPeter Zijlstra 
678391e43daSPeter Zijlstra 	return more;
679391e43daSPeter Zijlstra }
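/*
 * The 1/n rule above, worked through in plain C (values in ns,
 * invented for illustration): with 4 CPUs in the domain, a starved
 * rq takes a quarter of a neighbour's spare time, capped so its own
 * runtime never exceeds the period.
 */
#include <stdio.h>

int main(void)
{
	long long rt_period = 1000000;		/* period, ns */
	long long weight = 4;			/* CPUs in the root domain */
	long long my_runtime = 950000;		/* our exhausted budget */
	long long iter_runtime = 950000;	/* neighbour's budget */
	long long iter_time = 550000;		/* neighbour's consumption */
	long long diff = iter_runtime - iter_time;	/* spare: 400000 */

	if (diff > 0) {
		diff /= weight;				/* 1/n share: 100000 */
		if (my_runtime + diff > rt_period)
			diff = rt_period - my_runtime;	/* cap: 50000 */
		iter_runtime -= diff;
		my_runtime += diff;
	}
	printf("borrowed %lld ns, runtime now %lld ns\n", diff, my_runtime);
	return 0;
}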
680391e43daSPeter Zijlstra 
681391e43daSPeter Zijlstra /*
682391e43daSPeter Zijlstra  * Ensure this RQ takes back all the runtime it lent to its neighbours.
683391e43daSPeter Zijlstra  */
684391e43daSPeter Zijlstra static void __disable_runtime(struct rq *rq)
685391e43daSPeter Zijlstra {
686391e43daSPeter Zijlstra 	struct root_domain *rd = rq->rd;
687391e43daSPeter Zijlstra 	rt_rq_iter_t iter;
688391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
689391e43daSPeter Zijlstra 
690391e43daSPeter Zijlstra 	if (unlikely(!scheduler_running))
691391e43daSPeter Zijlstra 		return;
692391e43daSPeter Zijlstra 
693391e43daSPeter Zijlstra 	for_each_rt_rq(rt_rq, iter, rq) {
694391e43daSPeter Zijlstra 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
695391e43daSPeter Zijlstra 		s64 want;
696391e43daSPeter Zijlstra 		int i;
697391e43daSPeter Zijlstra 
698391e43daSPeter Zijlstra 		raw_spin_lock(&rt_b->rt_runtime_lock);
699391e43daSPeter Zijlstra 		raw_spin_lock(&rt_rq->rt_runtime_lock);
700391e43daSPeter Zijlstra 		/*
701391e43daSPeter Zijlstra 		 * Either we're all inf and nobody needs to borrow, or we're
702391e43daSPeter Zijlstra 		 * already disabled and thus have nothing to do, or we have
703391e43daSPeter Zijlstra 		 * exactly the right amount of runtime to take out.
704391e43daSPeter Zijlstra 		 */
705391e43daSPeter Zijlstra 		if (rt_rq->rt_runtime == RUNTIME_INF ||
706391e43daSPeter Zijlstra 				rt_rq->rt_runtime == rt_b->rt_runtime)
707391e43daSPeter Zijlstra 			goto balanced;
708391e43daSPeter Zijlstra 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
709391e43daSPeter Zijlstra 
710391e43daSPeter Zijlstra 		/*
711391e43daSPeter Zijlstra 		 * Calculate the difference between what we started out with
712391e43daSPeter Zijlstra 		 * and what we currently have; that's the amount of runtime
713391e43daSPeter Zijlstra 		 * we lent and now have to reclaim.
714391e43daSPeter Zijlstra 		 */
715391e43daSPeter Zijlstra 		want = rt_b->rt_runtime - rt_rq->rt_runtime;
716391e43daSPeter Zijlstra 
717391e43daSPeter Zijlstra 		/*
718391e43daSPeter Zijlstra 		 * Greedy reclaim, take back as much as we can.
719391e43daSPeter Zijlstra 		 */
720391e43daSPeter Zijlstra 		for_each_cpu(i, rd->span) {
721391e43daSPeter Zijlstra 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
722391e43daSPeter Zijlstra 			s64 diff;
723391e43daSPeter Zijlstra 
724391e43daSPeter Zijlstra 			/*
725391e43daSPeter Zijlstra 			 * Can't reclaim from ourselves or disabled runqueues.
726391e43daSPeter Zijlstra 			 */
727391e43daSPeter Zijlstra 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
728391e43daSPeter Zijlstra 				continue;
729391e43daSPeter Zijlstra 
730391e43daSPeter Zijlstra 			raw_spin_lock(&iter->rt_runtime_lock);
731391e43daSPeter Zijlstra 			if (want > 0) {
732391e43daSPeter Zijlstra 				diff = min_t(s64, iter->rt_runtime, want);
733391e43daSPeter Zijlstra 				iter->rt_runtime -= diff;
734391e43daSPeter Zijlstra 				want -= diff;
735391e43daSPeter Zijlstra 			} else {
736391e43daSPeter Zijlstra 				iter->rt_runtime -= want;
737391e43daSPeter Zijlstra 				want -= want;
738391e43daSPeter Zijlstra 			}
739391e43daSPeter Zijlstra 			raw_spin_unlock(&iter->rt_runtime_lock);
740391e43daSPeter Zijlstra 
741391e43daSPeter Zijlstra 			if (!want)
742391e43daSPeter Zijlstra 				break;
743391e43daSPeter Zijlstra 		}
744391e43daSPeter Zijlstra 
745391e43daSPeter Zijlstra 		raw_spin_lock(&rt_rq->rt_runtime_lock);
746391e43daSPeter Zijlstra 		/*
747391e43daSPeter Zijlstra 		 * We cannot be left wanting - that would mean some runtime
748391e43daSPeter Zijlstra 		 * leaked out of the system.
749391e43daSPeter Zijlstra 		 */
750391e43daSPeter Zijlstra 		BUG_ON(want);
751391e43daSPeter Zijlstra balanced:
752391e43daSPeter Zijlstra 		/*
753391e43daSPeter Zijlstra 		 * Disable all the borrow logic by pretending we have inf
754391e43daSPeter Zijlstra 		 * runtime - in which case borrowing doesn't make sense.
755391e43daSPeter Zijlstra 		 */
756391e43daSPeter Zijlstra 		rt_rq->rt_runtime = RUNTIME_INF;
757a4c96ae3SPeter Boonstoppel 		rt_rq->rt_throttled = 0;
758391e43daSPeter Zijlstra 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
759391e43daSPeter Zijlstra 		raw_spin_unlock(&rt_b->rt_runtime_lock);
76099b62567SKirill Tkhai 
76199b62567SKirill Tkhai 		/* Make rt_rq available for pick_next_task() */
76299b62567SKirill Tkhai 		sched_rt_rq_enqueue(rt_rq);
763391e43daSPeter Zijlstra 	}
764391e43daSPeter Zijlstra }
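/*
 * Greedy reclaim, worked through (numbers invented): this rq lent
 * 50000 ns away, so want == 50000 and the loop drains neighbours
 * until it is whole again; ending with want != 0 would mean runtime
 * leaked, which is what the BUG_ON(want) above asserts.
 */
#include <stdio.h>

int main(void)
{
	long long want = 50000;			/* runtime we lent out */
	long long neighbours[2] = { 20000, 40000 };	/* reclaimable */
	int i;

	for (i = 0; i < 2 && want > 0; i++) {
		long long diff = neighbours[i] < want ? neighbours[i] : want;

		neighbours[i] -= diff;
		want -= diff;
	}
	/* want: 0, neighbours: { 0, 10000 } */
	printf("want=%lld n0=%lld n1=%lld\n",
	       want, neighbours[0], neighbours[1]);
	return 0;
}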
765391e43daSPeter Zijlstra 
766391e43daSPeter Zijlstra static void __enable_runtime(struct rq *rq)
767391e43daSPeter Zijlstra {
768391e43daSPeter Zijlstra 	rt_rq_iter_t iter;
769391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
770391e43daSPeter Zijlstra 
771391e43daSPeter Zijlstra 	if (unlikely(!scheduler_running))
772391e43daSPeter Zijlstra 		return;
773391e43daSPeter Zijlstra 
774391e43daSPeter Zijlstra 	/*
775391e43daSPeter Zijlstra 	 * Reset each runqueue's bandwidth settings
776391e43daSPeter Zijlstra 	 */
777391e43daSPeter Zijlstra 	for_each_rt_rq(rt_rq, iter, rq) {
778391e43daSPeter Zijlstra 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
779391e43daSPeter Zijlstra 
780391e43daSPeter Zijlstra 		raw_spin_lock(&rt_b->rt_runtime_lock);
781391e43daSPeter Zijlstra 		raw_spin_lock(&rt_rq->rt_runtime_lock);
782391e43daSPeter Zijlstra 		rt_rq->rt_runtime = rt_b->rt_runtime;
783391e43daSPeter Zijlstra 		rt_rq->rt_time = 0;
784391e43daSPeter Zijlstra 		rt_rq->rt_throttled = 0;
785391e43daSPeter Zijlstra 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
786391e43daSPeter Zijlstra 		raw_spin_unlock(&rt_b->rt_runtime_lock);
787391e43daSPeter Zijlstra 	}
788391e43daSPeter Zijlstra }
789391e43daSPeter Zijlstra 
790391e43daSPeter Zijlstra static int balance_runtime(struct rt_rq *rt_rq)
791391e43daSPeter Zijlstra {
792391e43daSPeter Zijlstra 	int more = 0;
793391e43daSPeter Zijlstra 
794391e43daSPeter Zijlstra 	if (!sched_feat(RT_RUNTIME_SHARE))
795391e43daSPeter Zijlstra 		return more;
796391e43daSPeter Zijlstra 
797391e43daSPeter Zijlstra 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
798391e43daSPeter Zijlstra 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
799391e43daSPeter Zijlstra 		more = do_balance_runtime(rt_rq);
800391e43daSPeter Zijlstra 		raw_spin_lock(&rt_rq->rt_runtime_lock);
801391e43daSPeter Zijlstra 	}
802391e43daSPeter Zijlstra 
803391e43daSPeter Zijlstra 	return more;
804391e43daSPeter Zijlstra }
805391e43daSPeter Zijlstra #else /* !CONFIG_SMP */
806391e43daSPeter Zijlstra static inline int balance_runtime(struct rt_rq *rt_rq)
807391e43daSPeter Zijlstra {
808391e43daSPeter Zijlstra 	return 0;
809391e43daSPeter Zijlstra }
810391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
811391e43daSPeter Zijlstra 
812391e43daSPeter Zijlstra static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
813391e43daSPeter Zijlstra {
81442c62a58SPeter Zijlstra 	int i, idle = 1, throttled = 0;
815391e43daSPeter Zijlstra 	const struct cpumask *span;
816391e43daSPeter Zijlstra 
817391e43daSPeter Zijlstra 	span = sched_rt_period_mask();
818e221d028SMike Galbraith #ifdef CONFIG_RT_GROUP_SCHED
819e221d028SMike Galbraith 	/*
820e221d028SMike Galbraith 	 * FIXME: isolated CPUs should really leave the root task group,
821e221d028SMike Galbraith 	 * whether they are isolcpus or were isolated via cpusets, lest
822e221d028SMike Galbraith 	 * the timer run on a CPU which does not service all runqueues,
823e221d028SMike Galbraith 	 * potentially leaving other CPUs indefinitely throttled.  If
824e221d028SMike Galbraith 	 * isolation is really required, the user will turn the throttle
825e221d028SMike Galbraith 	 * off to kill the perturbations it causes anyway.  Meanwhile,
826e221d028SMike Galbraith 	 * this maintains functionality for boot and/or troubleshooting.
827e221d028SMike Galbraith 	 */
828e221d028SMike Galbraith 	if (rt_b == &root_task_group.rt_bandwidth)
829e221d028SMike Galbraith 		span = cpu_online_mask;
830e221d028SMike Galbraith #endif
831391e43daSPeter Zijlstra 	for_each_cpu(i, span) {
832391e43daSPeter Zijlstra 		int enqueue = 0;
833391e43daSPeter Zijlstra 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
834391e43daSPeter Zijlstra 		struct rq *rq = rq_of_rt_rq(rt_rq);
835391e43daSPeter Zijlstra 
836391e43daSPeter Zijlstra 		raw_spin_lock(&rq->lock);
837391e43daSPeter Zijlstra 		if (rt_rq->rt_time) {
838391e43daSPeter Zijlstra 			u64 runtime;
839391e43daSPeter Zijlstra 
840391e43daSPeter Zijlstra 			raw_spin_lock(&rt_rq->rt_runtime_lock);
841391e43daSPeter Zijlstra 			if (rt_rq->rt_throttled)
842391e43daSPeter Zijlstra 				balance_runtime(rt_rq);
843391e43daSPeter Zijlstra 			runtime = rt_rq->rt_runtime;
844391e43daSPeter Zijlstra 			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
845391e43daSPeter Zijlstra 			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
846391e43daSPeter Zijlstra 				rt_rq->rt_throttled = 0;
847391e43daSPeter Zijlstra 				enqueue = 1;
848391e43daSPeter Zijlstra 
849391e43daSPeter Zijlstra 				/*
8509edfbfedSPeter Zijlstra 				 * When we're idle and a woken (rt) task is
8519edfbfedSPeter Zijlstra 				 * throttled, check_preempt_curr() will set
8529edfbfedSPeter Zijlstra 				 * skip_update and the time between the wakeup
8539edfbfedSPeter Zijlstra 				 * and this unthrottle will get accounted as
8549edfbfedSPeter Zijlstra 				 * 'runtime'.
855391e43daSPeter Zijlstra 				 */
856391e43daSPeter Zijlstra 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
8579edfbfedSPeter Zijlstra 					rq_clock_skip_update(rq, false);
858391e43daSPeter Zijlstra 			}
859391e43daSPeter Zijlstra 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
860391e43daSPeter Zijlstra 				idle = 0;
861391e43daSPeter Zijlstra 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
862391e43daSPeter Zijlstra 		} else if (rt_rq->rt_nr_running) {
863391e43daSPeter Zijlstra 			idle = 0;
864391e43daSPeter Zijlstra 			if (!rt_rq_throttled(rt_rq))
865391e43daSPeter Zijlstra 				enqueue = 1;
866391e43daSPeter Zijlstra 		}
86742c62a58SPeter Zijlstra 		if (rt_rq->rt_throttled)
86842c62a58SPeter Zijlstra 			throttled = 1;
869391e43daSPeter Zijlstra 
870391e43daSPeter Zijlstra 		if (enqueue)
871391e43daSPeter Zijlstra 			sched_rt_rq_enqueue(rt_rq);
872391e43daSPeter Zijlstra 		raw_spin_unlock(&rq->lock);
873391e43daSPeter Zijlstra 	}
874391e43daSPeter Zijlstra 
87542c62a58SPeter Zijlstra 	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
87642c62a58SPeter Zijlstra 		return 1;
87742c62a58SPeter Zijlstra 
878391e43daSPeter Zijlstra 	return idle;
879391e43daSPeter Zijlstra }
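/*
 * The replenishment arithmetic from the loop above, isolated: with a
 * 950000 ns budget and 2 missed periods, up to 1900000 ns of
 * accumulated rt_time is forgiven, clamped so it never underflows.
 * Numbers are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long rt_time = 1200000;	/* ns consumed so far */
	unsigned long long runtime = 950000;	/* budget per period */
	unsigned long long overrun = 2;		/* periods the timer missed */
	unsigned long long credit = overrun * runtime;

	rt_time -= rt_time < credit ? rt_time : credit;	/* min() clamp */
	printf("rt_time after replenish: %llu\n", rt_time);	/* 0 */
	return 0;
}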
880391e43daSPeter Zijlstra 
881391e43daSPeter Zijlstra static inline int rt_se_prio(struct sched_rt_entity *rt_se)
882391e43daSPeter Zijlstra {
883391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
884391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
885391e43daSPeter Zijlstra 
886391e43daSPeter Zijlstra 	if (rt_rq)
887391e43daSPeter Zijlstra 		return rt_rq->highest_prio.curr;
888391e43daSPeter Zijlstra #endif
889391e43daSPeter Zijlstra 
890391e43daSPeter Zijlstra 	return rt_task_of(rt_se)->prio;
891391e43daSPeter Zijlstra }
892391e43daSPeter Zijlstra 
893391e43daSPeter Zijlstra static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
894391e43daSPeter Zijlstra {
895391e43daSPeter Zijlstra 	u64 runtime = sched_rt_runtime(rt_rq);
896391e43daSPeter Zijlstra 
897391e43daSPeter Zijlstra 	if (rt_rq->rt_throttled)
898391e43daSPeter Zijlstra 		return rt_rq_throttled(rt_rq);
899391e43daSPeter Zijlstra 
9005b680fd6SShan Hai 	if (runtime >= sched_rt_period(rt_rq))
901391e43daSPeter Zijlstra 		return 0;
902391e43daSPeter Zijlstra 
903391e43daSPeter Zijlstra 	balance_runtime(rt_rq);
904391e43daSPeter Zijlstra 	runtime = sched_rt_runtime(rt_rq);
905391e43daSPeter Zijlstra 	if (runtime == RUNTIME_INF)
906391e43daSPeter Zijlstra 		return 0;
907391e43daSPeter Zijlstra 
908391e43daSPeter Zijlstra 	if (rt_rq->rt_time > runtime) {
9097abc63b1SPeter Zijlstra 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
9107abc63b1SPeter Zijlstra 
9117abc63b1SPeter Zijlstra 		/*
9127abc63b1SPeter Zijlstra 		 * Don't actually throttle groups that have no runtime assigned
9137abc63b1SPeter Zijlstra 		 * but accrue some time due to boosting.
9147abc63b1SPeter Zijlstra 		 */
9157abc63b1SPeter Zijlstra 		if (likely(rt_b->rt_runtime)) {
916391e43daSPeter Zijlstra 			rt_rq->rt_throttled = 1;
917c224815dSJohn Stultz 			printk_deferred_once("sched: RT throttling activated\n");
9187abc63b1SPeter Zijlstra 		} else {
9197abc63b1SPeter Zijlstra 			/*
9207abc63b1SPeter Zijlstra 			 * In case we did anyway, make it go away,
9217abc63b1SPeter Zijlstra 			 * replenishment is a joke, since it will replenish us
9227abc63b1SPeter Zijlstra 			 * with exactly 0 ns.
9237abc63b1SPeter Zijlstra 			 */
9247abc63b1SPeter Zijlstra 			rt_rq->rt_time = 0;
9257abc63b1SPeter Zijlstra 		}
9267abc63b1SPeter Zijlstra 
927391e43daSPeter Zijlstra 		if (rt_rq_throttled(rt_rq)) {
928391e43daSPeter Zijlstra 			sched_rt_rq_dequeue(rt_rq);
929391e43daSPeter Zijlstra 			return 1;
930391e43daSPeter Zijlstra 		}
931391e43daSPeter Zijlstra 	}
932391e43daSPeter Zijlstra 
933391e43daSPeter Zijlstra 	return 0;
934391e43daSPeter Zijlstra }
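/*
 * What the throttle means in practice (hedged userspace sketch,
 * needs root or CAP_SYS_NICE): once "sched: RT throttling activated"
 * fires, a busy-looping SCHED_FIFO task is capped at roughly
 * sched_rt_runtime_us / sched_rt_period_us (95% by default) of one
 * CPU, leaving the remainder to non-RT tasks.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}
	for (;;)
		;	/* spins forever, but ~5% of the CPU stays free */
	return 0;
}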
935391e43daSPeter Zijlstra 
936391e43daSPeter Zijlstra /*
937391e43daSPeter Zijlstra  * Update the current task's runtime statistics. Skip current tasks that
938391e43daSPeter Zijlstra  * are not in our scheduling class.
939391e43daSPeter Zijlstra  */
940391e43daSPeter Zijlstra static void update_curr_rt(struct rq *rq)
941391e43daSPeter Zijlstra {
942391e43daSPeter Zijlstra 	struct task_struct *curr = rq->curr;
943391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &curr->rt;
944391e43daSPeter Zijlstra 	u64 delta_exec;
945391e43daSPeter Zijlstra 
946391e43daSPeter Zijlstra 	if (curr->sched_class != &rt_sched_class)
947391e43daSPeter Zijlstra 		return;
948391e43daSPeter Zijlstra 
94978becc27SFrederic Weisbecker 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
950fc79e240SKirill Tkhai 	if (unlikely((s64)delta_exec <= 0))
951fc79e240SKirill Tkhai 		return;
952391e43daSPeter Zijlstra 
95342c62a58SPeter Zijlstra 	schedstat_set(curr->se.statistics.exec_max,
95442c62a58SPeter Zijlstra 		      max(curr->se.statistics.exec_max, delta_exec));
955391e43daSPeter Zijlstra 
956391e43daSPeter Zijlstra 	curr->se.sum_exec_runtime += delta_exec;
957391e43daSPeter Zijlstra 	account_group_exec_runtime(curr, delta_exec);
958391e43daSPeter Zijlstra 
95978becc27SFrederic Weisbecker 	curr->se.exec_start = rq_clock_task(rq);
960391e43daSPeter Zijlstra 	cpuacct_charge(curr, delta_exec);
961391e43daSPeter Zijlstra 
962391e43daSPeter Zijlstra 	sched_rt_avg_update(rq, delta_exec);
963391e43daSPeter Zijlstra 
964391e43daSPeter Zijlstra 	if (!rt_bandwidth_enabled())
965391e43daSPeter Zijlstra 		return;
966391e43daSPeter Zijlstra 
967391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
9680b07939cSGiedrius Rekasius 		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
969391e43daSPeter Zijlstra 
970391e43daSPeter Zijlstra 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
971391e43daSPeter Zijlstra 			raw_spin_lock(&rt_rq->rt_runtime_lock);
972391e43daSPeter Zijlstra 			rt_rq->rt_time += delta_exec;
973391e43daSPeter Zijlstra 			if (sched_rt_runtime_exceeded(rt_rq))
9748875125eSKirill Tkhai 				resched_curr(rq);
975391e43daSPeter Zijlstra 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
976391e43daSPeter Zijlstra 		}
977391e43daSPeter Zijlstra 	}
978391e43daSPeter Zijlstra }
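/*
 * The accounting pattern above (sample clock, subtract exec_start,
 * accumulate, reset exec_start) in a stand-alone form, with
 * CLOCK_MONOTONIC standing in for rq_clock_task() -- a sketch, not
 * the kernel's clock:
 */
#include <stdio.h>
#include <time.h>

static unsigned long long demo_now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	unsigned long long exec_start = demo_now_ns();
	unsigned long long sum_exec_runtime = 0;
	unsigned long long delta_exec;
	volatile unsigned long i;

	for (i = 0; i < 10000000ul; i++)
		;				/* "run" for a while */

	delta_exec = demo_now_ns() - exec_start;
	sum_exec_runtime += delta_exec;		/* charge the elapsed time */
	exec_start = demo_now_ns();		/* open the next window */
	printf("accounted %llu ns\n", sum_exec_runtime);
	return 0;
}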
979391e43daSPeter Zijlstra 
980f4ebcbc0SKirill Tkhai static void
981f4ebcbc0SKirill Tkhai dequeue_top_rt_rq(struct rt_rq *rt_rq)
982f4ebcbc0SKirill Tkhai {
983f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_rq(rt_rq);
984f4ebcbc0SKirill Tkhai 
985f4ebcbc0SKirill Tkhai 	BUG_ON(&rq->rt != rt_rq);
986f4ebcbc0SKirill Tkhai 
987f4ebcbc0SKirill Tkhai 	if (!rt_rq->rt_queued)
988f4ebcbc0SKirill Tkhai 		return;
989f4ebcbc0SKirill Tkhai 
990f4ebcbc0SKirill Tkhai 	BUG_ON(!rq->nr_running);
991f4ebcbc0SKirill Tkhai 
99272465447SKirill Tkhai 	sub_nr_running(rq, rt_rq->rt_nr_running);
993f4ebcbc0SKirill Tkhai 	rt_rq->rt_queued = 0;
994f4ebcbc0SKirill Tkhai }
995f4ebcbc0SKirill Tkhai 
996f4ebcbc0SKirill Tkhai static void
997f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(struct rt_rq *rt_rq)
998f4ebcbc0SKirill Tkhai {
999f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_rq(rt_rq);
1000f4ebcbc0SKirill Tkhai 
1001f4ebcbc0SKirill Tkhai 	BUG_ON(&rq->rt != rt_rq);
1002f4ebcbc0SKirill Tkhai 
1003f4ebcbc0SKirill Tkhai 	if (rt_rq->rt_queued)
1004f4ebcbc0SKirill Tkhai 		return;
1005f4ebcbc0SKirill Tkhai 	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1006f4ebcbc0SKirill Tkhai 		return;
1007f4ebcbc0SKirill Tkhai 
100872465447SKirill Tkhai 	add_nr_running(rq, rt_rq->rt_nr_running);
1009f4ebcbc0SKirill Tkhai 	rt_rq->rt_queued = 1;
1010f4ebcbc0SKirill Tkhai }
1011f4ebcbc0SKirill Tkhai 
1012391e43daSPeter Zijlstra #if defined CONFIG_SMP
1013391e43daSPeter Zijlstra 
1014391e43daSPeter Zijlstra static void
1015391e43daSPeter Zijlstra inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1016391e43daSPeter Zijlstra {
1017391e43daSPeter Zijlstra 	struct rq *rq = rq_of_rt_rq(rt_rq);
1018391e43daSPeter Zijlstra 
1019757dfcaaSKirill Tkhai #ifdef CONFIG_RT_GROUP_SCHED
1020757dfcaaSKirill Tkhai 	/*
1021757dfcaaSKirill Tkhai 	 * Change rq's cpupri only if rt_rq is the top queue.
1022757dfcaaSKirill Tkhai 	 */
1023757dfcaaSKirill Tkhai 	if (&rq->rt != rt_rq)
1024757dfcaaSKirill Tkhai 		return;
1025757dfcaaSKirill Tkhai #endif
1026391e43daSPeter Zijlstra 	if (rq->online && prio < prev_prio)
1027391e43daSPeter Zijlstra 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1028391e43daSPeter Zijlstra }
1029391e43daSPeter Zijlstra 
1030391e43daSPeter Zijlstra static void
1031391e43daSPeter Zijlstra dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1032391e43daSPeter Zijlstra {
1033391e43daSPeter Zijlstra 	struct rq *rq = rq_of_rt_rq(rt_rq);
1034391e43daSPeter Zijlstra 
1035757dfcaaSKirill Tkhai #ifdef CONFIG_RT_GROUP_SCHED
1036757dfcaaSKirill Tkhai 	/*
1037757dfcaaSKirill Tkhai 	 * Change rq's cpupri only if rt_rq is the top queue.
1038757dfcaaSKirill Tkhai 	 */
1039757dfcaaSKirill Tkhai 	if (&rq->rt != rt_rq)
1040757dfcaaSKirill Tkhai 		return;
1041757dfcaaSKirill Tkhai #endif
1042391e43daSPeter Zijlstra 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1043391e43daSPeter Zijlstra 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1044391e43daSPeter Zijlstra }
1045391e43daSPeter Zijlstra 
1046391e43daSPeter Zijlstra #else /* CONFIG_SMP */
1047391e43daSPeter Zijlstra 
1048391e43daSPeter Zijlstra static inline
1049391e43daSPeter Zijlstra void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1050391e43daSPeter Zijlstra static inline
1051391e43daSPeter Zijlstra void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1052391e43daSPeter Zijlstra 
1053391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
1054391e43daSPeter Zijlstra 
1055391e43daSPeter Zijlstra #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1056391e43daSPeter Zijlstra static void
1057391e43daSPeter Zijlstra inc_rt_prio(struct rt_rq *rt_rq, int prio)
1058391e43daSPeter Zijlstra {
1059391e43daSPeter Zijlstra 	int prev_prio = rt_rq->highest_prio.curr;
1060391e43daSPeter Zijlstra 
1061391e43daSPeter Zijlstra 	if (prio < prev_prio)
1062391e43daSPeter Zijlstra 		rt_rq->highest_prio.curr = prio;
1063391e43daSPeter Zijlstra 
1064391e43daSPeter Zijlstra 	inc_rt_prio_smp(rt_rq, prio, prev_prio);
1065391e43daSPeter Zijlstra }
1066391e43daSPeter Zijlstra 
1067391e43daSPeter Zijlstra static void
1068391e43daSPeter Zijlstra dec_rt_prio(struct rt_rq *rt_rq, int prio)
1069391e43daSPeter Zijlstra {
1070391e43daSPeter Zijlstra 	int prev_prio = rt_rq->highest_prio.curr;
1071391e43daSPeter Zijlstra 
1072391e43daSPeter Zijlstra 	if (rt_rq->rt_nr_running) {
1073391e43daSPeter Zijlstra 
1074391e43daSPeter Zijlstra 		WARN_ON(prio < prev_prio);
1075391e43daSPeter Zijlstra 
1076391e43daSPeter Zijlstra 		/*
1077391e43daSPeter Zijlstra 		 * This may have been our highest task, and therefore
1078391e43daSPeter Zijlstra 		 * we may have some recomputation to do
1079391e43daSPeter Zijlstra 		 */
1080391e43daSPeter Zijlstra 		if (prio == prev_prio) {
1081391e43daSPeter Zijlstra 			struct rt_prio_array *array = &rt_rq->active;
1082391e43daSPeter Zijlstra 
1083391e43daSPeter Zijlstra 			rt_rq->highest_prio.curr =
1084391e43daSPeter Zijlstra 				sched_find_first_bit(array->bitmap);
1085391e43daSPeter Zijlstra 		}
1086391e43daSPeter Zijlstra 
1087391e43daSPeter Zijlstra 	} else
1088391e43daSPeter Zijlstra 		rt_rq->highest_prio.curr = MAX_RT_PRIO;
1089391e43daSPeter Zijlstra 
1090391e43daSPeter Zijlstra 	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1091391e43daSPeter Zijlstra }
1092391e43daSPeter Zijlstra 
1093391e43daSPeter Zijlstra #else
1094391e43daSPeter Zijlstra 
1095391e43daSPeter Zijlstra static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1096391e43daSPeter Zijlstra static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1097391e43daSPeter Zijlstra 
1098391e43daSPeter Zijlstra #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1099391e43daSPeter Zijlstra 
1100391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
1101391e43daSPeter Zijlstra 
1102391e43daSPeter Zijlstra static void
1103391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1104391e43daSPeter Zijlstra {
1105391e43daSPeter Zijlstra 	if (rt_se_boosted(rt_se))
1106391e43daSPeter Zijlstra 		rt_rq->rt_nr_boosted++;
1107391e43daSPeter Zijlstra 
1108391e43daSPeter Zijlstra 	if (rt_rq->tg)
1109391e43daSPeter Zijlstra 		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1110391e43daSPeter Zijlstra }
1111391e43daSPeter Zijlstra 
1112391e43daSPeter Zijlstra static void
1113391e43daSPeter Zijlstra dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1114391e43daSPeter Zijlstra {
1115391e43daSPeter Zijlstra 	if (rt_se_boosted(rt_se))
1116391e43daSPeter Zijlstra 		rt_rq->rt_nr_boosted--;
1117391e43daSPeter Zijlstra 
1118391e43daSPeter Zijlstra 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1119391e43daSPeter Zijlstra }
1120391e43daSPeter Zijlstra 
1121391e43daSPeter Zijlstra #else /* CONFIG_RT_GROUP_SCHED */
1122391e43daSPeter Zijlstra 
1123391e43daSPeter Zijlstra static void
1124391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1125391e43daSPeter Zijlstra {
1126391e43daSPeter Zijlstra 	start_rt_bandwidth(&def_rt_bandwidth);
1127391e43daSPeter Zijlstra }
1128391e43daSPeter Zijlstra 
1129391e43daSPeter Zijlstra static inline
1130391e43daSPeter Zijlstra void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1131391e43daSPeter Zijlstra 
1132391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */
1133391e43daSPeter Zijlstra 
1134391e43daSPeter Zijlstra static inline
113522abdef3SKirill Tkhai unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
113622abdef3SKirill Tkhai {
113722abdef3SKirill Tkhai 	struct rt_rq *group_rq = group_rt_rq(rt_se);
113822abdef3SKirill Tkhai 
113922abdef3SKirill Tkhai 	if (group_rq)
114022abdef3SKirill Tkhai 		return group_rq->rt_nr_running;
114122abdef3SKirill Tkhai 	else
114222abdef3SKirill Tkhai 		return 1;
114322abdef3SKirill Tkhai }
114422abdef3SKirill Tkhai 
114522abdef3SKirill Tkhai static inline
1146391e43daSPeter Zijlstra void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1147391e43daSPeter Zijlstra {
1148391e43daSPeter Zijlstra 	int prio = rt_se_prio(rt_se);
1149391e43daSPeter Zijlstra 
1150391e43daSPeter Zijlstra 	WARN_ON(!rt_prio(prio));
115122abdef3SKirill Tkhai 	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1152391e43daSPeter Zijlstra 
1153391e43daSPeter Zijlstra 	inc_rt_prio(rt_rq, prio);
1154391e43daSPeter Zijlstra 	inc_rt_migration(rt_se, rt_rq);
1155391e43daSPeter Zijlstra 	inc_rt_group(rt_se, rt_rq);
1156391e43daSPeter Zijlstra }
1157391e43daSPeter Zijlstra 
1158391e43daSPeter Zijlstra static inline
1159391e43daSPeter Zijlstra void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1160391e43daSPeter Zijlstra {
1161391e43daSPeter Zijlstra 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1162391e43daSPeter Zijlstra 	WARN_ON(!rt_rq->rt_nr_running);
116322abdef3SKirill Tkhai 	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1164391e43daSPeter Zijlstra 
1165391e43daSPeter Zijlstra 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1166391e43daSPeter Zijlstra 	dec_rt_migration(rt_se, rt_rq);
1167391e43daSPeter Zijlstra 	dec_rt_group(rt_se, rt_rq);
1168391e43daSPeter Zijlstra }
1169391e43daSPeter Zijlstra 
1170391e43daSPeter Zijlstra static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1171391e43daSPeter Zijlstra {
1172391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1173391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1174391e43daSPeter Zijlstra 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1175391e43daSPeter Zijlstra 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1176391e43daSPeter Zijlstra 
1177391e43daSPeter Zijlstra 	/*
1178391e43daSPeter Zijlstra 	 * Don't enqueue the group if it's throttled, or when it is empty.
1179391e43daSPeter Zijlstra 	 * The latter is a consequence of the former when a child group
1180391e43daSPeter Zijlstra 	 * gets throttled and the current group doesn't have any other
1181391e43daSPeter Zijlstra 	 * active members.
1182391e43daSPeter Zijlstra 	 */
1183391e43daSPeter Zijlstra 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1184391e43daSPeter Zijlstra 		return;
1185391e43daSPeter Zijlstra 
1186391e43daSPeter Zijlstra 	if (head)
1187391e43daSPeter Zijlstra 		list_add(&rt_se->run_list, queue);
1188391e43daSPeter Zijlstra 	else
1189391e43daSPeter Zijlstra 		list_add_tail(&rt_se->run_list, queue);
1190391e43daSPeter Zijlstra 	__set_bit(rt_se_prio(rt_se), array->bitmap);
1191391e43daSPeter Zijlstra 
1192391e43daSPeter Zijlstra 	inc_rt_tasks(rt_se, rt_rq);
1193391e43daSPeter Zijlstra }
1194391e43daSPeter Zijlstra 
1195391e43daSPeter Zijlstra static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1196391e43daSPeter Zijlstra {
1197391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1198391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1199391e43daSPeter Zijlstra 
1200391e43daSPeter Zijlstra 	list_del_init(&rt_se->run_list);
1201391e43daSPeter Zijlstra 	if (list_empty(array->queue + rt_se_prio(rt_se)))
1202391e43daSPeter Zijlstra 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1203391e43daSPeter Zijlstra 
1204391e43daSPeter Zijlstra 	dec_rt_tasks(rt_se, rt_rq);
1205391e43daSPeter Zijlstra }
1206391e43daSPeter Zijlstra 
1207391e43daSPeter Zijlstra /*
1208391e43daSPeter Zijlstra  * Because the prio of an upper entry depends on the lower
1209391e43daSPeter Zijlstra  * entries, we must remove entries top-down.
1210391e43daSPeter Zijlstra  */
1211391e43daSPeter Zijlstra static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1212391e43daSPeter Zijlstra {
1213391e43daSPeter Zijlstra 	struct sched_rt_entity *back = NULL;
1214391e43daSPeter Zijlstra 
1215391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1216391e43daSPeter Zijlstra 		rt_se->back = back;
1217391e43daSPeter Zijlstra 		back = rt_se;
1218391e43daSPeter Zijlstra 	}
1219391e43daSPeter Zijlstra 
1220f4ebcbc0SKirill Tkhai 	dequeue_top_rt_rq(rt_rq_of_se(back));
1221f4ebcbc0SKirill Tkhai 
1222391e43daSPeter Zijlstra 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1223391e43daSPeter Zijlstra 		if (on_rt_rq(rt_se))
1224391e43daSPeter Zijlstra 			__dequeue_rt_entity(rt_se);
1225391e43daSPeter Zijlstra 	}
1226391e43daSPeter Zijlstra }
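/*
 * Illustrative sketch (plain C, not kernel code): dequeue_rt_stack()
 * walks the entity hierarchy bottom-up while recording a reverse chain
 * in ->back, then dequeues along that chain, which visits the entities
 * top-down as the comment above requires. The ex_* type is a
 * hypothetical stand-in carrying only the two links that matter here.
 */
struct ex_entity {
	struct ex_entity *parent;	/* hierarchy link, bottom-up */
	struct ex_entity *back;		/* scratch link built for the reversal */
	int queued;
};

static void ex_dequeue_stack(struct ex_entity *se)
{
	struct ex_entity *back = NULL;

	for (; se; se = se->parent) {		/* pass 1: bottom-up, remember order */
		se->back = back;
		back = se;
	}
	for (se = back; se; se = se->back) {	/* pass 2: top-down dequeue */
		if (se->queued)
			se->queued = 0;		/* stands in for __dequeue_rt_entity() */
	}
}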
1227391e43daSPeter Zijlstra 
1228391e43daSPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1229391e43daSPeter Zijlstra {
1230f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_se(rt_se);
1231f4ebcbc0SKirill Tkhai 
1232391e43daSPeter Zijlstra 	dequeue_rt_stack(rt_se);
1233391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se)
1234391e43daSPeter Zijlstra 		__enqueue_rt_entity(rt_se, head);
1235f4ebcbc0SKirill Tkhai 	enqueue_top_rt_rq(&rq->rt);
1236391e43daSPeter Zijlstra }
1237391e43daSPeter Zijlstra 
1238391e43daSPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1239391e43daSPeter Zijlstra {
1240f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_se(rt_se);
1241f4ebcbc0SKirill Tkhai 
1242391e43daSPeter Zijlstra 	dequeue_rt_stack(rt_se);
1243391e43daSPeter Zijlstra 
1244391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1245391e43daSPeter Zijlstra 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1246391e43daSPeter Zijlstra 
1247391e43daSPeter Zijlstra 		if (rt_rq && rt_rq->rt_nr_running)
1248391e43daSPeter Zijlstra 			__enqueue_rt_entity(rt_se, false);
1249391e43daSPeter Zijlstra 	}
1250f4ebcbc0SKirill Tkhai 	enqueue_top_rt_rq(&rq->rt);
1251391e43daSPeter Zijlstra }
1252391e43daSPeter Zijlstra 
1253391e43daSPeter Zijlstra /*
1254391e43daSPeter Zijlstra  * Adding/removing a task to/from a priority array:
1255391e43daSPeter Zijlstra  */
1256391e43daSPeter Zijlstra static void
1257391e43daSPeter Zijlstra enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1258391e43daSPeter Zijlstra {
1259391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1260391e43daSPeter Zijlstra 
1261391e43daSPeter Zijlstra 	if (flags & ENQUEUE_WAKEUP)
1262391e43daSPeter Zijlstra 		rt_se->timeout = 0;
1263391e43daSPeter Zijlstra 
1264391e43daSPeter Zijlstra 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1265391e43daSPeter Zijlstra 
126629baa747SPeter Zijlstra 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1267391e43daSPeter Zijlstra 		enqueue_pushable_task(rq, p);
1268391e43daSPeter Zijlstra }
1269391e43daSPeter Zijlstra 
1270391e43daSPeter Zijlstra static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1271391e43daSPeter Zijlstra {
1272391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1273391e43daSPeter Zijlstra 
1274391e43daSPeter Zijlstra 	update_curr_rt(rq);
1275391e43daSPeter Zijlstra 	dequeue_rt_entity(rt_se);
1276391e43daSPeter Zijlstra 
1277391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
1278391e43daSPeter Zijlstra }
1279391e43daSPeter Zijlstra 
1280391e43daSPeter Zijlstra /*
1281391e43daSPeter Zijlstra  * Put a task at the head or the tail of the run list without the overhead of
1282391e43daSPeter Zijlstra  * dequeue followed by enqueue.
1283391e43daSPeter Zijlstra  */
1284391e43daSPeter Zijlstra static void
1285391e43daSPeter Zijlstra requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1286391e43daSPeter Zijlstra {
1287391e43daSPeter Zijlstra 	if (on_rt_rq(rt_se)) {
1288391e43daSPeter Zijlstra 		struct rt_prio_array *array = &rt_rq->active;
1289391e43daSPeter Zijlstra 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1290391e43daSPeter Zijlstra 
1291391e43daSPeter Zijlstra 		if (head)
1292391e43daSPeter Zijlstra 			list_move(&rt_se->run_list, queue);
1293391e43daSPeter Zijlstra 		else
1294391e43daSPeter Zijlstra 			list_move_tail(&rt_se->run_list, queue);
1295391e43daSPeter Zijlstra 	}
1296391e43daSPeter Zijlstra }
1297391e43daSPeter Zijlstra 
1298391e43daSPeter Zijlstra static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1299391e43daSPeter Zijlstra {
1300391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1301391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
1302391e43daSPeter Zijlstra 
1303391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1304391e43daSPeter Zijlstra 		rt_rq = rt_rq_of_se(rt_se);
1305391e43daSPeter Zijlstra 		requeue_rt_entity(rt_rq, rt_se, head);
1306391e43daSPeter Zijlstra 	}
1307391e43daSPeter Zijlstra }
1308391e43daSPeter Zijlstra 
1309391e43daSPeter Zijlstra static void yield_task_rt(struct rq *rq)
1310391e43daSPeter Zijlstra {
1311391e43daSPeter Zijlstra 	requeue_task_rt(rq, rq->curr, 0);
1312391e43daSPeter Zijlstra }
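/*
 * Illustrative sketch (plain C, not kernel code): yield under SCHED_RR
 * is just "move myself to the tail of my priority list"; nothing is
 * dequeued and no accounting changes, which is why requeue_rt_entity()
 * uses list_move_tail() rather than a dequeue/enqueue pair. A minimal
 * rotation on a circular doubly-linked list with a sentinel head:
 */
struct ex_list { struct ex_list *prev, *next; };

static void ex_move_tail(struct ex_list *node, struct ex_list *head)
{
	/* unlink */
	node->prev->next = node->next;
	node->next->prev = node->prev;
	/* relink just before the sentinel head, i.e. at the tail */
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}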
1313391e43daSPeter Zijlstra 
1314391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1315391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task);
1316391e43daSPeter Zijlstra 
1317391e43daSPeter Zijlstra static int
1318ac66f547SPeter Zijlstra select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1319391e43daSPeter Zijlstra {
1320391e43daSPeter Zijlstra 	struct task_struct *curr;
1321391e43daSPeter Zijlstra 	struct rq *rq;
1322391e43daSPeter Zijlstra 
1323391e43daSPeter Zijlstra 	/* For anything but wake ups, just return the task_cpu */
1324391e43daSPeter Zijlstra 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1325391e43daSPeter Zijlstra 		goto out;
1326391e43daSPeter Zijlstra 
1327391e43daSPeter Zijlstra 	rq = cpu_rq(cpu);
1328391e43daSPeter Zijlstra 
1329391e43daSPeter Zijlstra 	rcu_read_lock();
1330391e43daSPeter Zijlstra 	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1331391e43daSPeter Zijlstra 
1332391e43daSPeter Zijlstra 	/*
1333391e43daSPeter Zijlstra 	 * If the current task on @p's runqueue is an RT task, then
1334391e43daSPeter Zijlstra 	 * try to see if we can wake this RT task up on another
1335391e43daSPeter Zijlstra 	 * runqueue. Otherwise simply start this RT task
1336391e43daSPeter Zijlstra 	 * on its current runqueue.
1337391e43daSPeter Zijlstra 	 *
1338391e43daSPeter Zijlstra 	 * We want to avoid overloading runqueues. If the woken
1339391e43daSPeter Zijlstra 	 * task has a higher priority, then it will stay on this CPU
1340391e43daSPeter Zijlstra 	 * and the lower prio task should be moved to another CPU.
1341391e43daSPeter Zijlstra 	 * Even though this will probably make the lower prio task
1342391e43daSPeter Zijlstra 	 * lose its cache, we do not want to bounce a higher priority
1343391e43daSPeter Zijlstra 	 * task around just because it gave up its CPU, perhaps for a
1344391e43daSPeter Zijlstra 	 * lock?
1345391e43daSPeter Zijlstra 	 *
1346391e43daSPeter Zijlstra 	 * For equal prio tasks, we just let the scheduler sort it out.
1347391e43daSPeter Zijlstra 	 *
1348391e43daSPeter Zijlstra 	 * Otherwise, just let it ride on the affined RQ and the
1349391e43daSPeter Zijlstra 	 * post-schedule routine will push the preempted task away.
1350391e43daSPeter Zijlstra 	 *
1351391e43daSPeter Zijlstra 	 * This test is optimistic; if we get it wrong the load-balancer
1352391e43daSPeter Zijlstra 	 * will have to sort it out.
1353391e43daSPeter Zijlstra 	 */
1354391e43daSPeter Zijlstra 	if (curr && unlikely(rt_task(curr)) &&
135529baa747SPeter Zijlstra 	    (curr->nr_cpus_allowed < 2 ||
13566bfa687cSShawn Bohrer 	     curr->prio <= p->prio)) {
1357391e43daSPeter Zijlstra 		int target = find_lowest_rq(p);
1358391e43daSPeter Zijlstra 
135980e3d87bSTim Chen 		/*
136080e3d87bSTim Chen 		 * Don't bother moving it if the destination CPU is
136180e3d87bSTim Chen 		 * not running a lower priority task.
136280e3d87bSTim Chen 		 */
136380e3d87bSTim Chen 		if (target != -1 &&
136480e3d87bSTim Chen 		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1365391e43daSPeter Zijlstra 			cpu = target;
1366391e43daSPeter Zijlstra 	}
1367391e43daSPeter Zijlstra 	rcu_read_unlock();
1368391e43daSPeter Zijlstra 
1369391e43daSPeter Zijlstra out:
1370391e43daSPeter Zijlstra 	return cpu;
1371391e43daSPeter Zijlstra }
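/*
 * Illustrative sketch (plain C, not kernel code): the wakeup placement
 * above reduces to "redirect the wakeup only when the running task
 * would suffer from p landing on it and a strictly lower-priority CPU
 * exists". Lower numeric prio means more important, as in the kernel;
 * all names and the -1 sentinel are hypothetical.
 */
static int ex_should_redirect(int curr_is_rt, int curr_nr_cpus_allowed,
			      int curr_prio, int p_prio,
			      int target_cpu, int target_highest_prio)
{
	if (!curr_is_rt)
		return 0;		/* current isn't RT: just use task_cpu */
	if (curr_nr_cpus_allowed >= 2 && curr_prio > p_prio)
		return 0;		/* current can move, or p wins anyway */
	/* only move p if the target truly runs something less important */
	return target_cpu != -1 && p_prio < target_highest_prio;
}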
1372391e43daSPeter Zijlstra 
1373391e43daSPeter Zijlstra static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1374391e43daSPeter Zijlstra {
1375308a623aSWanpeng Li 	/*
1376308a623aSWanpeng Li 	 * Current can't be migrated, useless to reschedule,
1377308a623aSWanpeng Li 	 * let's hope p can move out.
1378308a623aSWanpeng Li 	 */
1379308a623aSWanpeng Li 	if (rq->curr->nr_cpus_allowed == 1 ||
1380308a623aSWanpeng Li 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1381391e43daSPeter Zijlstra 		return;
1382391e43daSPeter Zijlstra 
1383308a623aSWanpeng Li 	/*
1384308a623aSWanpeng Li 	 * p is migratable, so let's not schedule it and
1385308a623aSWanpeng Li 	 * see if it is pushed or pulled somewhere else.
1386308a623aSWanpeng Li 	 */
138729baa747SPeter Zijlstra 	if (p->nr_cpus_allowed != 1
1388391e43daSPeter Zijlstra 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
1389391e43daSPeter Zijlstra 		return;
1390391e43daSPeter Zijlstra 
1391391e43daSPeter Zijlstra 	/*
1392391e43daSPeter Zijlstra 	 * There appear to be other CPUs that can accept
1393391e43daSPeter Zijlstra 	 * current and none to run 'p', so let's reschedule
1394391e43daSPeter Zijlstra 	 * to try and push current away:
1395391e43daSPeter Zijlstra 	 */
1396391e43daSPeter Zijlstra 	requeue_task_rt(rq, p, 1);
13978875125eSKirill Tkhai 	resched_curr(rq);
1398391e43daSPeter Zijlstra }
1399391e43daSPeter Zijlstra 
1400391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
1401391e43daSPeter Zijlstra 
1402391e43daSPeter Zijlstra /*
1403391e43daSPeter Zijlstra  * Preempt the current task with a newly woken task if needed:
1404391e43daSPeter Zijlstra  */
1405391e43daSPeter Zijlstra static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1406391e43daSPeter Zijlstra {
1407391e43daSPeter Zijlstra 	if (p->prio < rq->curr->prio) {
14088875125eSKirill Tkhai 		resched_curr(rq);
1409391e43daSPeter Zijlstra 		return;
1410391e43daSPeter Zijlstra 	}
1411391e43daSPeter Zijlstra 
1412391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1413391e43daSPeter Zijlstra 	/*
1414391e43daSPeter Zijlstra 	 * If:
1415391e43daSPeter Zijlstra 	 *
1416391e43daSPeter Zijlstra 	 * - the newly woken task is of equal priority to the current task
1417391e43daSPeter Zijlstra 	 * - the newly woken task is non-migratable while current is migratable
1418391e43daSPeter Zijlstra 	 * - current will be preempted on the next reschedule
1419391e43daSPeter Zijlstra 	 *
1420391e43daSPeter Zijlstra 	 * we should check to see if current can readily move to a different
1421391e43daSPeter Zijlstra 	 * cpu.  If so, we will reschedule to allow the push logic to try
1422391e43daSPeter Zijlstra 	 * to move current somewhere else, making room for our non-migratable
1423391e43daSPeter Zijlstra 	 * task.
1424391e43daSPeter Zijlstra 	 */
1425391e43daSPeter Zijlstra 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1426391e43daSPeter Zijlstra 		check_preempt_equal_prio(rq, p);
1427391e43daSPeter Zijlstra #endif
1428391e43daSPeter Zijlstra }
1429391e43daSPeter Zijlstra 
1430391e43daSPeter Zijlstra static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1431391e43daSPeter Zijlstra 						   struct rt_rq *rt_rq)
1432391e43daSPeter Zijlstra {
1433391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1434391e43daSPeter Zijlstra 	struct sched_rt_entity *next = NULL;
1435391e43daSPeter Zijlstra 	struct list_head *queue;
1436391e43daSPeter Zijlstra 	int idx;
1437391e43daSPeter Zijlstra 
1438391e43daSPeter Zijlstra 	idx = sched_find_first_bit(array->bitmap);
1439391e43daSPeter Zijlstra 	BUG_ON(idx >= MAX_RT_PRIO);
1440391e43daSPeter Zijlstra 
1441391e43daSPeter Zijlstra 	queue = array->queue + idx;
1442391e43daSPeter Zijlstra 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1443391e43daSPeter Zijlstra 
1444391e43daSPeter Zijlstra 	return next;
1445391e43daSPeter Zijlstra }
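/*
 * Illustrative sketch (plain C, not kernel code): the pick is O(1) --
 * find the first set bit in the priority bitmap, then take the head of
 * that priority's FIFO list. This reuses ex_find_first_prio() from the
 * earlier sketch; the arrays are simplified stand-ins for
 * rt_prio_array's bitmap and queues.
 */
struct ex_node { struct ex_node *next; };

struct ex_prio_array {
	uint64_t bitmap[2];			/* 100 priorities fit in two words */
	struct ex_node *queue[EX_MAX_RT_PRIO];	/* head of each per-prio FIFO */
};

static struct ex_node *ex_pick_next(struct ex_prio_array *a)
{
	int idx = ex_find_first_prio(a->bitmap);

	if (idx == EX_PRIO_NONE)
		return NULL;		/* nothing queued at any priority */
	return a->queue[idx];		/* head of the highest-prio list */
}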
1446391e43daSPeter Zijlstra 
1447391e43daSPeter Zijlstra static struct task_struct *_pick_next_task_rt(struct rq *rq)
1448391e43daSPeter Zijlstra {
1449391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se;
1450391e43daSPeter Zijlstra 	struct task_struct *p;
1451606dba2eSPeter Zijlstra 	struct rt_rq *rt_rq  = &rq->rt;
1452391e43daSPeter Zijlstra 
1453391e43daSPeter Zijlstra 	do {
1454391e43daSPeter Zijlstra 		rt_se = pick_next_rt_entity(rq, rt_rq);
1455391e43daSPeter Zijlstra 		BUG_ON(!rt_se);
1456391e43daSPeter Zijlstra 		rt_rq = group_rt_rq(rt_se);
1457391e43daSPeter Zijlstra 	} while (rt_rq);
1458391e43daSPeter Zijlstra 
1459391e43daSPeter Zijlstra 	p = rt_task_of(rt_se);
146078becc27SFrederic Weisbecker 	p->se.exec_start = rq_clock_task(rq);
1461391e43daSPeter Zijlstra 
1462391e43daSPeter Zijlstra 	return p;
1463391e43daSPeter Zijlstra }
1464391e43daSPeter Zijlstra 
1465606dba2eSPeter Zijlstra static struct task_struct *
1466606dba2eSPeter Zijlstra pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1467391e43daSPeter Zijlstra {
1468606dba2eSPeter Zijlstra 	struct task_struct *p;
1469606dba2eSPeter Zijlstra 	struct rt_rq *rt_rq = &rq->rt;
1470606dba2eSPeter Zijlstra 
147137e117c0SPeter Zijlstra 	if (need_pull_rt_task(rq, prev)) {
147238033c37SPeter Zijlstra 		pull_rt_task(rq);
147337e117c0SPeter Zijlstra 		/*
147437e117c0SPeter Zijlstra 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1475a1d9a323SKirill Tkhai 		 * means a dl or stop task can slip in, in which case we need
1476a1d9a323SKirill Tkhai 		 * to re-start task selection.
147737e117c0SPeter Zijlstra 		 */
1478da0c1e65SKirill Tkhai 		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1479a1d9a323SKirill Tkhai 			     rq->dl.dl_nr_running))
148037e117c0SPeter Zijlstra 			return RETRY_TASK;
148137e117c0SPeter Zijlstra 	}
148238033c37SPeter Zijlstra 
1483734ff2a7SKirill Tkhai 	/*
1484734ff2a7SKirill Tkhai 	 * We may dequeue prev's rt_rq in put_prev_task().
1485734ff2a7SKirill Tkhai 	 * So, we update time before rt_nr_running check.
1486734ff2a7SKirill Tkhai 	 */
1487734ff2a7SKirill Tkhai 	if (prev->sched_class == &rt_sched_class)
1488734ff2a7SKirill Tkhai 		update_curr_rt(rq);
1489734ff2a7SKirill Tkhai 
1490f4ebcbc0SKirill Tkhai 	if (!rt_rq->rt_queued)
1491606dba2eSPeter Zijlstra 		return NULL;
1492606dba2eSPeter Zijlstra 
14933f1d2a31SPeter Zijlstra 	put_prev_task(rq, prev);
1494606dba2eSPeter Zijlstra 
1495606dba2eSPeter Zijlstra 	p = _pick_next_task_rt(rq);
1496391e43daSPeter Zijlstra 
1497391e43daSPeter Zijlstra 	/* The running task is never eligible for pushing */
1498391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
1499391e43daSPeter Zijlstra 
1500dc877341SPeter Zijlstra 	set_post_schedule(rq);
1501391e43daSPeter Zijlstra 
1502391e43daSPeter Zijlstra 	return p;
1503391e43daSPeter Zijlstra }
1504391e43daSPeter Zijlstra 
1505391e43daSPeter Zijlstra static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1506391e43daSPeter Zijlstra {
1507391e43daSPeter Zijlstra 	update_curr_rt(rq);
1508391e43daSPeter Zijlstra 
1509391e43daSPeter Zijlstra 	/*
1510391e43daSPeter Zijlstra 	 * The previous task needs to be made eligible for pushing
1511391e43daSPeter Zijlstra 	 * if it is still active
1512391e43daSPeter Zijlstra 	 */
151329baa747SPeter Zijlstra 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1514391e43daSPeter Zijlstra 		enqueue_pushable_task(rq, p);
1515391e43daSPeter Zijlstra }
1516391e43daSPeter Zijlstra 
1517391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1518391e43daSPeter Zijlstra 
1519391e43daSPeter Zijlstra /* Only try algorithms three times */
1520391e43daSPeter Zijlstra #define RT_MAX_TRIES 3
1521391e43daSPeter Zijlstra 
1522391e43daSPeter Zijlstra static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1523391e43daSPeter Zijlstra {
1524391e43daSPeter Zijlstra 	if (!task_running(rq, p) &&
152560334cafSKirill Tkhai 	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1526391e43daSPeter Zijlstra 		return 1;
1527391e43daSPeter Zijlstra 	return 0;
1528391e43daSPeter Zijlstra }
1529391e43daSPeter Zijlstra 
1530e23ee747SKirill Tkhai /*
1531e23ee747SKirill Tkhai  * Return the highest-priority pushable task on this rq that is suitable
1532e23ee747SKirill Tkhai  * to be executed on the given cpu, or NULL if there is none.
1533e23ee747SKirill Tkhai  */
1534e23ee747SKirill Tkhai static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1535391e43daSPeter Zijlstra {
1536e23ee747SKirill Tkhai 	struct plist_head *head = &rq->rt.pushable_tasks;
1537391e43daSPeter Zijlstra 	struct task_struct *p;
1538391e43daSPeter Zijlstra 
1539e23ee747SKirill Tkhai 	if (!has_pushable_tasks(rq))
1540e23ee747SKirill Tkhai 		return NULL;
1541391e43daSPeter Zijlstra 
1542e23ee747SKirill Tkhai 	plist_for_each_entry(p, head, pushable_tasks) {
1543e23ee747SKirill Tkhai 		if (pick_rt_task(rq, p, cpu))
1544e23ee747SKirill Tkhai 			return p;
1545391e43daSPeter Zijlstra 	}
1546391e43daSPeter Zijlstra 
1547e23ee747SKirill Tkhai 	return NULL;
1548391e43daSPeter Zijlstra }
1549391e43daSPeter Zijlstra 
1550391e43daSPeter Zijlstra static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1551391e43daSPeter Zijlstra 
1552391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task)
1553391e43daSPeter Zijlstra {
1554391e43daSPeter Zijlstra 	struct sched_domain *sd;
15554ba29684SChristoph Lameter 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1556391e43daSPeter Zijlstra 	int this_cpu = smp_processor_id();
1557391e43daSPeter Zijlstra 	int cpu      = task_cpu(task);
1558391e43daSPeter Zijlstra 
1559391e43daSPeter Zijlstra 	/* Make sure the mask is initialized first */
1560391e43daSPeter Zijlstra 	if (unlikely(!lowest_mask))
1561391e43daSPeter Zijlstra 		return -1;
1562391e43daSPeter Zijlstra 
156329baa747SPeter Zijlstra 	if (task->nr_cpus_allowed == 1)
1564391e43daSPeter Zijlstra 		return -1; /* No other targets possible */
1565391e43daSPeter Zijlstra 
1566391e43daSPeter Zijlstra 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1567391e43daSPeter Zijlstra 		return -1; /* No targets found */
1568391e43daSPeter Zijlstra 
1569391e43daSPeter Zijlstra 	/*
1570391e43daSPeter Zijlstra 	 * At this point we have built a mask of cpus representing the
1571391e43daSPeter Zijlstra 	 * lowest priority tasks in the system.  Now we want to elect
1572391e43daSPeter Zijlstra 	 * the best one based on our affinity and topology.
1573391e43daSPeter Zijlstra 	 *
1574391e43daSPeter Zijlstra 	 * We prioritize the last cpu that the task executed on since
1575391e43daSPeter Zijlstra 	 * it is most likely cache-hot in that location.
1576391e43daSPeter Zijlstra 	 */
1577391e43daSPeter Zijlstra 	if (cpumask_test_cpu(cpu, lowest_mask))
1578391e43daSPeter Zijlstra 		return cpu;
1579391e43daSPeter Zijlstra 
1580391e43daSPeter Zijlstra 	/*
1581391e43daSPeter Zijlstra 	 * Otherwise, we consult the sched_domains span maps to figure
1582391e43daSPeter Zijlstra 	 * out which cpu is logically closest to our hot cache data.
1583391e43daSPeter Zijlstra 	 */
1584391e43daSPeter Zijlstra 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1585391e43daSPeter Zijlstra 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1586391e43daSPeter Zijlstra 
1587391e43daSPeter Zijlstra 	rcu_read_lock();
1588391e43daSPeter Zijlstra 	for_each_domain(cpu, sd) {
1589391e43daSPeter Zijlstra 		if (sd->flags & SD_WAKE_AFFINE) {
1590391e43daSPeter Zijlstra 			int best_cpu;
1591391e43daSPeter Zijlstra 
1592391e43daSPeter Zijlstra 			/*
1593391e43daSPeter Zijlstra 			 * "this_cpu" is cheaper to preempt than a
1594391e43daSPeter Zijlstra 			 * remote processor.
1595391e43daSPeter Zijlstra 			 */
1596391e43daSPeter Zijlstra 			if (this_cpu != -1 &&
1597391e43daSPeter Zijlstra 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1598391e43daSPeter Zijlstra 				rcu_read_unlock();
1599391e43daSPeter Zijlstra 				return this_cpu;
1600391e43daSPeter Zijlstra 			}
1601391e43daSPeter Zijlstra 
1602391e43daSPeter Zijlstra 			best_cpu = cpumask_first_and(lowest_mask,
1603391e43daSPeter Zijlstra 						     sched_domain_span(sd));
1604391e43daSPeter Zijlstra 			if (best_cpu < nr_cpu_ids) {
1605391e43daSPeter Zijlstra 				rcu_read_unlock();
1606391e43daSPeter Zijlstra 				return best_cpu;
1607391e43daSPeter Zijlstra 			}
1608391e43daSPeter Zijlstra 		}
1609391e43daSPeter Zijlstra 	}
1610391e43daSPeter Zijlstra 	rcu_read_unlock();
1611391e43daSPeter Zijlstra 
1612391e43daSPeter Zijlstra 	/*
1613391e43daSPeter Zijlstra 	 * And finally, if there were no matches within the domains
1614391e43daSPeter Zijlstra 	 * just give the caller *something* to work with from the compatible
1615391e43daSPeter Zijlstra 	 * locations.
1616391e43daSPeter Zijlstra 	 */
1617391e43daSPeter Zijlstra 	if (this_cpu != -1)
1618391e43daSPeter Zijlstra 		return this_cpu;
1619391e43daSPeter Zijlstra 
1620391e43daSPeter Zijlstra 	cpu = cpumask_any(lowest_mask);
1621391e43daSPeter Zijlstra 	if (cpu < nr_cpu_ids)
1622391e43daSPeter Zijlstra 		return cpu;
1623391e43daSPeter Zijlstra 	return -1;
1624391e43daSPeter Zijlstra }
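/*
 * Illustrative sketch (plain C, not kernel code): the election above,
 * with the sched_domain walk collapsed into a flat preference order --
 * the task's last CPU (cache-hot) first, then this_cpu (cheapest to
 * preempt), then any remaining candidate. candidate[] stands in for
 * the lowest_mask; all names are hypothetical.
 */
static int ex_pick_lowest(const int *candidate, int ncand,
			  int last_cpu, int this_cpu)
{
	int i;

	for (i = 0; i < ncand; i++)	/* 1) prefer the cache-hot last CPU */
		if (candidate[i] == last_cpu)
			return last_cpu;
	for (i = 0; i < ncand; i++)	/* 2) then the cheap local preemption */
		if (candidate[i] == this_cpu)
			return this_cpu;
	return ncand ? candidate[0] : -1;	/* 3) else anything that fits */
}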
1625391e43daSPeter Zijlstra 
1626391e43daSPeter Zijlstra /* Will lock the rq it finds */
1627391e43daSPeter Zijlstra static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1628391e43daSPeter Zijlstra {
1629391e43daSPeter Zijlstra 	struct rq *lowest_rq = NULL;
1630391e43daSPeter Zijlstra 	int tries;
1631391e43daSPeter Zijlstra 	int cpu;
1632391e43daSPeter Zijlstra 
1633391e43daSPeter Zijlstra 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1634391e43daSPeter Zijlstra 		cpu = find_lowest_rq(task);
1635391e43daSPeter Zijlstra 
1636391e43daSPeter Zijlstra 		if ((cpu == -1) || (cpu == rq->cpu))
1637391e43daSPeter Zijlstra 			break;
1638391e43daSPeter Zijlstra 
1639391e43daSPeter Zijlstra 		lowest_rq = cpu_rq(cpu);
1640391e43daSPeter Zijlstra 
164180e3d87bSTim Chen 		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
164280e3d87bSTim Chen 			/*
164380e3d87bSTim Chen 			 * Target rq has tasks of equal or higher priority,
164480e3d87bSTim Chen 			 * retrying does not release any lock and is unlikely
164580e3d87bSTim Chen 			 * to yield a different result.
164680e3d87bSTim Chen 			 */
164780e3d87bSTim Chen 			lowest_rq = NULL;
164880e3d87bSTim Chen 			break;
164980e3d87bSTim Chen 		}
165080e3d87bSTim Chen 
1651391e43daSPeter Zijlstra 		/* if the prio of this runqueue changed, try again */
1652391e43daSPeter Zijlstra 		if (double_lock_balance(rq, lowest_rq)) {
1653391e43daSPeter Zijlstra 			/*
1654391e43daSPeter Zijlstra 			 * We had to unlock the run queue. In
1655391e43daSPeter Zijlstra 			 * the meantime, the task could have
1656391e43daSPeter Zijlstra 			 * migrated already or had its affinity changed.
1657391e43daSPeter Zijlstra 			 * Also make sure that it wasn't scheduled on its rq.
1658391e43daSPeter Zijlstra 			 */
1659391e43daSPeter Zijlstra 			if (unlikely(task_rq(task) != rq ||
1660391e43daSPeter Zijlstra 				     !cpumask_test_cpu(lowest_rq->cpu,
1661391e43daSPeter Zijlstra 						       tsk_cpus_allowed(task)) ||
1662391e43daSPeter Zijlstra 				     task_running(rq, task) ||
1663da0c1e65SKirill Tkhai 				     !task_on_rq_queued(task))) {
1664391e43daSPeter Zijlstra 
16657f1b4393SPeter Zijlstra 				double_unlock_balance(rq, lowest_rq);
1666391e43daSPeter Zijlstra 				lowest_rq = NULL;
1667391e43daSPeter Zijlstra 				break;
1668391e43daSPeter Zijlstra 			}
1669391e43daSPeter Zijlstra 		}
1670391e43daSPeter Zijlstra 
1671391e43daSPeter Zijlstra 		/* If this rq is still suitable use it. */
1672391e43daSPeter Zijlstra 		if (lowest_rq->rt.highest_prio.curr > task->prio)
1673391e43daSPeter Zijlstra 			break;
1674391e43daSPeter Zijlstra 
1675391e43daSPeter Zijlstra 		/* try again */
1676391e43daSPeter Zijlstra 		double_unlock_balance(rq, lowest_rq);
1677391e43daSPeter Zijlstra 		lowest_rq = NULL;
1678391e43daSPeter Zijlstra 	}
1679391e43daSPeter Zijlstra 
1680391e43daSPeter Zijlstra 	return lowest_rq;
1681391e43daSPeter Zijlstra }
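/*
 * Illustrative sketch (plain C with pthreads, not kernel code):
 * double_lock_balance() avoids ABBA deadlock by imposing a fixed lock
 * order, dropping and re-taking the held lock when needed; a non-zero
 * return tells the caller the lock was dropped and every prior
 * assumption must be revalidated, exactly as the loop above does.
 */
#include <pthread.h>

static int ex_double_lock(pthread_mutex_t *held, pthread_mutex_t *busiest)
{
	if (pthread_mutex_trylock(busiest) == 0)
		return 0;			/* got both, nothing dropped */
	if (busiest < held) {			/* fixed order: low address first */
		pthread_mutex_unlock(held);
		pthread_mutex_lock(busiest);
		pthread_mutex_lock(held);
		return 1;			/* held was dropped: revalidate! */
	}
	pthread_mutex_lock(busiest);		/* ordering already correct */
	return 0;
}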
1682391e43daSPeter Zijlstra 
1683391e43daSPeter Zijlstra static struct task_struct *pick_next_pushable_task(struct rq *rq)
1684391e43daSPeter Zijlstra {
1685391e43daSPeter Zijlstra 	struct task_struct *p;
1686391e43daSPeter Zijlstra 
1687391e43daSPeter Zijlstra 	if (!has_pushable_tasks(rq))
1688391e43daSPeter Zijlstra 		return NULL;
1689391e43daSPeter Zijlstra 
1690391e43daSPeter Zijlstra 	p = plist_first_entry(&rq->rt.pushable_tasks,
1691391e43daSPeter Zijlstra 			      struct task_struct, pushable_tasks);
1692391e43daSPeter Zijlstra 
1693391e43daSPeter Zijlstra 	BUG_ON(rq->cpu != task_cpu(p));
1694391e43daSPeter Zijlstra 	BUG_ON(task_current(rq, p));
169529baa747SPeter Zijlstra 	BUG_ON(p->nr_cpus_allowed <= 1);
1696391e43daSPeter Zijlstra 
1697da0c1e65SKirill Tkhai 	BUG_ON(!task_on_rq_queued(p));
1698391e43daSPeter Zijlstra 	BUG_ON(!rt_task(p));
1699391e43daSPeter Zijlstra 
1700391e43daSPeter Zijlstra 	return p;
1701391e43daSPeter Zijlstra }
1702391e43daSPeter Zijlstra 
1703391e43daSPeter Zijlstra /*
1704391e43daSPeter Zijlstra  * If the current CPU has more than one RT task, see if the
1705391e43daSPeter Zijlstra  * non-running task can migrate over to a CPU that is running a task
1706391e43daSPeter Zijlstra  * of lesser priority.
1707391e43daSPeter Zijlstra  */
1708391e43daSPeter Zijlstra static int push_rt_task(struct rq *rq)
1709391e43daSPeter Zijlstra {
1710391e43daSPeter Zijlstra 	struct task_struct *next_task;
1711391e43daSPeter Zijlstra 	struct rq *lowest_rq;
1712391e43daSPeter Zijlstra 	int ret = 0;
1713391e43daSPeter Zijlstra 
1714391e43daSPeter Zijlstra 	if (!rq->rt.overloaded)
1715391e43daSPeter Zijlstra 		return 0;
1716391e43daSPeter Zijlstra 
1717391e43daSPeter Zijlstra 	next_task = pick_next_pushable_task(rq);
1718391e43daSPeter Zijlstra 	if (!next_task)
1719391e43daSPeter Zijlstra 		return 0;
1720391e43daSPeter Zijlstra 
1721391e43daSPeter Zijlstra retry:
1722391e43daSPeter Zijlstra 	if (unlikely(next_task == rq->curr)) {
1723391e43daSPeter Zijlstra 		WARN_ON(1);
1724391e43daSPeter Zijlstra 		return 0;
1725391e43daSPeter Zijlstra 	}
1726391e43daSPeter Zijlstra 
1727391e43daSPeter Zijlstra 	/*
1728391e43daSPeter Zijlstra 	 * It's possible that next_task slipped in with a
1729391e43daSPeter Zijlstra 	 * higher priority than current. If that's the case
1730391e43daSPeter Zijlstra 	 * just reschedule current.
1731391e43daSPeter Zijlstra 	 */
1732391e43daSPeter Zijlstra 	if (unlikely(next_task->prio < rq->curr->prio)) {
17338875125eSKirill Tkhai 		resched_curr(rq);
1734391e43daSPeter Zijlstra 		return 0;
1735391e43daSPeter Zijlstra 	}
1736391e43daSPeter Zijlstra 
1737391e43daSPeter Zijlstra 	/* We might release rq lock */
1738391e43daSPeter Zijlstra 	get_task_struct(next_task);
1739391e43daSPeter Zijlstra 
1740391e43daSPeter Zijlstra 	/* find_lock_lowest_rq locks the rq if found */
1741391e43daSPeter Zijlstra 	lowest_rq = find_lock_lowest_rq(next_task, rq);
1742391e43daSPeter Zijlstra 	if (!lowest_rq) {
1743391e43daSPeter Zijlstra 		struct task_struct *task;
1744391e43daSPeter Zijlstra 		/*
1745391e43daSPeter Zijlstra 		 * find_lock_lowest_rq releases rq->lock
1746391e43daSPeter Zijlstra 		 * so it is possible that next_task has migrated.
1747391e43daSPeter Zijlstra 		 *
1748391e43daSPeter Zijlstra 		 * We need to make sure that the task is still on the same
1749391e43daSPeter Zijlstra 		 * run-queue and is also still the next task eligible for
1750391e43daSPeter Zijlstra 		 * pushing.
1751391e43daSPeter Zijlstra 		 */
1752391e43daSPeter Zijlstra 		task = pick_next_pushable_task(rq);
1753391e43daSPeter Zijlstra 		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1754391e43daSPeter Zijlstra 			/*
1755391e43daSPeter Zijlstra 			 * The task hasn't migrated, and is still the next
1756391e43daSPeter Zijlstra 			 * eligible task, but we failed to find a run-queue
1757391e43daSPeter Zijlstra 			 * to push it to.  Do not retry in this case, since
1758391e43daSPeter Zijlstra 			 * other cpus will pull from us when ready.
1759391e43daSPeter Zijlstra 			 */
1760391e43daSPeter Zijlstra 			goto out;
1761391e43daSPeter Zijlstra 		}
1762391e43daSPeter Zijlstra 
1763391e43daSPeter Zijlstra 		if (!task)
1764391e43daSPeter Zijlstra 			/* No more tasks, just exit */
1765391e43daSPeter Zijlstra 			goto out;
1766391e43daSPeter Zijlstra 
1767391e43daSPeter Zijlstra 		/*
1768391e43daSPeter Zijlstra 		 * Something has shifted, try again.
1769391e43daSPeter Zijlstra 		 */
1770391e43daSPeter Zijlstra 		put_task_struct(next_task);
1771391e43daSPeter Zijlstra 		next_task = task;
1772391e43daSPeter Zijlstra 		goto retry;
1773391e43daSPeter Zijlstra 	}
1774391e43daSPeter Zijlstra 
1775391e43daSPeter Zijlstra 	deactivate_task(rq, next_task, 0);
1776391e43daSPeter Zijlstra 	set_task_cpu(next_task, lowest_rq->cpu);
1777391e43daSPeter Zijlstra 	activate_task(lowest_rq, next_task, 0);
1778391e43daSPeter Zijlstra 	ret = 1;
1779391e43daSPeter Zijlstra 
17808875125eSKirill Tkhai 	resched_curr(lowest_rq);
1781391e43daSPeter Zijlstra 
1782391e43daSPeter Zijlstra 	double_unlock_balance(rq, lowest_rq);
1783391e43daSPeter Zijlstra 
1784391e43daSPeter Zijlstra out:
1785391e43daSPeter Zijlstra 	put_task_struct(next_task);
1786391e43daSPeter Zijlstra 
1787391e43daSPeter Zijlstra 	return ret;
1788391e43daSPeter Zijlstra }
1789391e43daSPeter Zijlstra 
1790391e43daSPeter Zijlstra static void push_rt_tasks(struct rq *rq)
1791391e43daSPeter Zijlstra {
1792391e43daSPeter Zijlstra 	/* push_rt_task will return true if it moved an RT */
1793391e43daSPeter Zijlstra 	while (push_rt_task(rq))
1794391e43daSPeter Zijlstra 		;
1795391e43daSPeter Zijlstra }
1796391e43daSPeter Zijlstra 
1797b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
1798b6366f04SSteven Rostedt /*
1799b6366f04SSteven Rostedt  * The search for the next cpu always starts at rq->cpu and ends
1800b6366f04SSteven Rostedt  * when we reach rq->cpu again. It will never return rq->cpu.
1801b6366f04SSteven Rostedt  * This returns the next cpu to check, or nr_cpu_ids if the loop
1802b6366f04SSteven Rostedt  * is complete.
1803b6366f04SSteven Rostedt  *
1804b6366f04SSteven Rostedt  * rq->rt.push_cpu holds the last cpu returned by this function,
1805b6366f04SSteven Rostedt  * or if this is the first instance, it must hold rq->cpu.
1806b6366f04SSteven Rostedt  */
1807b6366f04SSteven Rostedt static int rto_next_cpu(struct rq *rq)
1808b6366f04SSteven Rostedt {
1809b6366f04SSteven Rostedt 	int prev_cpu = rq->rt.push_cpu;
1810b6366f04SSteven Rostedt 	int cpu;
1811b6366f04SSteven Rostedt 
1812b6366f04SSteven Rostedt 	cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1813b6366f04SSteven Rostedt 
1814b6366f04SSteven Rostedt 	/*
1815b6366f04SSteven Rostedt 	 * If the previous cpu is less than the rq's CPU, then it already
1816b6366f04SSteven Rostedt 	 * passed the end of the mask, and has started from the beginning.
1817b6366f04SSteven Rostedt 	 * We end if the next CPU is greater than or equal to rq's CPU.
1818b6366f04SSteven Rostedt 	 */
1819b6366f04SSteven Rostedt 	if (prev_cpu < rq->cpu) {
1820b6366f04SSteven Rostedt 		if (cpu >= rq->cpu)
1821b6366f04SSteven Rostedt 			return nr_cpu_ids;
1822b6366f04SSteven Rostedt 
1823b6366f04SSteven Rostedt 	} else if (cpu >= nr_cpu_ids) {
1824b6366f04SSteven Rostedt 		/*
1825b6366f04SSteven Rostedt 		 * We passed the end of the mask, start at the beginning.
1826b6366f04SSteven Rostedt 		 * If the result is greater than or equal to the rq's CPU, then
1827b6366f04SSteven Rostedt 		 * the loop is finished.
1828b6366f04SSteven Rostedt 		 */
1829b6366f04SSteven Rostedt 		cpu = cpumask_first(rq->rd->rto_mask);
1830b6366f04SSteven Rostedt 		if (cpu >= rq->cpu)
1831b6366f04SSteven Rostedt 			return nr_cpu_ids;
1832b6366f04SSteven Rostedt 	}
1833b6366f04SSteven Rostedt 	rq->rt.push_cpu = cpu;
1834b6366f04SSteven Rostedt 
1835b6366f04SSteven Rostedt 	/* Return cpu to let the caller know if the loop is finished or not */
1836b6366f04SSteven Rostedt 	return cpu;
1837b6366f04SSteven Rostedt }
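/*
 * Illustrative sketch (plain C, not kernel code): the same circular
 * scan restated over a plain bitmask of overloaded CPUs. Starting
 * after start_cpu it yields each set bit exactly once, wrapping at
 * most once, and never yields start_cpu itself; ncpus doubles as the
 * "loop finished" return value, like nr_cpu_ids above.
 */
static int ex_next_rto_cpu(unsigned long mask, int ncpus,
			   int start_cpu, int prev_cpu)
{
	int cpu;

	for (cpu = prev_cpu + 1; cpu < ncpus; cpu++) {
		if (!(mask & (1UL << cpu)))
			continue;
		if (prev_cpu < start_cpu && cpu >= start_cpu)
			return ncpus;	/* already wrapped and caught up: done */
		return cpu;
	}
	if (prev_cpu < start_cpu)
		return ncpus;		/* wrapped pass ran off the end: done */
	for (cpu = 0; cpu < ncpus; cpu++) {	/* wrap to the beginning */
		if (!(mask & (1UL << cpu)))
			continue;
		return cpu >= start_cpu ? ncpus : cpu;
	}
	return ncpus;
}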
1838b6366f04SSteven Rostedt 
1839b6366f04SSteven Rostedt static int find_next_push_cpu(struct rq *rq)
1840b6366f04SSteven Rostedt {
1841b6366f04SSteven Rostedt 	struct rq *next_rq;
1842b6366f04SSteven Rostedt 	int cpu;
1843b6366f04SSteven Rostedt 
1844b6366f04SSteven Rostedt 	while (1) {
1845b6366f04SSteven Rostedt 		cpu = rto_next_cpu(rq);
1846b6366f04SSteven Rostedt 		if (cpu >= nr_cpu_ids)
1847b6366f04SSteven Rostedt 			break;
1848b6366f04SSteven Rostedt 		next_rq = cpu_rq(cpu);
1849b6366f04SSteven Rostedt 
1850b6366f04SSteven Rostedt 		/* Make sure the next rq can push to this rq */
1851b6366f04SSteven Rostedt 		if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1852b6366f04SSteven Rostedt 			break;
1853b6366f04SSteven Rostedt 	}
1854b6366f04SSteven Rostedt 
1855b6366f04SSteven Rostedt 	return cpu;
1856b6366f04SSteven Rostedt }
1857b6366f04SSteven Rostedt 
1858b6366f04SSteven Rostedt #define RT_PUSH_IPI_EXECUTING		1
1859b6366f04SSteven Rostedt #define RT_PUSH_IPI_RESTART		2
1860b6366f04SSteven Rostedt 
1861b6366f04SSteven Rostedt static void tell_cpu_to_push(struct rq *rq)
1862b6366f04SSteven Rostedt {
1863b6366f04SSteven Rostedt 	int cpu;
1864b6366f04SSteven Rostedt 
1865b6366f04SSteven Rostedt 	if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1866b6366f04SSteven Rostedt 		raw_spin_lock(&rq->rt.push_lock);
1867b6366f04SSteven Rostedt 		/* Make sure it's still executing */
1868b6366f04SSteven Rostedt 		if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1869b6366f04SSteven Rostedt 			/*
1870b6366f04SSteven Rostedt 			 * Tell the IPI to restart the loop as things have
1871b6366f04SSteven Rostedt 			 * changed since it started.
1872b6366f04SSteven Rostedt 			 */
1873b6366f04SSteven Rostedt 			rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
1874b6366f04SSteven Rostedt 			raw_spin_unlock(&rq->rt.push_lock);
1875b6366f04SSteven Rostedt 			return;
1876b6366f04SSteven Rostedt 		}
1877b6366f04SSteven Rostedt 		raw_spin_unlock(&rq->rt.push_lock);
1878b6366f04SSteven Rostedt 	}
1879b6366f04SSteven Rostedt 
1880b6366f04SSteven Rostedt 	/* Once we are here, there's no IPI going around */
1881b6366f04SSteven Rostedt 
1882b6366f04SSteven Rostedt 	rq->rt.push_cpu = rq->cpu;
1883b6366f04SSteven Rostedt 	cpu = find_next_push_cpu(rq);
1884b6366f04SSteven Rostedt 	if (cpu >= nr_cpu_ids)
1885b6366f04SSteven Rostedt 		return;
1886b6366f04SSteven Rostedt 
1887b6366f04SSteven Rostedt 	rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
1888b6366f04SSteven Rostedt 
1889b6366f04SSteven Rostedt 	irq_work_queue_on(&rq->rt.push_work, cpu);
1890b6366f04SSteven Rostedt }
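/*
 * Illustrative sketch (plain C, not kernel code): the push_flags
 * handshake in miniature. A requester either starts a new IPI chain or,
 * if one is in flight, just marks it RESTART; the chain itself clears
 * RESTART and rewinds its scan. push_lock serialization is elided,
 * hence "sketch"; all ex_* names are hypothetical.
 */
#define EX_IPI_EXECUTING	1
#define EX_IPI_RESTART		2

struct ex_ipi { int flags; int push_cpu; int src_cpu; };

static int ex_request_push(struct ex_ipi *s)	/* tell_cpu_to_push() analogue */
{
	if (s->flags & EX_IPI_EXECUTING) {
		s->flags |= EX_IPI_RESTART;	/* running chain restarts itself */
		return 0;			/* no new IPI needed */
	}
	s->push_cpu = s->src_cpu;		/* scan starts after the source */
	s->flags = EX_IPI_EXECUTING;
	return 1;				/* caller queues the irq_work */
}

static void ex_ipi_step(struct ex_ipi *s)	/* try_to_push_tasks() analogue */
{
	if (s->flags & EX_IPI_RESTART) {
		s->flags &= ~EX_IPI_RESTART;
		s->push_cpu = s->src_cpu;	/* rewind the scan */
	}
	/* ... scan for the next CPU; if none, clear EX_IPI_EXECUTING ... */
}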
1891b6366f04SSteven Rostedt 
1892b6366f04SSteven Rostedt /* Called from hardirq context */
1893b6366f04SSteven Rostedt static void try_to_push_tasks(void *arg)
1894b6366f04SSteven Rostedt {
1895b6366f04SSteven Rostedt 	struct rt_rq *rt_rq = arg;
1896b6366f04SSteven Rostedt 	struct rq *rq, *src_rq;
1897b6366f04SSteven Rostedt 	int this_cpu;
1898b6366f04SSteven Rostedt 	int cpu;
1899b6366f04SSteven Rostedt 
1900b6366f04SSteven Rostedt 	this_cpu = rt_rq->push_cpu;
1901b6366f04SSteven Rostedt 
1902b6366f04SSteven Rostedt 	/* Paranoid check */
1903b6366f04SSteven Rostedt 	BUG_ON(this_cpu != smp_processor_id());
1904b6366f04SSteven Rostedt 
1905b6366f04SSteven Rostedt 	rq = cpu_rq(this_cpu);
1906b6366f04SSteven Rostedt 	src_rq = rq_of_rt_rq(rt_rq);
1907b6366f04SSteven Rostedt 
1908b6366f04SSteven Rostedt again:
1909b6366f04SSteven Rostedt 	if (has_pushable_tasks(rq)) {
1910b6366f04SSteven Rostedt 		raw_spin_lock(&rq->lock);
1911b6366f04SSteven Rostedt 		push_rt_task(rq);
1912b6366f04SSteven Rostedt 		raw_spin_unlock(&rq->lock);
1913b6366f04SSteven Rostedt 	}
1914b6366f04SSteven Rostedt 
1915b6366f04SSteven Rostedt 	/* Pass the IPI to the next rt overloaded queue */
1916b6366f04SSteven Rostedt 	raw_spin_lock(&rt_rq->push_lock);
1917b6366f04SSteven Rostedt 	/*
1918b6366f04SSteven Rostedt 	 * If the source queue changed since the IPI went out,
1919b6366f04SSteven Rostedt 	 * we need to restart the search from that CPU again.
1920b6366f04SSteven Rostedt 	 */
1921b6366f04SSteven Rostedt 	if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
1922b6366f04SSteven Rostedt 		rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
1923b6366f04SSteven Rostedt 		rt_rq->push_cpu = src_rq->cpu;
1924b6366f04SSteven Rostedt 	}
1925b6366f04SSteven Rostedt 
1926b6366f04SSteven Rostedt 	cpu = find_next_push_cpu(src_rq);
1927b6366f04SSteven Rostedt 
1928b6366f04SSteven Rostedt 	if (cpu >= nr_cpu_ids)
1929b6366f04SSteven Rostedt 		rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
1930b6366f04SSteven Rostedt 	raw_spin_unlock(&rt_rq->push_lock);
1931b6366f04SSteven Rostedt 
1932b6366f04SSteven Rostedt 	if (cpu >= nr_cpu_ids)
1933b6366f04SSteven Rostedt 		return;
1934b6366f04SSteven Rostedt 
1935b6366f04SSteven Rostedt 	/*
1936b6366f04SSteven Rostedt 	 * It is possible that a restart caused this CPU to be
1937b6366f04SSteven Rostedt 	 * chosen again. Don't bother with an IPI, just see if we
1938b6366f04SSteven Rostedt 	 * have more to push.
1939b6366f04SSteven Rostedt 	 */
1940b6366f04SSteven Rostedt 	if (unlikely(cpu == rq->cpu))
1941b6366f04SSteven Rostedt 		goto again;
1942b6366f04SSteven Rostedt 
1943b6366f04SSteven Rostedt 	/* Try the next RT overloaded CPU */
1944b6366f04SSteven Rostedt 	irq_work_queue_on(&rt_rq->push_work, cpu);
1945b6366f04SSteven Rostedt }
1946b6366f04SSteven Rostedt 
1947b6366f04SSteven Rostedt static void push_irq_work_func(struct irq_work *work)
1948b6366f04SSteven Rostedt {
1949b6366f04SSteven Rostedt 	struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
1950b6366f04SSteven Rostedt 
1951b6366f04SSteven Rostedt 	try_to_push_tasks(rt_rq);
1952b6366f04SSteven Rostedt }
1953b6366f04SSteven Rostedt #endif /* HAVE_RT_PUSH_IPI */
1954b6366f04SSteven Rostedt 
1955391e43daSPeter Zijlstra static int pull_rt_task(struct rq *this_rq)
1956391e43daSPeter Zijlstra {
1957391e43daSPeter Zijlstra 	int this_cpu = this_rq->cpu, ret = 0, cpu;
1958391e43daSPeter Zijlstra 	struct task_struct *p;
1959391e43daSPeter Zijlstra 	struct rq *src_rq;
1960391e43daSPeter Zijlstra 
1961391e43daSPeter Zijlstra 	if (likely(!rt_overloaded(this_rq)))
1962391e43daSPeter Zijlstra 		return 0;
1963391e43daSPeter Zijlstra 
19647c3f2ab7SPeter Zijlstra 	/*
19657c3f2ab7SPeter Zijlstra 	 * Match the barrier from rt_set_overload(); this guarantees that if we
19667c3f2ab7SPeter Zijlstra 	 * see overloaded we must also see the rto_mask bit.
19677c3f2ab7SPeter Zijlstra 	 */
19687c3f2ab7SPeter Zijlstra 	smp_rmb();
19697c3f2ab7SPeter Zijlstra 
1970b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
1971b6366f04SSteven Rostedt 	if (sched_feat(RT_PUSH_IPI)) {
1972b6366f04SSteven Rostedt 		tell_cpu_to_push(this_rq);
1973b6366f04SSteven Rostedt 		return 0;
1974b6366f04SSteven Rostedt 	}
1975b6366f04SSteven Rostedt #endif
1976b6366f04SSteven Rostedt 
1977391e43daSPeter Zijlstra 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
1978391e43daSPeter Zijlstra 		if (this_cpu == cpu)
1979391e43daSPeter Zijlstra 			continue;
1980391e43daSPeter Zijlstra 
1981391e43daSPeter Zijlstra 		src_rq = cpu_rq(cpu);
1982391e43daSPeter Zijlstra 
1983391e43daSPeter Zijlstra 		/*
1984391e43daSPeter Zijlstra 		 * Don't bother taking the src_rq->lock if the next highest
1985391e43daSPeter Zijlstra 		 * task is known to be lower-priority than our current task.
1986391e43daSPeter Zijlstra 		 * This may look racy, but if this value is about to go
1987391e43daSPeter Zijlstra 		 * logically higher, the src_rq will push this task away.
1988391e43daSPeter Zijlstra 		 * And if it's going logically lower, we do not care.
1989391e43daSPeter Zijlstra 		 */
1990391e43daSPeter Zijlstra 		if (src_rq->rt.highest_prio.next >=
1991391e43daSPeter Zijlstra 		    this_rq->rt.highest_prio.curr)
1992391e43daSPeter Zijlstra 			continue;
1993391e43daSPeter Zijlstra 
1994391e43daSPeter Zijlstra 		/*
1995391e43daSPeter Zijlstra 		 * We can potentially drop this_rq's lock in
1996391e43daSPeter Zijlstra 		 * double_lock_balance, and another CPU could
1997391e43daSPeter Zijlstra 		 * alter this_rq
1998391e43daSPeter Zijlstra 		 */
1999391e43daSPeter Zijlstra 		double_lock_balance(this_rq, src_rq);
2000391e43daSPeter Zijlstra 
2001391e43daSPeter Zijlstra 		/*
2002e23ee747SKirill Tkhai 		 * We can only pull a task that is pushable
2003e23ee747SKirill Tkhai 		 * on its rq, and no others.
2004391e43daSPeter Zijlstra 		 */
2005e23ee747SKirill Tkhai 		p = pick_highest_pushable_task(src_rq, this_cpu);
2006391e43daSPeter Zijlstra 
2007391e43daSPeter Zijlstra 		/*
2008391e43daSPeter Zijlstra 		 * Do we have an RT task that preempts
2009391e43daSPeter Zijlstra 		 * the to-be-scheduled task?
2010391e43daSPeter Zijlstra 		 */
2011391e43daSPeter Zijlstra 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2012391e43daSPeter Zijlstra 			WARN_ON(p == src_rq->curr);
2013da0c1e65SKirill Tkhai 			WARN_ON(!task_on_rq_queued(p));
2014391e43daSPeter Zijlstra 
2015391e43daSPeter Zijlstra 			/*
2016391e43daSPeter Zijlstra 			 * There's a chance that p is higher in priority
2017391e43daSPeter Zijlstra 			 * than what's currently running on its cpu.
2018391e43daSPeter Zijlstra 			 * This is just that p is waking up and hasn't
2019391e43daSPeter Zijlstra 			 * had a chance to schedule. We only pull
2020391e43daSPeter Zijlstra 			 * p if it is lower in priority than the
2021391e43daSPeter Zijlstra 			 * current task on the run queue.
2022391e43daSPeter Zijlstra 			 */
2023391e43daSPeter Zijlstra 			if (p->prio < src_rq->curr->prio)
2024391e43daSPeter Zijlstra 				goto skip;
2025391e43daSPeter Zijlstra 
2026391e43daSPeter Zijlstra 			ret = 1;
2027391e43daSPeter Zijlstra 
2028391e43daSPeter Zijlstra 			deactivate_task(src_rq, p, 0);
2029391e43daSPeter Zijlstra 			set_task_cpu(p, this_cpu);
2030391e43daSPeter Zijlstra 			activate_task(this_rq, p, 0);
2031391e43daSPeter Zijlstra 			/*
2032391e43daSPeter Zijlstra 			 * We continue with the search, just in
2033391e43daSPeter Zijlstra 			 * case there's an even higher prio task
2034391e43daSPeter Zijlstra 			 * in another runqueue. (low likelihood
2035391e43daSPeter Zijlstra 			 * but possible)
2036391e43daSPeter Zijlstra 			 */
2037391e43daSPeter Zijlstra 		}
2038391e43daSPeter Zijlstra skip:
2039391e43daSPeter Zijlstra 		double_unlock_balance(this_rq, src_rq);
2040391e43daSPeter Zijlstra 	}
2041391e43daSPeter Zijlstra 
2042391e43daSPeter Zijlstra 	return ret;
2043391e43daSPeter Zijlstra }
2044391e43daSPeter Zijlstra 
2045391e43daSPeter Zijlstra static void post_schedule_rt(struct rq *rq)
2046391e43daSPeter Zijlstra {
2047391e43daSPeter Zijlstra 	push_rt_tasks(rq);
2048391e43daSPeter Zijlstra }
2049391e43daSPeter Zijlstra 
2050391e43daSPeter Zijlstra /*
2051391e43daSPeter Zijlstra  * If we are not running and we are not going to reschedule soon, we should
2052391e43daSPeter Zijlstra  * try to push tasks away now
2053391e43daSPeter Zijlstra  */
2054391e43daSPeter Zijlstra static void task_woken_rt(struct rq *rq, struct task_struct *p)
2055391e43daSPeter Zijlstra {
2056391e43daSPeter Zijlstra 	if (!task_running(rq, p) &&
2057391e43daSPeter Zijlstra 	    !test_tsk_need_resched(rq->curr) &&
2058391e43daSPeter Zijlstra 	    has_pushable_tasks(rq) &&
205929baa747SPeter Zijlstra 	    p->nr_cpus_allowed > 1 &&
20601baca4ceSJuri Lelli 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
206129baa747SPeter Zijlstra 	    (rq->curr->nr_cpus_allowed < 2 ||
2062391e43daSPeter Zijlstra 	     rq->curr->prio <= p->prio))
2063391e43daSPeter Zijlstra 		push_rt_tasks(rq);
2064391e43daSPeter Zijlstra }
2065391e43daSPeter Zijlstra 
2066391e43daSPeter Zijlstra static void set_cpus_allowed_rt(struct task_struct *p,
2067391e43daSPeter Zijlstra 				const struct cpumask *new_mask)
2068391e43daSPeter Zijlstra {
20698d3d5adaSKirill Tkhai 	struct rq *rq;
20708d3d5adaSKirill Tkhai 	int weight;
2071391e43daSPeter Zijlstra 
2072391e43daSPeter Zijlstra 	BUG_ON(!rt_task(p));
2073391e43daSPeter Zijlstra 
2074da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p))
20758d3d5adaSKirill Tkhai 		return;
2076391e43daSPeter Zijlstra 
20778d3d5adaSKirill Tkhai 	weight = cpumask_weight(new_mask);
20788d3d5adaSKirill Tkhai 
2079391e43daSPeter Zijlstra 	/*
20808d3d5adaSKirill Tkhai 	 * Only update the accounting if the process changes whether
20818d3d5adaSKirill Tkhai 	 * it can migrate or not.
2082391e43daSPeter Zijlstra 	 */
208329baa747SPeter Zijlstra 	if ((p->nr_cpus_allowed > 1) == (weight > 1))
20848d3d5adaSKirill Tkhai 		return;
20858d3d5adaSKirill Tkhai 
20868d3d5adaSKirill Tkhai 	rq = task_rq(p);
20878d3d5adaSKirill Tkhai 
20888d3d5adaSKirill Tkhai 	/*
20898d3d5adaSKirill Tkhai 	 * The process used to be able to migrate OR it can now migrate
20908d3d5adaSKirill Tkhai 	 */
20918d3d5adaSKirill Tkhai 	if (weight <= 1) {
20928d3d5adaSKirill Tkhai 		if (!task_current(rq, p))
2093391e43daSPeter Zijlstra 			dequeue_pushable_task(rq, p);
2094391e43daSPeter Zijlstra 		BUG_ON(!rq->rt.rt_nr_migratory);
2095391e43daSPeter Zijlstra 		rq->rt.rt_nr_migratory--;
20968d3d5adaSKirill Tkhai 	} else {
20978d3d5adaSKirill Tkhai 		if (!task_current(rq, p))
20988d3d5adaSKirill Tkhai 			enqueue_pushable_task(rq, p);
20998d3d5adaSKirill Tkhai 		rq->rt.rt_nr_migratory++;
2100391e43daSPeter Zijlstra 	}
2101391e43daSPeter Zijlstra 
2102391e43daSPeter Zijlstra 	update_rt_migration(&rq->rt);
2103391e43daSPeter Zijlstra }
2104391e43daSPeter Zijlstra 
2105391e43daSPeter Zijlstra /* Assumes rq->lock is held */
2106391e43daSPeter Zijlstra static void rq_online_rt(struct rq *rq)
2107391e43daSPeter Zijlstra {
2108391e43daSPeter Zijlstra 	if (rq->rt.overloaded)
2109391e43daSPeter Zijlstra 		rt_set_overload(rq);
2110391e43daSPeter Zijlstra 
2111391e43daSPeter Zijlstra 	__enable_runtime(rq);
2112391e43daSPeter Zijlstra 
2113391e43daSPeter Zijlstra 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2114391e43daSPeter Zijlstra }
2115391e43daSPeter Zijlstra 
2116391e43daSPeter Zijlstra /* Assumes rq->lock is held */
2117391e43daSPeter Zijlstra static void rq_offline_rt(struct rq *rq)
2118391e43daSPeter Zijlstra {
2119391e43daSPeter Zijlstra 	if (rq->rt.overloaded)
2120391e43daSPeter Zijlstra 		rt_clear_overload(rq);
2121391e43daSPeter Zijlstra 
2122391e43daSPeter Zijlstra 	__disable_runtime(rq);
2123391e43daSPeter Zijlstra 
2124391e43daSPeter Zijlstra 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2125391e43daSPeter Zijlstra }
2126391e43daSPeter Zijlstra 
2127391e43daSPeter Zijlstra /*
2128391e43daSPeter Zijlstra  * When switching from the rt queue, we bring ourselves to a position
2129391e43daSPeter Zijlstra  * where we might want to pull RT tasks from other runqueues.
2130391e43daSPeter Zijlstra  */
2131391e43daSPeter Zijlstra static void switched_from_rt(struct rq *rq, struct task_struct *p)
2132391e43daSPeter Zijlstra {
2133391e43daSPeter Zijlstra 	/*
2134391e43daSPeter Zijlstra 	 * If there are other RT tasks then we will reschedule
2135391e43daSPeter Zijlstra 	 * and the scheduling of the other RT tasks will handle
2136391e43daSPeter Zijlstra 	 * the balancing. But if we are the last RT task
2137391e43daSPeter Zijlstra 	 * we may need to handle the pulling of RT tasks
2138391e43daSPeter Zijlstra 	 * now.
2139391e43daSPeter Zijlstra 	 */
2140da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
21411158ddb5SKirill Tkhai 		return;
21421158ddb5SKirill Tkhai 
21431158ddb5SKirill Tkhai 	if (pull_rt_task(rq))
21448875125eSKirill Tkhai 		resched_curr(rq);
2145391e43daSPeter Zijlstra }
2146391e43daSPeter Zijlstra 
214711c785b7SLi Zefan void __init init_sched_rt_class(void)
2148391e43daSPeter Zijlstra {
2149391e43daSPeter Zijlstra 	unsigned int i;
2150391e43daSPeter Zijlstra 
2151391e43daSPeter Zijlstra 	for_each_possible_cpu(i) {
2152391e43daSPeter Zijlstra 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2153391e43daSPeter Zijlstra 					GFP_KERNEL, cpu_to_node(i));
2154391e43daSPeter Zijlstra 	}
2155391e43daSPeter Zijlstra }
2156391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2157391e43daSPeter Zijlstra 
2158391e43daSPeter Zijlstra /*
2159391e43daSPeter Zijlstra  * When switching a task to RT, we may overload the runqueue
2160391e43daSPeter Zijlstra  * with RT tasks. In this case we try to push them off to
2161391e43daSPeter Zijlstra  * other runqueues.
2162391e43daSPeter Zijlstra  */
2163391e43daSPeter Zijlstra static void switched_to_rt(struct rq *rq, struct task_struct *p)
2164391e43daSPeter Zijlstra {
2165391e43daSPeter Zijlstra 	int check_resched = 1;
2166391e43daSPeter Zijlstra 
2167391e43daSPeter Zijlstra 	/*
2168391e43daSPeter Zijlstra 	 * If we are already running, then there's nothing
2169391e43daSPeter Zijlstra 	 * that needs to be done. But if we are not running
2170391e43daSPeter Zijlstra 	 * we may need to preempt the current running task.
2171391e43daSPeter Zijlstra 	 * If that current running task is also an RT task
2172391e43daSPeter Zijlstra 	 * then see if we can move to another run queue.
2173391e43daSPeter Zijlstra 	 */
2174da0c1e65SKirill Tkhai 	if (task_on_rq_queued(p) && rq->curr != p) {
2175391e43daSPeter Zijlstra #ifdef CONFIG_SMP
217610447917SKirill V Tkhai 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
2177391e43daSPeter Zijlstra 		    /* Don't resched if we changed runqueues */
217810447917SKirill V Tkhai 		    push_rt_task(rq) && rq != task_rq(p))
2179391e43daSPeter Zijlstra 			check_resched = 0;
2180391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2181391e43daSPeter Zijlstra 		if (check_resched && p->prio < rq->curr->prio)
21828875125eSKirill Tkhai 			resched_curr(rq);
2183391e43daSPeter Zijlstra 	}
2184391e43daSPeter Zijlstra }
2185391e43daSPeter Zijlstra 
2186391e43daSPeter Zijlstra /*
2187391e43daSPeter Zijlstra  * Priority of the task has changed. This may cause
2188391e43daSPeter Zijlstra  * us to initiate a push or pull.
2189391e43daSPeter Zijlstra  */
2190391e43daSPeter Zijlstra static void
2191391e43daSPeter Zijlstra prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2192391e43daSPeter Zijlstra {
2193da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p))
2194391e43daSPeter Zijlstra 		return;
2195391e43daSPeter Zijlstra 
2196391e43daSPeter Zijlstra 	if (rq->curr == p) {
2197391e43daSPeter Zijlstra #ifdef CONFIG_SMP
2198391e43daSPeter Zijlstra 		/*
2199391e43daSPeter Zijlstra 		 * If our priority decreases while running, we
2200391e43daSPeter Zijlstra 		 * may need to pull tasks to this runqueue.
2201391e43daSPeter Zijlstra 		 */
2202391e43daSPeter Zijlstra 		if (oldprio < p->prio)
2203391e43daSPeter Zijlstra 			pull_rt_task(rq);
2204391e43daSPeter Zijlstra 		/*
2205391e43daSPeter Zijlstra 		 * If there's a higher priority task waiting to run
2206391e43daSPeter Zijlstra 		 * then reschedule. Note, the above pull_rt_task
2207391e43daSPeter Zijlstra 		 * can release the rq lock and p could migrate.
2208391e43daSPeter Zijlstra 		 * Only reschedule if p is still on the same runqueue.
2209391e43daSPeter Zijlstra 		 */
2210391e43daSPeter Zijlstra 		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
22118875125eSKirill Tkhai 			resched_curr(rq);
2212391e43daSPeter Zijlstra #else
2213391e43daSPeter Zijlstra 		/* For UP simply resched on drop of prio */
2214391e43daSPeter Zijlstra 		if (oldprio < p->prio)
22158875125eSKirill Tkhai 			resched_curr(rq);
2216391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2217391e43daSPeter Zijlstra 	} else {
2218391e43daSPeter Zijlstra 		/*
2219391e43daSPeter Zijlstra 		 * This task is not running, but if its priority is
2220391e43daSPeter Zijlstra 		 * higher than that of the current running task
2221391e43daSPeter Zijlstra 		 * then reschedule.
2222391e43daSPeter Zijlstra 		 */
2223391e43daSPeter Zijlstra 		if (p->prio < rq->curr->prio)
22248875125eSKirill Tkhai 			resched_curr(rq);
2225391e43daSPeter Zijlstra 	}
2226391e43daSPeter Zijlstra }
2227391e43daSPeter Zijlstra 
2228391e43daSPeter Zijlstra static void watchdog(struct rq *rq, struct task_struct *p)
2229391e43daSPeter Zijlstra {
2230391e43daSPeter Zijlstra 	unsigned long soft, hard;
2231391e43daSPeter Zijlstra 
2232391e43daSPeter Zijlstra 	/* max may change after cur was read; this will be fixed next tick */
2233391e43daSPeter Zijlstra 	soft = task_rlimit(p, RLIMIT_RTTIME);
2234391e43daSPeter Zijlstra 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
2235391e43daSPeter Zijlstra 
2236391e43daSPeter Zijlstra 	if (soft != RLIM_INFINITY) {
2237391e43daSPeter Zijlstra 		unsigned long next;
2238391e43daSPeter Zijlstra 
223957d2aa00SYing Xue 		if (p->rt.watchdog_stamp != jiffies) {
2240391e43daSPeter Zijlstra 			p->rt.timeout++;
224157d2aa00SYing Xue 			p->rt.watchdog_stamp = jiffies;
224257d2aa00SYing Xue 		}
224357d2aa00SYing Xue 
2244391e43daSPeter Zijlstra 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2245391e43daSPeter Zijlstra 		if (p->rt.timeout > next)
2246391e43daSPeter Zijlstra 			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2247391e43daSPeter Zijlstra 	}
2248391e43daSPeter Zijlstra }
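/*
 * Illustrative sketch (plain C, not kernel code): the RLIMIT_RTTIME
 * budget is tracked in scheduler ticks, so the microsecond limit is
 * converted with a round-up division. For example, with HZ = 1000
 * (1000 us per tick) a soft limit of 2500 us gives
 * next = ceil(2500 / 1000) = 3, and expiry fires once p->rt.timeout
 * exceeds that.
 */
static unsigned long ex_rttime_ticks(unsigned long soft_us,
				     unsigned long hard_us,
				     unsigned long usec_per_tick)
{
	unsigned long lim = soft_us < hard_us ? soft_us : hard_us;

	return (lim + usec_per_tick - 1) / usec_per_tick;	/* DIV_ROUND_UP */
}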
2249391e43daSPeter Zijlstra 
2250391e43daSPeter Zijlstra static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2251391e43daSPeter Zijlstra {
2252454c7999SColin Cross 	struct sched_rt_entity *rt_se = &p->rt;
2253454c7999SColin Cross 
2254391e43daSPeter Zijlstra 	update_curr_rt(rq);
2255391e43daSPeter Zijlstra 
2256391e43daSPeter Zijlstra 	watchdog(rq, p);
2257391e43daSPeter Zijlstra 
2258391e43daSPeter Zijlstra 	/*
2259391e43daSPeter Zijlstra 	 * RR tasks need a special form of timeslice management.
2260391e43daSPeter Zijlstra 	 * FIFO tasks have no timeslices.
2261391e43daSPeter Zijlstra 	 */
2262391e43daSPeter Zijlstra 	if (p->policy != SCHED_RR)
2263391e43daSPeter Zijlstra 		return;
2264391e43daSPeter Zijlstra 
2265391e43daSPeter Zijlstra 	if (--p->rt.time_slice)
2266391e43daSPeter Zijlstra 		return;
2267391e43daSPeter Zijlstra 
2268ce0dbbbbSClark Williams 	p->rt.time_slice = sched_rr_timeslice;
2269391e43daSPeter Zijlstra 
2270391e43daSPeter Zijlstra 	/*
2271e9aa39bbSLi Bin 	 * Requeue to the end of the queue if we (and all of our
2272e9aa39bbSLi Bin 	 * ancestors) are not the only element on the queue.
2273391e43daSPeter Zijlstra 	 */
2274454c7999SColin Cross 	for_each_sched_rt_entity(rt_se) {
2275454c7999SColin Cross 		if (rt_se->run_list.prev != rt_se->run_list.next) {
2276391e43daSPeter Zijlstra 			requeue_task_rt(rq, p, 0);
22778aa6f0ebSKirill Tkhai 			resched_curr(rq);
2278454c7999SColin Cross 			return;
2279454c7999SColin Cross 		}
2280391e43daSPeter Zijlstra 	}
2281391e43daSPeter Zijlstra }
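
/*
 * Illustrative userspace sketch (not part of this file): the slice that
 * task_tick_rt() refills (sched_rr_timeslice) is tunable at run time via
 * /proc/sys/kernel/sched_rr_timeslice_ms. A hedged example reading the
 * current value:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_rr_timeslice_ms", "r");
	int ms;

	if (f && fscanf(f, "%d", &ms) == 1)
		printf("SCHED_RR timeslice: %d ms\n", ms);
	if (f)
		fclose(f);
	return 0;
}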
2282391e43daSPeter Zijlstra 
2283391e43daSPeter Zijlstra static void set_curr_task_rt(struct rq *rq)
2284391e43daSPeter Zijlstra {
2285391e43daSPeter Zijlstra 	struct task_struct *p = rq->curr;
2286391e43daSPeter Zijlstra 
228778becc27SFrederic Weisbecker 	p->se.exec_start = rq_clock_task(rq);
2288391e43daSPeter Zijlstra 
2289391e43daSPeter Zijlstra 	/* The running task is never eligible for pushing */
2290391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
2291391e43daSPeter Zijlstra }
2292391e43daSPeter Zijlstra 
2293391e43daSPeter Zijlstra static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2294391e43daSPeter Zijlstra {
2295391e43daSPeter Zijlstra 	/*
2296391e43daSPeter Zijlstra 	 * Time slice is 0 for SCHED_FIFO tasks
2297391e43daSPeter Zijlstra 	 */
2298391e43daSPeter Zijlstra 	if (task->policy == SCHED_RR)
2299ce0dbbbbSClark Williams 		return sched_rr_timeslice;
2300391e43daSPeter Zijlstra 	else
2301391e43daSPeter Zijlstra 		return 0;
2302391e43daSPeter Zijlstra }
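
/*
 * Illustrative userspace sketch (not part of this file): this hook backs
 * the sched_rr_get_interval(2) syscall, which reports the round-robin
 * slice as a timespec (and 0 for SCHED_FIFO tasks). A hedged example:
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* pid 0 queries the calling thread. */
	if (sched_rr_get_interval(0, &ts))
		perror("sched_rr_get_interval");
	else
		printf("slice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}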
2303391e43daSPeter Zijlstra 
2304391e43daSPeter Zijlstra const struct sched_class rt_sched_class = {
2305391e43daSPeter Zijlstra 	.next			= &fair_sched_class,
2306391e43daSPeter Zijlstra 	.enqueue_task		= enqueue_task_rt,
2307391e43daSPeter Zijlstra 	.dequeue_task		= dequeue_task_rt,
2308391e43daSPeter Zijlstra 	.yield_task		= yield_task_rt,
2309391e43daSPeter Zijlstra 
2310391e43daSPeter Zijlstra 	.check_preempt_curr	= check_preempt_curr_rt,
2311391e43daSPeter Zijlstra 
2312391e43daSPeter Zijlstra 	.pick_next_task		= pick_next_task_rt,
2313391e43daSPeter Zijlstra 	.put_prev_task		= put_prev_task_rt,
2314391e43daSPeter Zijlstra 
2315391e43daSPeter Zijlstra #ifdef CONFIG_SMP
2316391e43daSPeter Zijlstra 	.select_task_rq		= select_task_rq_rt,
2317391e43daSPeter Zijlstra 
2318391e43daSPeter Zijlstra 	.set_cpus_allowed       = set_cpus_allowed_rt,
2319391e43daSPeter Zijlstra 	.rq_online              = rq_online_rt,
2320391e43daSPeter Zijlstra 	.rq_offline             = rq_offline_rt,
2321391e43daSPeter Zijlstra 	.post_schedule		= post_schedule_rt,
2322391e43daSPeter Zijlstra 	.task_woken		= task_woken_rt,
2323391e43daSPeter Zijlstra 	.switched_from		= switched_from_rt,
2324391e43daSPeter Zijlstra #endif
2325391e43daSPeter Zijlstra 
2326391e43daSPeter Zijlstra 	.set_curr_task          = set_curr_task_rt,
2327391e43daSPeter Zijlstra 	.task_tick		= task_tick_rt,
2328391e43daSPeter Zijlstra 
2329391e43daSPeter Zijlstra 	.get_rr_interval	= get_rr_interval_rt,
2330391e43daSPeter Zijlstra 
2331391e43daSPeter Zijlstra 	.prio_changed		= prio_changed_rt,
2332391e43daSPeter Zijlstra 	.switched_to		= switched_to_rt,
23336e998916SStanislaw Gruszka 
23346e998916SStanislaw Gruszka 	.update_curr		= update_curr_rt,
2335391e43daSPeter Zijlstra };
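
/*
 * Illustrative sketch (not part of this file): the .next pointer above
 * threads the scheduling classes into a strict-priority list (on SMP in
 * this era: stop -> dl -> rt -> fair -> idle), and the core picks from
 * the first class with runnable work. A hypothetical userspace mock of
 * that dispatch pattern, with made-up names:
 */
#include <stdio.h>
#include <stddef.h>

struct mock_class {
	const char *name;
	const struct mock_class *next;
	int (*has_work)(void);
};

static int no_work(void)  { return 0; }
static int got_work(void) { return 1; }

static const struct mock_class mock_fair = { "fair", NULL,       got_work };
static const struct mock_class mock_rt   = { "rt",   &mock_fair, no_work  };

int main(void)
{
	const struct mock_class *class;

	/* Mirrors the for_each_class() walk used by the core scheduler. */
	for (class = &mock_rt; class; class = class->next) {
		if (class->has_work()) {
			printf("picking from the %s class\n", class->name);
			break;
		}
	}
	return 0;
}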
2336391e43daSPeter Zijlstra 
2337391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
2338391e43daSPeter Zijlstra extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2339391e43daSPeter Zijlstra 
2340391e43daSPeter Zijlstra void print_rt_stats(struct seq_file *m, int cpu)
2341391e43daSPeter Zijlstra {
2342391e43daSPeter Zijlstra 	rt_rq_iter_t iter;
2343391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
2344391e43daSPeter Zijlstra 
2345391e43daSPeter Zijlstra 	rcu_read_lock();
2346391e43daSPeter Zijlstra 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2347391e43daSPeter Zijlstra 		print_rt_rq(m, cpu, rt_rq);
2348391e43daSPeter Zijlstra 	rcu_read_unlock();
2349391e43daSPeter Zijlstra }
2350391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_DEBUG */
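
/*
 * Illustrative userspace sketch (not part of this file): with
 * CONFIG_SCHED_DEBUG enabled, the per-CPU rt_rq statistics printed by
 * print_rt_stats() are exposed through /proc/sched_debug. A hedged
 * example that dumps that file:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sched_debug", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}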
2351