xref: /openbmc/linux/kernel/sched/rt.c (revision b24413180f5600bcb3bb70fbed5cf186b60864bd)
// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>
#include <linux/irq_work.h>

int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
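/*
 * A sketch of the loop above: hrtimer_forward_now() advances the timer
 * expiry by whole rt_period intervals until it lies in the future and
 * returns how many periods were stepped over, so a callback that fires
 * late still replenishes every elapsed period in one pass. The
 * rt_runtime_lock is dropped around do_sched_rt_period_timer() because
 * that function takes rq->lock and the per-rt_rq runtime locks itself.
 */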

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/*
		 * SCHED_DEADLINE updates the bandwidth, as a runaway
		 * RT task alongside a DL task could hog a CPU. But DL
		 * does not reset the period. If a deadline task was
		 * running without an RT task running, it can cause RT
		 * tasks to throttle when they start up. Kick the timer
		 * right away to update the period.
		 */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}
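/*
 * On the ns_to_ktime(0) forward above (assuming hrtimer_forward_now()'s
 * usual clamping of tiny intervals): it pulls the stale expiry up to
 * roughly "now", so hrtimer_start_expires() re-arms the timer to fire
 * almost immediately and the handler re-anchors the period, instead of
 * resuming a period left over from earlier DL-only activity.
 */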

#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
static void push_irq_work_func(struct irq_work *work);
#endif

void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);

#ifdef HAVE_RT_PUSH_IPI
	rt_rq->push_flags = 0;
	rt_rq->push_cpu = nr_cpu_ids;
	raw_spin_lock_init(&rt_rq->push_lock);
	init_irq_work(&rt_rq->push_work, push_irq_work_func);
#endif
#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
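/*
 * The bit set at MAX_RT_PRIO is a sentinel: with all real priority bits
 * clear, sched_find_first_bit() on this bitmap still terminates and
 * returns MAX_RT_PRIO, which callers such as dec_rt_prio() below treat
 * as "no RT task queued".
 */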

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}
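/*
 * Linkage sketch for the group case: each task_group owns one rt_rq and
 * one sched_rt_entity per CPU. The entity is queued on its parent
 * group's rt_rq (or on the root rq->rt when there is no parent), while
 * my_q points down at the group's own rt_rq, so for_each_sched_rt_entity()
 * can walk enqueues and dequeues up and down the hierarchy.
 */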

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}
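/*
 * Ordering sketch: writers do "set rto_mask bit; smp_wmb(); rto_count++",
 * while pull_rt_task() reads rto_count first and only then, past the
 * matching barrier mentioned above, scans rto_mask. Any reader that
 * observes a non-zero count is therefore guaranteed to also observe the
 * mask bit of at least one overloaded CPU.
 */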

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}
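/*
 * Example of the condition above: an rq counts as overloaded only when
 * it has more than one RT task queued (rt_nr_total > 1) and at least
 * one of them may run elsewhere (rt_nr_migratory). A single RT task, or
 * several tasks all pinned to this CPU, never set the flag, since there
 * is nothing another CPU could usefully pull.
 */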

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}
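/*
 * pushable_tasks is a priority-sorted plist keyed on p->prio, so
 * plist_first_entry() always yields the highest-priority pushable task;
 * that is what lets both paths above maintain highest_prio.next without
 * scanning the whole list.
 */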

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se)
		dequeue_top_rt_rq(rt_rq);
	else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}
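/*
 * "Boosted" means priority-inherited: a plain task is boosted when some
 * PI chain raised its effective prio above normal_prio, and a group
 * entity when any boosted task is queued below it (rt_nr_boosted).
 * Boosted entities must keep running, which is why rt_rq_throttled()
 * above refuses to report a queue as throttled while rt_nr_boosted != 0.
 */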

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}
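/*
 * A reading of the predicate above (used by the deadline class when it
 * charges its runtime against the RT bandwidth pool): keep accounting
 * rt_time as long as the replenishment timer is still armed or the
 * queue has budget left; once neither holds, further charges could
 * never be paid back within this period.
 */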

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}
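/*
 * Worked example of the 1/n split above: in a 4-CPU root domain
 * (weight == 4), a neighbour with 40 ms of spare budget
 * (rt_runtime - rt_time) donates diff = 40 ms / 4 = 10 ms of its
 * rt_runtime to the starved queue. The loop stops early once the
 * borrower's rt_runtime reaches a full rt_period, since more budget
 * than one period can never be consumed.
 */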

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have; that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}
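/*
 * Note the dual role of RUNTIME_INF here: on entry it marks queues that
 * never participate in borrowing (nothing to reclaim), and on exit it
 * is written back as the "runtime disabled" sentinel that makes
 * do_balance_runtime() above refuse to steal from this rq.
 */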

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void balance_runtime(struct rt_rq *rt_rq)
{
	if (!sched_feat(RT_RUNTIME_SHARE))
		return;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled.  If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway.  Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);
		int skip;

		/*
		 * When span == cpu_online_mask, taking each rq->lock
		 * can be time-consuming. Try to avoid it when possible.
		 */
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		if (skip)
			continue;

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled, check_preempt_curr() will have
				 * set skip_update, and the time between the
				 * wakeup and this unthrottle would get
				 * accounted as 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_skip_update(rq, false);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}
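/*
 * Shape of one replenishment pass, for each CPU in the span: retire up
 * to overrun * rt_runtime from the accumulated rt_time, unthrottle and
 * re-enqueue the queue once it is back under budget, and report idle
 * only when no rt_rq has residual rt_time or queued tasks, which lets
 * sched_rt_period_timer() stop re-arming the timer.
 */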

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
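/*
 * Numeric example, assuming the stock sysctl defaults of a 1 s period
 * and 950 ms runtime (both configured outside this file): once a
 * queue's rt_time crosses 950 ms within the current period it is
 * throttled and dequeued here, and the period timer later retires up
 * to one period's worth of rt_time, preserving the remaining ~5% of
 * CPU for non-RT tasks.
 */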

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, SCHED_CPUFREQ_RT);

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}
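/*
 * Accounting flow sketch: delta_exec is the task-clock time elapsed
 * since exec_start was last stamped; it feeds sum_exec_runtime, cpuacct
 * and the rt load average, and is then charged as rt_time to every
 * rt_rq on the entity's path up the group hierarchy, so exceeding the
 * budget at any ancestor level triggers a reschedule.
 */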

static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;
	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
		return;

	add_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 1;
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */
1156391e43daSPeter Zijlstra 
1157391e43daSPeter Zijlstra static inline
115822abdef3SKirill Tkhai unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
115922abdef3SKirill Tkhai {
116022abdef3SKirill Tkhai 	struct rt_rq *group_rq = group_rt_rq(rt_se);
116122abdef3SKirill Tkhai 
116222abdef3SKirill Tkhai 	if (group_rq)
116322abdef3SKirill Tkhai 		return group_rq->rt_nr_running;
116422abdef3SKirill Tkhai 	else
116522abdef3SKirill Tkhai 		return 1;
116622abdef3SKirill Tkhai }
116722abdef3SKirill Tkhai 
116822abdef3SKirill Tkhai static inline
116901d36d0aSFrederic Weisbecker unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
117001d36d0aSFrederic Weisbecker {
117101d36d0aSFrederic Weisbecker 	struct rt_rq *group_rq = group_rt_rq(rt_se);
117201d36d0aSFrederic Weisbecker 	struct task_struct *tsk;
117301d36d0aSFrederic Weisbecker 
117401d36d0aSFrederic Weisbecker 	if (group_rq)
117501d36d0aSFrederic Weisbecker 		return group_rq->rr_nr_running;
117601d36d0aSFrederic Weisbecker 
117701d36d0aSFrederic Weisbecker 	tsk = rt_task_of(rt_se);
117801d36d0aSFrederic Weisbecker 
117901d36d0aSFrederic Weisbecker 	return (tsk->policy == SCHED_RR) ? 1 : 0;
118001d36d0aSFrederic Weisbecker }
118101d36d0aSFrederic Weisbecker 
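/*
 * Illustration (standalone user-space sketch, not kernel code): the
 * counting rule of rt_se_nr_running()/rt_se_rr_nr_running() above. A
 * task entity counts as one (one RR task if its policy is SCHED_RR);
 * a group entity contributes its child runqueue's whole totals. All
 * demo_* names are hypothetical.
 */
#include <stdio.h>

struct demo_rt_rq {
	unsigned int nr_running;
	unsigned int rr_nr_running;
};

struct demo_entity {
	struct demo_rt_rq *group_rq;        /* NULL for a plain task */
	int is_rr;                          /* task policy == SCHED_RR */
};

static unsigned int demo_nr(const struct demo_entity *se)
{
	return se->group_rq ? se->group_rq->nr_running : 1;
}

static unsigned int demo_rr_nr(const struct demo_entity *se)
{
	return se->group_rq ? se->group_rq->rr_nr_running
			    : (unsigned int)se->is_rr;
}

int main(void)
{
	struct demo_rt_rq child = { 3, 2 };     /* 3 tasks queued, 2 are RR */
	struct demo_entity group = { &child, 0 };
	struct demo_entity fifo  = { NULL, 0 };

	/* enqueueing the group bumps the parent rq by the child's totals */
	printf("group adds %u (%u RR)\n", demo_nr(&group), demo_rr_nr(&group));
	printf("task  adds %u (%u RR)\n", demo_nr(&fifo), demo_rr_nr(&fifo));
	return 0;
}
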
118201d36d0aSFrederic Weisbecker static inline
1183391e43daSPeter Zijlstra void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1184391e43daSPeter Zijlstra {
1185391e43daSPeter Zijlstra 	int prio = rt_se_prio(rt_se);
1186391e43daSPeter Zijlstra 
1187391e43daSPeter Zijlstra 	WARN_ON(!rt_prio(prio));
118822abdef3SKirill Tkhai 	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
118901d36d0aSFrederic Weisbecker 	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1190391e43daSPeter Zijlstra 
1191391e43daSPeter Zijlstra 	inc_rt_prio(rt_rq, prio);
1192391e43daSPeter Zijlstra 	inc_rt_migration(rt_se, rt_rq);
1193391e43daSPeter Zijlstra 	inc_rt_group(rt_se, rt_rq);
1194391e43daSPeter Zijlstra }
1195391e43daSPeter Zijlstra 
1196391e43daSPeter Zijlstra static inline
1197391e43daSPeter Zijlstra void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1198391e43daSPeter Zijlstra {
1199391e43daSPeter Zijlstra 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1200391e43daSPeter Zijlstra 	WARN_ON(!rt_rq->rt_nr_running);
120122abdef3SKirill Tkhai 	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
120201d36d0aSFrederic Weisbecker 	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1203391e43daSPeter Zijlstra 
1204391e43daSPeter Zijlstra 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1205391e43daSPeter Zijlstra 	dec_rt_migration(rt_se, rt_rq);
1206391e43daSPeter Zijlstra 	dec_rt_group(rt_se, rt_rq);
1207391e43daSPeter Zijlstra }
1208391e43daSPeter Zijlstra 
1209ff77e468SPeter Zijlstra /*
1210ff77e468SPeter Zijlstra  * Change rt_se->run_list location unless SAVE && !MOVE
1211ff77e468SPeter Zijlstra  *
1212ff77e468SPeter Zijlstra  * assumes ENQUEUE/DEQUEUE flags match
1213ff77e468SPeter Zijlstra  */
1214ff77e468SPeter Zijlstra static inline bool move_entity(unsigned int flags)
1215ff77e468SPeter Zijlstra {
1216ff77e468SPeter Zijlstra 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1217ff77e468SPeter Zijlstra 		return false;
1218ff77e468SPeter Zijlstra 
1219ff77e468SPeter Zijlstra 	return true;
1220ff77e468SPeter Zijlstra }
1221ff77e468SPeter Zijlstra 
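/*
 * Illustration (standalone user-space sketch, not kernel code): the
 * flag combinations accepted by move_entity() above. Only
 * SAVE-without-MOVE leaves the entity where it is; every other
 * combination relocates it. The DEMO_* flag values are assumptions
 * for this sketch, not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_SAVE 0x1
#define DEMO_MOVE 0x2

static bool demo_move_entity(unsigned int flags)
{
	return (flags & (DEMO_SAVE | DEMO_MOVE)) != DEMO_SAVE;
}

int main(void)
{
	printf("plain dequeue: %d\n", demo_move_entity(0));                     /* 1 */
	printf("save only    : %d\n", demo_move_entity(DEMO_SAVE));             /* 0 */
	printf("save + move  : %d\n", demo_move_entity(DEMO_SAVE | DEMO_MOVE)); /* 1 */
	return 0;
}
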
1222ff77e468SPeter Zijlstra static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1223ff77e468SPeter Zijlstra {
1224ff77e468SPeter Zijlstra 	list_del_init(&rt_se->run_list);
1225ff77e468SPeter Zijlstra 
1226ff77e468SPeter Zijlstra 	if (list_empty(array->queue + rt_se_prio(rt_se)))
1227ff77e468SPeter Zijlstra 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1228ff77e468SPeter Zijlstra 
1229ff77e468SPeter Zijlstra 	rt_se->on_list = 0;
1230ff77e468SPeter Zijlstra }
1231ff77e468SPeter Zijlstra 
1232ff77e468SPeter Zijlstra static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1233391e43daSPeter Zijlstra {
1234391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1235391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1236391e43daSPeter Zijlstra 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1237391e43daSPeter Zijlstra 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1238391e43daSPeter Zijlstra 
1239391e43daSPeter Zijlstra 	/*
1240391e43daSPeter Zijlstra 	 * Don't enqueue the group if it's throttled, or when empty.
1241391e43daSPeter Zijlstra 	 * The latter is a consequence of the former when a child group
1242391e43daSPeter Zijlstra 	 * gets throttled and the current group doesn't have any other
1243391e43daSPeter Zijlstra 	 * active members.
1244391e43daSPeter Zijlstra 	 */
1245ff77e468SPeter Zijlstra 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1246ff77e468SPeter Zijlstra 		if (rt_se->on_list)
1247ff77e468SPeter Zijlstra 			__delist_rt_entity(rt_se, array);
1248391e43daSPeter Zijlstra 		return;
1249ff77e468SPeter Zijlstra 	}
1250391e43daSPeter Zijlstra 
1251ff77e468SPeter Zijlstra 	if (move_entity(flags)) {
1252ff77e468SPeter Zijlstra 		WARN_ON_ONCE(rt_se->on_list);
1253ff77e468SPeter Zijlstra 		if (flags & ENQUEUE_HEAD)
1254391e43daSPeter Zijlstra 			list_add(&rt_se->run_list, queue);
1255391e43daSPeter Zijlstra 		else
1256391e43daSPeter Zijlstra 			list_add_tail(&rt_se->run_list, queue);
1257ff77e468SPeter Zijlstra 
1258391e43daSPeter Zijlstra 		__set_bit(rt_se_prio(rt_se), array->bitmap);
1259ff77e468SPeter Zijlstra 		rt_se->on_list = 1;
1260ff77e468SPeter Zijlstra 	}
1261ff77e468SPeter Zijlstra 	rt_se->on_rq = 1;
1262391e43daSPeter Zijlstra 
1263391e43daSPeter Zijlstra 	inc_rt_tasks(rt_se, rt_rq);
1264391e43daSPeter Zijlstra }
1265391e43daSPeter Zijlstra 
1266ff77e468SPeter Zijlstra static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1267391e43daSPeter Zijlstra {
1268391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1269391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1270391e43daSPeter Zijlstra 
1271ff77e468SPeter Zijlstra 	if (move_entity(flags)) {
1272ff77e468SPeter Zijlstra 		WARN_ON_ONCE(!rt_se->on_list);
1273ff77e468SPeter Zijlstra 		__delist_rt_entity(rt_se, array);
1274ff77e468SPeter Zijlstra 	}
1275ff77e468SPeter Zijlstra 	rt_se->on_rq = 0;
1276391e43daSPeter Zijlstra 
1277391e43daSPeter Zijlstra 	dec_rt_tasks(rt_se, rt_rq);
1278391e43daSPeter Zijlstra }
1279391e43daSPeter Zijlstra 
1280391e43daSPeter Zijlstra /*
1281391e43daSPeter Zijlstra  * Because the prio of an upper entry depends on the lower
1282391e43daSPeter Zijlstra  * entries, we must remove entries top - down.
1283391e43daSPeter Zijlstra  */
1284ff77e468SPeter Zijlstra static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1285391e43daSPeter Zijlstra {
1286391e43daSPeter Zijlstra 	struct sched_rt_entity *back = NULL;
1287391e43daSPeter Zijlstra 
1288391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1289391e43daSPeter Zijlstra 		rt_se->back = back;
1290391e43daSPeter Zijlstra 		back = rt_se;
1291391e43daSPeter Zijlstra 	}
1292391e43daSPeter Zijlstra 
1293f4ebcbc0SKirill Tkhai 	dequeue_top_rt_rq(rt_rq_of_se(back));
1294f4ebcbc0SKirill Tkhai 
1295391e43daSPeter Zijlstra 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1296391e43daSPeter Zijlstra 		if (on_rt_rq(rt_se))
1297ff77e468SPeter Zijlstra 			__dequeue_rt_entity(rt_se, flags);
1298391e43daSPeter Zijlstra 	}
1299391e43daSPeter Zijlstra }
1300391e43daSPeter Zijlstra 
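/*
 * Illustration (standalone user-space sketch, not kernel code): the
 * two passes of dequeue_rt_stack() above on a three-level hierarchy.
 * The first loop records back pointers while walking leaf -> root;
 * the second walks root -> leaf, so a parent is removed before the
 * children its priority depends on. The demo_entity type is
 * hypothetical.
 */
#include <stdio.h>

struct demo_entity {
	const char *name;
	struct demo_entity *parent;     /* what for_each_sched_rt_entity() follows */
	struct demo_entity *back;       /* filled in by the first pass */
};

int main(void)
{
	struct demo_entity root  = { "root",  NULL,   NULL };
	struct demo_entity group = { "group", &root,  NULL };
	struct demo_entity task  = { "task",  &group, NULL };
	struct demo_entity *se, *back = NULL;

	for (se = &task; se; se = se->parent) { /* leaf -> root */
		se->back = back;
		back = se;
	}
	for (se = back; se; se = se->back)      /* root -> leaf */
		printf("dequeue %s\n", se->name);
	return 0;
}
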
1301ff77e468SPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1302391e43daSPeter Zijlstra {
1303f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_se(rt_se);
1304f4ebcbc0SKirill Tkhai 
1305ff77e468SPeter Zijlstra 	dequeue_rt_stack(rt_se, flags);
1306391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se)
1307ff77e468SPeter Zijlstra 		__enqueue_rt_entity(rt_se, flags);
1308f4ebcbc0SKirill Tkhai 	enqueue_top_rt_rq(&rq->rt);
1309391e43daSPeter Zijlstra }
1310391e43daSPeter Zijlstra 
1311ff77e468SPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1312391e43daSPeter Zijlstra {
1313f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_se(rt_se);
1314f4ebcbc0SKirill Tkhai 
1315ff77e468SPeter Zijlstra 	dequeue_rt_stack(rt_se, flags);
1316391e43daSPeter Zijlstra 
1317391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1318391e43daSPeter Zijlstra 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1319391e43daSPeter Zijlstra 
1320391e43daSPeter Zijlstra 		if (rt_rq && rt_rq->rt_nr_running)
1321ff77e468SPeter Zijlstra 			__enqueue_rt_entity(rt_se, flags);
1322391e43daSPeter Zijlstra 	}
1323f4ebcbc0SKirill Tkhai 	enqueue_top_rt_rq(&rq->rt);
1324391e43daSPeter Zijlstra }
1325391e43daSPeter Zijlstra 
1326391e43daSPeter Zijlstra /*
1327391e43daSPeter Zijlstra  * Adding/removing a task to/from a priority array:
1328391e43daSPeter Zijlstra  */
1329391e43daSPeter Zijlstra static void
1330391e43daSPeter Zijlstra enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1331391e43daSPeter Zijlstra {
1332391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1333391e43daSPeter Zijlstra 
1334391e43daSPeter Zijlstra 	if (flags & ENQUEUE_WAKEUP)
1335391e43daSPeter Zijlstra 		rt_se->timeout = 0;
1336391e43daSPeter Zijlstra 
1337ff77e468SPeter Zijlstra 	enqueue_rt_entity(rt_se, flags);
1338391e43daSPeter Zijlstra 
13394b53a341SIngo Molnar 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1340391e43daSPeter Zijlstra 		enqueue_pushable_task(rq, p);
1341391e43daSPeter Zijlstra }
1342391e43daSPeter Zijlstra 
1343391e43daSPeter Zijlstra static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1344391e43daSPeter Zijlstra {
1345391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1346391e43daSPeter Zijlstra 
1347391e43daSPeter Zijlstra 	update_curr_rt(rq);
1348ff77e468SPeter Zijlstra 	dequeue_rt_entity(rt_se, flags);
1349391e43daSPeter Zijlstra 
1350391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
1351391e43daSPeter Zijlstra }
1352391e43daSPeter Zijlstra 
1353391e43daSPeter Zijlstra /*
1354391e43daSPeter Zijlstra  * Put task to the head or the end of the run list without the overhead of
1355391e43daSPeter Zijlstra  * Put the task at the head or the end of the run list without the overhead of
1356391e43daSPeter Zijlstra  */
1357391e43daSPeter Zijlstra static void
1358391e43daSPeter Zijlstra requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1359391e43daSPeter Zijlstra {
1360391e43daSPeter Zijlstra 	if (on_rt_rq(rt_se)) {
1361391e43daSPeter Zijlstra 		struct rt_prio_array *array = &rt_rq->active;
1362391e43daSPeter Zijlstra 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1363391e43daSPeter Zijlstra 
1364391e43daSPeter Zijlstra 		if (head)
1365391e43daSPeter Zijlstra 			list_move(&rt_se->run_list, queue);
1366391e43daSPeter Zijlstra 		else
1367391e43daSPeter Zijlstra 			list_move_tail(&rt_se->run_list, queue);
1368391e43daSPeter Zijlstra 	}
1369391e43daSPeter Zijlstra }
1370391e43daSPeter Zijlstra 
1371391e43daSPeter Zijlstra static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1372391e43daSPeter Zijlstra {
1373391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1374391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
1375391e43daSPeter Zijlstra 
1376391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1377391e43daSPeter Zijlstra 		rt_rq = rt_rq_of_se(rt_se);
1378391e43daSPeter Zijlstra 		requeue_rt_entity(rt_rq, rt_se, head);
1379391e43daSPeter Zijlstra 	}
1380391e43daSPeter Zijlstra }
1381391e43daSPeter Zijlstra 
1382391e43daSPeter Zijlstra static void yield_task_rt(struct rq *rq)
1383391e43daSPeter Zijlstra {
1384391e43daSPeter Zijlstra 	requeue_task_rt(rq, rq->curr, 0);
1385391e43daSPeter Zijlstra }
1386391e43daSPeter Zijlstra 
1387391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1388391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task);
1389391e43daSPeter Zijlstra 
1390391e43daSPeter Zijlstra static int
1391ac66f547SPeter Zijlstra select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1392391e43daSPeter Zijlstra {
1393391e43daSPeter Zijlstra 	struct task_struct *curr;
1394391e43daSPeter Zijlstra 	struct rq *rq;
1395391e43daSPeter Zijlstra 
1396391e43daSPeter Zijlstra 	/* For anything but wake ups, just return the task_cpu */
1397391e43daSPeter Zijlstra 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1398391e43daSPeter Zijlstra 		goto out;
1399391e43daSPeter Zijlstra 
1400391e43daSPeter Zijlstra 	rq = cpu_rq(cpu);
1401391e43daSPeter Zijlstra 
1402391e43daSPeter Zijlstra 	rcu_read_lock();
1403316c1608SJason Low 	curr = READ_ONCE(rq->curr); /* unlocked access */
1404391e43daSPeter Zijlstra 
1405391e43daSPeter Zijlstra 	/*
1406391e43daSPeter Zijlstra 	 * If the current task on @p's runqueue is an RT task, then
1407391e43daSPeter Zijlstra 	 * try to see if we can wake this RT task up on another
1408391e43daSPeter Zijlstra 	 * runqueue. Otherwise simply start this RT task
1409391e43daSPeter Zijlstra 	 * on its current runqueue.
1410391e43daSPeter Zijlstra 	 *
1411391e43daSPeter Zijlstra 	 * We want to avoid overloading runqueues. If the woken
1412391e43daSPeter Zijlstra 	 * task is of higher priority, then it will stay on this CPU
1413391e43daSPeter Zijlstra 	 * and the lower prio task should be moved to another CPU.
1414391e43daSPeter Zijlstra 	 * Even though this will probably make the lower prio task
1415391e43daSPeter Zijlstra 	 * lose its cache, we do not want to bounce a higher-priority task
1416391e43daSPeter Zijlstra 	 * around just because it gave up its CPU, perhaps for a
1417391e43daSPeter Zijlstra 	 * lock?
1418391e43daSPeter Zijlstra 	 *
1419391e43daSPeter Zijlstra 	 * For equal prio tasks, we just let the scheduler sort it out.
1420391e43daSPeter Zijlstra 	 *
1421391e43daSPeter Zijlstra 	 * Otherwise, just let it ride on the affined RQ and the
1422391e43daSPeter Zijlstra 	 * post-schedule router will push the preempted task away
1423391e43daSPeter Zijlstra 	 *
1424391e43daSPeter Zijlstra 	 * This test is optimistic, if we get it wrong the load-balancer
1425391e43daSPeter Zijlstra 	 * will have to sort it out.
1426391e43daSPeter Zijlstra 	 */
1427391e43daSPeter Zijlstra 	if (curr && unlikely(rt_task(curr)) &&
14284b53a341SIngo Molnar 	    (curr->nr_cpus_allowed < 2 ||
14296bfa687cSShawn Bohrer 	     curr->prio <= p->prio)) {
1430391e43daSPeter Zijlstra 		int target = find_lowest_rq(p);
1431391e43daSPeter Zijlstra 
143280e3d87bSTim Chen 		/*
143380e3d87bSTim Chen 		 * Don't bother moving it if the destination CPU is
143480e3d87bSTim Chen 		 * not running a lower priority task.
143580e3d87bSTim Chen 		 */
143680e3d87bSTim Chen 		if (target != -1 &&
143780e3d87bSTim Chen 		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1438391e43daSPeter Zijlstra 			cpu = target;
1439391e43daSPeter Zijlstra 	}
1440391e43daSPeter Zijlstra 	rcu_read_unlock();
1441391e43daSPeter Zijlstra 
1442391e43daSPeter Zijlstra out:
1443391e43daSPeter Zijlstra 	return cpu;
1444391e43daSPeter Zijlstra }
1445391e43daSPeter Zijlstra 
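/*
 * Illustration (standalone user-space sketch, not kernel code): the
 * wakeup placement rule of select_task_rq_rt() above, reduced to plain
 * integers (lower value == higher priority, as in the kernel).
 * demo_pick_cpu() and its arguments are hypothetical stand-ins for the
 * rq/cpupri machinery.
 */
#include <stdbool.h>
#include <stdio.h>

static int demo_pick_cpu(int task_cpu, int task_prio,
			 bool curr_is_rt, int curr_prio, int curr_ncpus,
			 int lowest_cpu, int lowest_prio)
{
	/* Only bother moving if the current task would get in the way. */
	if (curr_is_rt && (curr_ncpus < 2 || curr_prio <= task_prio)) {
		/* ...and only if the target runs something lower-prio. */
		if (lowest_cpu != -1 && task_prio < lowest_prio)
			return lowest_cpu;
	}
	return task_cpu;
}

int main(void)
{
	/* prio-10 task wakes where prio-5 runs; CPU 3 runs prio 90: move */
	printf("cpu=%d\n", demo_pick_cpu(0, 10, true, 5, 4, 3, 90));    /* 3 */
	/* current is lower prio (20 > 10), it will be preempted: stay  */
	printf("cpu=%d\n", demo_pick_cpu(0, 10, true, 20, 4, 3, 90));   /* 0 */
	return 0;
}
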
1446391e43daSPeter Zijlstra static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1447391e43daSPeter Zijlstra {
1448308a623aSWanpeng Li 	/*
1449308a623aSWanpeng Li 	 * Current can't be migrated, useless to reschedule,
1450308a623aSWanpeng Li 	 * let's hope p can move out.
1451308a623aSWanpeng Li 	 */
14524b53a341SIngo Molnar 	if (rq->curr->nr_cpus_allowed == 1 ||
1453308a623aSWanpeng Li 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1454391e43daSPeter Zijlstra 		return;
1455391e43daSPeter Zijlstra 
1456308a623aSWanpeng Li 	/*
1457308a623aSWanpeng Li 	 * p is migratable, so let's not schedule it and
1458308a623aSWanpeng Li 	 * see if it is pushed or pulled somewhere else.
1459308a623aSWanpeng Li 	 */
14604b53a341SIngo Molnar 	if (p->nr_cpus_allowed != 1
1461391e43daSPeter Zijlstra 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
1462391e43daSPeter Zijlstra 		return;
1463391e43daSPeter Zijlstra 
1464391e43daSPeter Zijlstra 	/*
1465391e43daSPeter Zijlstra 	 * There appear to be other cpus that can accept
1466391e43daSPeter Zijlstra 	 * current and none to run 'p', so let's reschedule
1467391e43daSPeter Zijlstra 	 * to try and push current away:
1468391e43daSPeter Zijlstra 	 */
1469391e43daSPeter Zijlstra 	requeue_task_rt(rq, p, 1);
14708875125eSKirill Tkhai 	resched_curr(rq);
1471391e43daSPeter Zijlstra }
1472391e43daSPeter Zijlstra 
1473391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
1474391e43daSPeter Zijlstra 
1475391e43daSPeter Zijlstra /*
1476391e43daSPeter Zijlstra  * Preempt the current task with a newly woken task if needed:
1477391e43daSPeter Zijlstra  */
1478391e43daSPeter Zijlstra static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1479391e43daSPeter Zijlstra {
1480391e43daSPeter Zijlstra 	if (p->prio < rq->curr->prio) {
14818875125eSKirill Tkhai 		resched_curr(rq);
1482391e43daSPeter Zijlstra 		return;
1483391e43daSPeter Zijlstra 	}
1484391e43daSPeter Zijlstra 
1485391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1486391e43daSPeter Zijlstra 	/*
1487391e43daSPeter Zijlstra 	 * If:
1488391e43daSPeter Zijlstra 	 *
1489391e43daSPeter Zijlstra 	 * - the newly woken task is of equal priority to the current task
1490391e43daSPeter Zijlstra 	 * - the newly woken task is non-migratable while current is migratable
1491391e43daSPeter Zijlstra 	 * - current will be preempted on the next reschedule
1492391e43daSPeter Zijlstra 	 *
1493391e43daSPeter Zijlstra 	 * we should check to see if current can readily move to a different
1494391e43daSPeter Zijlstra 	 * cpu.  If so, we will reschedule to allow the push logic to try
1495391e43daSPeter Zijlstra 	 * to move current somewhere else, making room for our non-migratable
1496391e43daSPeter Zijlstra 	 * task.
1497391e43daSPeter Zijlstra 	 */
1498391e43daSPeter Zijlstra 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1499391e43daSPeter Zijlstra 		check_preempt_equal_prio(rq, p);
1500391e43daSPeter Zijlstra #endif
1501391e43daSPeter Zijlstra }
1502391e43daSPeter Zijlstra 
1503391e43daSPeter Zijlstra static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1504391e43daSPeter Zijlstra 						   struct rt_rq *rt_rq)
1505391e43daSPeter Zijlstra {
1506391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1507391e43daSPeter Zijlstra 	struct sched_rt_entity *next = NULL;
1508391e43daSPeter Zijlstra 	struct list_head *queue;
1509391e43daSPeter Zijlstra 	int idx;
1510391e43daSPeter Zijlstra 
1511391e43daSPeter Zijlstra 	idx = sched_find_first_bit(array->bitmap);
1512391e43daSPeter Zijlstra 	BUG_ON(idx >= MAX_RT_PRIO);
1513391e43daSPeter Zijlstra 
1514391e43daSPeter Zijlstra 	queue = array->queue + idx;
1515391e43daSPeter Zijlstra 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1516391e43daSPeter Zijlstra 
1517391e43daSPeter Zijlstra 	return next;
1518391e43daSPeter Zijlstra }
1519391e43daSPeter Zijlstra 
1520391e43daSPeter Zijlstra static struct task_struct *_pick_next_task_rt(struct rq *rq)
1521391e43daSPeter Zijlstra {
1522391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se;
1523391e43daSPeter Zijlstra 	struct task_struct *p;
1524606dba2eSPeter Zijlstra 	struct rt_rq *rt_rq  = &rq->rt;
1525391e43daSPeter Zijlstra 
1526391e43daSPeter Zijlstra 	do {
1527391e43daSPeter Zijlstra 		rt_se = pick_next_rt_entity(rq, rt_rq);
1528391e43daSPeter Zijlstra 		BUG_ON(!rt_se);
1529391e43daSPeter Zijlstra 		rt_rq = group_rt_rq(rt_se);
1530391e43daSPeter Zijlstra 	} while (rt_rq);
1531391e43daSPeter Zijlstra 
1532391e43daSPeter Zijlstra 	p = rt_task_of(rt_se);
153378becc27SFrederic Weisbecker 	p->se.exec_start = rq_clock_task(rq);
1534391e43daSPeter Zijlstra 
1535391e43daSPeter Zijlstra 	return p;
1536391e43daSPeter Zijlstra }
1537391e43daSPeter Zijlstra 
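/*
 * Illustration (standalone user-space sketch, not kernel code): the
 * descent loop of _pick_next_task_rt() above. Each level picks its
 * highest-priority entity; if that entity is a group, descend into the
 * group's runqueue and pick again, until a task is reached. The
 * demo_entity type is hypothetical.
 */
#include <stdio.h>

struct demo_entity {
	const char *name;
	struct demo_entity *group_best; /* best entity of the group's rq,
					 * NULL if this entity is a task */
};

int main(void)
{
	struct demo_entity task  = { "task",  NULL };
	struct demo_entity group = { "group", &task };
	struct demo_entity *se   = &group;      /* the rq's best entity */

	while (se->group_best)                  /* while it is a group */
		se = se->group_best;
	printf("picked %s\n", se->name);        /* "task" */
	return 0;
}
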
1538606dba2eSPeter Zijlstra static struct task_struct *
1539d8ac8971SMatt Fleming pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1540391e43daSPeter Zijlstra {
1541606dba2eSPeter Zijlstra 	struct task_struct *p;
1542606dba2eSPeter Zijlstra 	struct rt_rq *rt_rq = &rq->rt;
1543606dba2eSPeter Zijlstra 
154437e117c0SPeter Zijlstra 	if (need_pull_rt_task(rq, prev)) {
1545cbce1a68SPeter Zijlstra 		/*
1546cbce1a68SPeter Zijlstra 		 * This is OK, because current is on_cpu, which avoids it being
1547cbce1a68SPeter Zijlstra 		 * picked for load-balance; preemption/IRQs are still disabled,
1548cbce1a68SPeter Zijlstra 		 * avoiding further scheduler activity on it, and we're being
1549cbce1a68SPeter Zijlstra 		 * very careful to re-start the picking loop.
1550cbce1a68SPeter Zijlstra 		 */
1551d8ac8971SMatt Fleming 		rq_unpin_lock(rq, rf);
155238033c37SPeter Zijlstra 		pull_rt_task(rq);
1553d8ac8971SMatt Fleming 		rq_repin_lock(rq, rf);
155437e117c0SPeter Zijlstra 		/*
155537e117c0SPeter Zijlstra 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1556a1d9a323SKirill Tkhai 		 * means a dl or stop task can slip in, in which case we need
1557a1d9a323SKirill Tkhai 		 * to re-start task selection.
155837e117c0SPeter Zijlstra 		 */
1559da0c1e65SKirill Tkhai 		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1560a1d9a323SKirill Tkhai 			     rq->dl.dl_nr_running))
156137e117c0SPeter Zijlstra 			return RETRY_TASK;
156237e117c0SPeter Zijlstra 	}
156338033c37SPeter Zijlstra 
1564734ff2a7SKirill Tkhai 	/*
1565734ff2a7SKirill Tkhai 	 * We may dequeue prev's rt_rq in put_prev_task().
1566734ff2a7SKirill Tkhai 	 * So, we update the time before the rt_nr_running check.
1567734ff2a7SKirill Tkhai 	 */
1568734ff2a7SKirill Tkhai 	if (prev->sched_class == &rt_sched_class)
1569734ff2a7SKirill Tkhai 		update_curr_rt(rq);
1570734ff2a7SKirill Tkhai 
1571f4ebcbc0SKirill Tkhai 	if (!rt_rq->rt_queued)
1572606dba2eSPeter Zijlstra 		return NULL;
1573606dba2eSPeter Zijlstra 
15743f1d2a31SPeter Zijlstra 	put_prev_task(rq, prev);
1575606dba2eSPeter Zijlstra 
1576606dba2eSPeter Zijlstra 	p = _pick_next_task_rt(rq);
1577391e43daSPeter Zijlstra 
1578391e43daSPeter Zijlstra 	/* The running task is never eligible for pushing */
1579391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
1580391e43daSPeter Zijlstra 
1581e3fca9e7SPeter Zijlstra 	queue_push_tasks(rq);
1582391e43daSPeter Zijlstra 
1583391e43daSPeter Zijlstra 	return p;
1584391e43daSPeter Zijlstra }
1585391e43daSPeter Zijlstra 
1586391e43daSPeter Zijlstra static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1587391e43daSPeter Zijlstra {
1588391e43daSPeter Zijlstra 	update_curr_rt(rq);
1589391e43daSPeter Zijlstra 
1590391e43daSPeter Zijlstra 	/*
1591391e43daSPeter Zijlstra 	 * The previous task needs to be made eligible for pushing
1592391e43daSPeter Zijlstra 	 * if it is still active
1593391e43daSPeter Zijlstra 	 */
15944b53a341SIngo Molnar 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1595391e43daSPeter Zijlstra 		enqueue_pushable_task(rq, p);
1596391e43daSPeter Zijlstra }
1597391e43daSPeter Zijlstra 
1598391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1599391e43daSPeter Zijlstra 
1600391e43daSPeter Zijlstra /* Only try algorithms three times */
1601391e43daSPeter Zijlstra #define RT_MAX_TRIES 3
1602391e43daSPeter Zijlstra 
1603391e43daSPeter Zijlstra static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1604391e43daSPeter Zijlstra {
1605391e43daSPeter Zijlstra 	if (!task_running(rq, p) &&
16060c98d344SIngo Molnar 	    cpumask_test_cpu(cpu, &p->cpus_allowed))
1607391e43daSPeter Zijlstra 		return 1;
1608391e43daSPeter Zijlstra 	return 0;
1609391e43daSPeter Zijlstra }
1610391e43daSPeter Zijlstra 
1611e23ee747SKirill Tkhai /*
1612e23ee747SKirill Tkhai  * Return the rq's highest-priority pushable task that is suitable to be
1613e23ee747SKirill Tkhai  * executed on the cpu, or NULL if there is none.
1614e23ee747SKirill Tkhai  */
1615e23ee747SKirill Tkhai static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1616391e43daSPeter Zijlstra {
1617e23ee747SKirill Tkhai 	struct plist_head *head = &rq->rt.pushable_tasks;
1618391e43daSPeter Zijlstra 	struct task_struct *p;
1619391e43daSPeter Zijlstra 
1620e23ee747SKirill Tkhai 	if (!has_pushable_tasks(rq))
1621e23ee747SKirill Tkhai 		return NULL;
1622391e43daSPeter Zijlstra 
1623e23ee747SKirill Tkhai 	plist_for_each_entry(p, head, pushable_tasks) {
1624e23ee747SKirill Tkhai 		if (pick_rt_task(rq, p, cpu))
1625e23ee747SKirill Tkhai 			return p;
1626391e43daSPeter Zijlstra 	}
1627391e43daSPeter Zijlstra 
1628e23ee747SKirill Tkhai 	return NULL;
1629391e43daSPeter Zijlstra }
1630391e43daSPeter Zijlstra 
1631391e43daSPeter Zijlstra static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1632391e43daSPeter Zijlstra 
1633391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task)
1634391e43daSPeter Zijlstra {
1635391e43daSPeter Zijlstra 	struct sched_domain *sd;
16364ba29684SChristoph Lameter 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1637391e43daSPeter Zijlstra 	int this_cpu = smp_processor_id();
1638391e43daSPeter Zijlstra 	int cpu      = task_cpu(task);
1639391e43daSPeter Zijlstra 
1640391e43daSPeter Zijlstra 	/* Make sure the mask is initialized first */
1641391e43daSPeter Zijlstra 	if (unlikely(!lowest_mask))
1642391e43daSPeter Zijlstra 		return -1;
1643391e43daSPeter Zijlstra 
16444b53a341SIngo Molnar 	if (task->nr_cpus_allowed == 1)
1645391e43daSPeter Zijlstra 		return -1; /* No other targets possible */
1646391e43daSPeter Zijlstra 
1647391e43daSPeter Zijlstra 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1648391e43daSPeter Zijlstra 		return -1; /* No targets found */
1649391e43daSPeter Zijlstra 
1650391e43daSPeter Zijlstra 	/*
1651391e43daSPeter Zijlstra 	 * At this point we have built a mask of cpus representing the
1652391e43daSPeter Zijlstra 	 * lowest priority tasks in the system.  Now we want to elect
1653391e43daSPeter Zijlstra 	 * the best one based on our affinity and topology.
1654391e43daSPeter Zijlstra 	 *
1655391e43daSPeter Zijlstra 	 * We prioritize the last cpu that the task executed on since
1656391e43daSPeter Zijlstra 	 * it is most likely cache-hot in that location.
1657391e43daSPeter Zijlstra 	 */
1658391e43daSPeter Zijlstra 	if (cpumask_test_cpu(cpu, lowest_mask))
1659391e43daSPeter Zijlstra 		return cpu;
1660391e43daSPeter Zijlstra 
1661391e43daSPeter Zijlstra 	/*
1662391e43daSPeter Zijlstra 	 * Otherwise, we consult the sched_domains span maps to figure
1663391e43daSPeter Zijlstra 	 * out which cpu is logically closest to our hot cache data.
1664391e43daSPeter Zijlstra 	 */
1665391e43daSPeter Zijlstra 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1666391e43daSPeter Zijlstra 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1667391e43daSPeter Zijlstra 
1668391e43daSPeter Zijlstra 	rcu_read_lock();
1669391e43daSPeter Zijlstra 	for_each_domain(cpu, sd) {
1670391e43daSPeter Zijlstra 		if (sd->flags & SD_WAKE_AFFINE) {
1671391e43daSPeter Zijlstra 			int best_cpu;
1672391e43daSPeter Zijlstra 
1673391e43daSPeter Zijlstra 			/*
1674391e43daSPeter Zijlstra 			 * "this_cpu" is cheaper to preempt than a
1675391e43daSPeter Zijlstra 			 * remote processor.
1676391e43daSPeter Zijlstra 			 */
1677391e43daSPeter Zijlstra 			if (this_cpu != -1 &&
1678391e43daSPeter Zijlstra 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1679391e43daSPeter Zijlstra 				rcu_read_unlock();
1680391e43daSPeter Zijlstra 				return this_cpu;
1681391e43daSPeter Zijlstra 			}
1682391e43daSPeter Zijlstra 
1683391e43daSPeter Zijlstra 			best_cpu = cpumask_first_and(lowest_mask,
1684391e43daSPeter Zijlstra 						     sched_domain_span(sd));
1685391e43daSPeter Zijlstra 			if (best_cpu < nr_cpu_ids) {
1686391e43daSPeter Zijlstra 				rcu_read_unlock();
1687391e43daSPeter Zijlstra 				return best_cpu;
1688391e43daSPeter Zijlstra 			}
1689391e43daSPeter Zijlstra 		}
1690391e43daSPeter Zijlstra 	}
1691391e43daSPeter Zijlstra 	rcu_read_unlock();
1692391e43daSPeter Zijlstra 
1693391e43daSPeter Zijlstra 	/*
1694391e43daSPeter Zijlstra 	 * And finally, if there were no matches within the domains
1695391e43daSPeter Zijlstra 	 * just give the caller *something* to work with from the compatible
1696391e43daSPeter Zijlstra 	 * locations.
1697391e43daSPeter Zijlstra 	 */
1698391e43daSPeter Zijlstra 	if (this_cpu != -1)
1699391e43daSPeter Zijlstra 		return this_cpu;
1700391e43daSPeter Zijlstra 
1701391e43daSPeter Zijlstra 	cpu = cpumask_any(lowest_mask);
1702391e43daSPeter Zijlstra 	if (cpu < nr_cpu_ids)
1703391e43daSPeter Zijlstra 		return cpu;
1704391e43daSPeter Zijlstra 	return -1;
1705391e43daSPeter Zijlstra }
1706391e43daSPeter Zijlstra 
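/*
 * Illustration (standalone user-space sketch, not kernel code): the
 * preference order of find_lowest_rq() above, with the cpupri result
 * reduced to a bitmask and the sched-domain walk collapsed into a
 * single "prefer this_cpu" step. All demo_* names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static int demo_find_lowest(uint64_t lowest_mask, int task_cpu, int this_cpu)
{
	if (!lowest_mask)
		return -1;                          /* no targets found */
	if (lowest_mask & (1ULL << task_cpu))
		return task_cpu;                    /* likely cache-hot */
	if (lowest_mask & (1ULL << this_cpu))
		return this_cpu;                    /* cheaper to preempt */
	return __builtin_ctzll(lowest_mask);        /* anything compatible */
}

int main(void)
{
	uint64_t mask = (1ULL << 2) | (1ULL << 5);  /* CPUs 2, 5 run low prio */

	printf("target=%d\n", demo_find_lowest(mask, 1, 5));    /* 5 */
	printf("target=%d\n", demo_find_lowest(mask, 2, 5));    /* 2 */
	printf("target=%d\n", demo_find_lowest(mask, 1, 0));    /* 2 */
	return 0;
}
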
1707391e43daSPeter Zijlstra /* Will lock the rq it finds */
1708391e43daSPeter Zijlstra static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1709391e43daSPeter Zijlstra {
1710391e43daSPeter Zijlstra 	struct rq *lowest_rq = NULL;
1711391e43daSPeter Zijlstra 	int tries;
1712391e43daSPeter Zijlstra 	int cpu;
1713391e43daSPeter Zijlstra 
1714391e43daSPeter Zijlstra 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1715391e43daSPeter Zijlstra 		cpu = find_lowest_rq(task);
1716391e43daSPeter Zijlstra 
1717391e43daSPeter Zijlstra 		if ((cpu == -1) || (cpu == rq->cpu))
1718391e43daSPeter Zijlstra 			break;
1719391e43daSPeter Zijlstra 
1720391e43daSPeter Zijlstra 		lowest_rq = cpu_rq(cpu);
1721391e43daSPeter Zijlstra 
172280e3d87bSTim Chen 		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
172380e3d87bSTim Chen 			/*
172480e3d87bSTim Chen 			 * Target rq has tasks of equal or higher priority,
172580e3d87bSTim Chen 			 * retrying does not release any lock and is unlikely
172680e3d87bSTim Chen 			 * to yield a different result.
172780e3d87bSTim Chen 			 */
172880e3d87bSTim Chen 			lowest_rq = NULL;
172980e3d87bSTim Chen 			break;
173080e3d87bSTim Chen 		}
173180e3d87bSTim Chen 
1732391e43daSPeter Zijlstra 		/* if the prio of this runqueue changed, try again */
1733391e43daSPeter Zijlstra 		if (double_lock_balance(rq, lowest_rq)) {
1734391e43daSPeter Zijlstra 			/*
1735391e43daSPeter Zijlstra 			 * We had to unlock the run queue. In
1736391e43daSPeter Zijlstra 			 * the meantime, the task could have
1737391e43daSPeter Zijlstra 			 * migrated already or had its affinity changed.
1738391e43daSPeter Zijlstra 			 * Also make sure that it wasn't scheduled on its rq.
1739391e43daSPeter Zijlstra 			 */
1740391e43daSPeter Zijlstra 			if (unlikely(task_rq(task) != rq ||
17410c98d344SIngo Molnar 				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
1742391e43daSPeter Zijlstra 				     task_running(rq, task) ||
174313b5ab02SXunlei Pang 				     !rt_task(task) ||
1744da0c1e65SKirill Tkhai 				     !task_on_rq_queued(task))) {
1745391e43daSPeter Zijlstra 
17467f1b4393SPeter Zijlstra 				double_unlock_balance(rq, lowest_rq);
1747391e43daSPeter Zijlstra 				lowest_rq = NULL;
1748391e43daSPeter Zijlstra 				break;
1749391e43daSPeter Zijlstra 			}
1750391e43daSPeter Zijlstra 		}
1751391e43daSPeter Zijlstra 
1752391e43daSPeter Zijlstra 		/* If this rq is still suitable use it. */
1753391e43daSPeter Zijlstra 		if (lowest_rq->rt.highest_prio.curr > task->prio)
1754391e43daSPeter Zijlstra 			break;
1755391e43daSPeter Zijlstra 
1756391e43daSPeter Zijlstra 		/* try again */
1757391e43daSPeter Zijlstra 		double_unlock_balance(rq, lowest_rq);
1758391e43daSPeter Zijlstra 		lowest_rq = NULL;
1759391e43daSPeter Zijlstra 	}
1760391e43daSPeter Zijlstra 
1761391e43daSPeter Zijlstra 	return lowest_rq;
1762391e43daSPeter Zijlstra }
1763391e43daSPeter Zijlstra 
1764391e43daSPeter Zijlstra static struct task_struct *pick_next_pushable_task(struct rq *rq)
1765391e43daSPeter Zijlstra {
1766391e43daSPeter Zijlstra 	struct task_struct *p;
1767391e43daSPeter Zijlstra 
1768391e43daSPeter Zijlstra 	if (!has_pushable_tasks(rq))
1769391e43daSPeter Zijlstra 		return NULL;
1770391e43daSPeter Zijlstra 
1771391e43daSPeter Zijlstra 	p = plist_first_entry(&rq->rt.pushable_tasks,
1772391e43daSPeter Zijlstra 			      struct task_struct, pushable_tasks);
1773391e43daSPeter Zijlstra 
1774391e43daSPeter Zijlstra 	BUG_ON(rq->cpu != task_cpu(p));
1775391e43daSPeter Zijlstra 	BUG_ON(task_current(rq, p));
17764b53a341SIngo Molnar 	BUG_ON(p->nr_cpus_allowed <= 1);
1777391e43daSPeter Zijlstra 
1778da0c1e65SKirill Tkhai 	BUG_ON(!task_on_rq_queued(p));
1779391e43daSPeter Zijlstra 	BUG_ON(!rt_task(p));
1780391e43daSPeter Zijlstra 
1781391e43daSPeter Zijlstra 	return p;
1782391e43daSPeter Zijlstra }
1783391e43daSPeter Zijlstra 
1784391e43daSPeter Zijlstra /*
1785391e43daSPeter Zijlstra  * If the current CPU has more than one RT task, see if the non-running
1786391e43daSPeter Zijlstra  * task can migrate over to a CPU that is running a task
1787391e43daSPeter Zijlstra  * of lesser priority.
1788391e43daSPeter Zijlstra  */
1789391e43daSPeter Zijlstra static int push_rt_task(struct rq *rq)
1790391e43daSPeter Zijlstra {
1791391e43daSPeter Zijlstra 	struct task_struct *next_task;
1792391e43daSPeter Zijlstra 	struct rq *lowest_rq;
1793391e43daSPeter Zijlstra 	int ret = 0;
1794391e43daSPeter Zijlstra 
1795391e43daSPeter Zijlstra 	if (!rq->rt.overloaded)
1796391e43daSPeter Zijlstra 		return 0;
1797391e43daSPeter Zijlstra 
1798391e43daSPeter Zijlstra 	next_task = pick_next_pushable_task(rq);
1799391e43daSPeter Zijlstra 	if (!next_task)
1800391e43daSPeter Zijlstra 		return 0;
1801391e43daSPeter Zijlstra 
1802391e43daSPeter Zijlstra retry:
1803391e43daSPeter Zijlstra 	if (unlikely(next_task == rq->curr)) {
1804391e43daSPeter Zijlstra 		WARN_ON(1);
1805391e43daSPeter Zijlstra 		return 0;
1806391e43daSPeter Zijlstra 	}
1807391e43daSPeter Zijlstra 
1808391e43daSPeter Zijlstra 	/*
1809391e43daSPeter Zijlstra 	 * It's possible that the next_task slipped in with a
1810391e43daSPeter Zijlstra 	 * higher priority than current. If that's the case,
1811391e43daSPeter Zijlstra 	 * just reschedule current.
1812391e43daSPeter Zijlstra 	 */
1813391e43daSPeter Zijlstra 	if (unlikely(next_task->prio < rq->curr->prio)) {
18148875125eSKirill Tkhai 		resched_curr(rq);
1815391e43daSPeter Zijlstra 		return 0;
1816391e43daSPeter Zijlstra 	}
1817391e43daSPeter Zijlstra 
1818391e43daSPeter Zijlstra 	/* We might release rq lock */
1819391e43daSPeter Zijlstra 	get_task_struct(next_task);
1820391e43daSPeter Zijlstra 
1821391e43daSPeter Zijlstra 	/* find_lock_lowest_rq locks the rq if found */
1822391e43daSPeter Zijlstra 	lowest_rq = find_lock_lowest_rq(next_task, rq);
1823391e43daSPeter Zijlstra 	if (!lowest_rq) {
1824391e43daSPeter Zijlstra 		struct task_struct *task;
1825391e43daSPeter Zijlstra 		/*
1826391e43daSPeter Zijlstra 		 * find_lock_lowest_rq releases rq->lock
1827391e43daSPeter Zijlstra 		 * so it is possible that next_task has migrated.
1828391e43daSPeter Zijlstra 		 *
1829391e43daSPeter Zijlstra 		 * We need to make sure that the task is still on the same
1830391e43daSPeter Zijlstra 		 * run-queue and is also still the next task eligible for
1831391e43daSPeter Zijlstra 		 * pushing.
1832391e43daSPeter Zijlstra 		 */
1833391e43daSPeter Zijlstra 		task = pick_next_pushable_task(rq);
1834de16b91eSByungchul Park 		if (task == next_task) {
1835391e43daSPeter Zijlstra 			/*
1836391e43daSPeter Zijlstra 			 * The task hasn't migrated, and is still the next
1837391e43daSPeter Zijlstra 			 * eligible task, but we failed to find a run-queue
1838391e43daSPeter Zijlstra 			 * to push it to.  Do not retry in this case, since
1839391e43daSPeter Zijlstra 			 * other cpus will pull from us when ready.
1840391e43daSPeter Zijlstra 			 */
1841391e43daSPeter Zijlstra 			goto out;
1842391e43daSPeter Zijlstra 		}
1843391e43daSPeter Zijlstra 
1844391e43daSPeter Zijlstra 		if (!task)
1845391e43daSPeter Zijlstra 			/* No more tasks, just exit */
1846391e43daSPeter Zijlstra 			goto out;
1847391e43daSPeter Zijlstra 
1848391e43daSPeter Zijlstra 		/*
1849391e43daSPeter Zijlstra 		 * Something has shifted, try again.
1850391e43daSPeter Zijlstra 		 */
1851391e43daSPeter Zijlstra 		put_task_struct(next_task);
1852391e43daSPeter Zijlstra 		next_task = task;
1853391e43daSPeter Zijlstra 		goto retry;
1854391e43daSPeter Zijlstra 	}
1855391e43daSPeter Zijlstra 
1856391e43daSPeter Zijlstra 	deactivate_task(rq, next_task, 0);
1857391e43daSPeter Zijlstra 	set_task_cpu(next_task, lowest_rq->cpu);
1858391e43daSPeter Zijlstra 	activate_task(lowest_rq, next_task, 0);
1859391e43daSPeter Zijlstra 	ret = 1;
1860391e43daSPeter Zijlstra 
18618875125eSKirill Tkhai 	resched_curr(lowest_rq);
1862391e43daSPeter Zijlstra 
1863391e43daSPeter Zijlstra 	double_unlock_balance(rq, lowest_rq);
1864391e43daSPeter Zijlstra 
1865391e43daSPeter Zijlstra out:
1866391e43daSPeter Zijlstra 	put_task_struct(next_task);
1867391e43daSPeter Zijlstra 
1868391e43daSPeter Zijlstra 	return ret;
1869391e43daSPeter Zijlstra }
1870391e43daSPeter Zijlstra 
1871391e43daSPeter Zijlstra static void push_rt_tasks(struct rq *rq)
1872391e43daSPeter Zijlstra {
1873391e43daSPeter Zijlstra 	/* push_rt_task will return true if it moved an RT */
1874391e43daSPeter Zijlstra 	while (push_rt_task(rq))
1875391e43daSPeter Zijlstra 		;
1876391e43daSPeter Zijlstra }
1877391e43daSPeter Zijlstra 
1878b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
1879b6366f04SSteven Rostedt /*
1880b6366f04SSteven Rostedt  * The search for the next cpu always starts at rq->cpu and ends
1881b6366f04SSteven Rostedt  * when we reach rq->cpu again. It will never return rq->cpu.
1882b6366f04SSteven Rostedt  * This returns the next cpu to check, or nr_cpu_ids if the loop
1883b6366f04SSteven Rostedt  * is complete.
1884b6366f04SSteven Rostedt  *
1885b6366f04SSteven Rostedt  * rq->rt.push_cpu holds the last cpu returned by this function,
1886b6366f04SSteven Rostedt  * or if this is the first instance, it must hold rq->cpu.
1887b6366f04SSteven Rostedt  */
1888b6366f04SSteven Rostedt static int rto_next_cpu(struct rq *rq)
1889b6366f04SSteven Rostedt {
1890b6366f04SSteven Rostedt 	int prev_cpu = rq->rt.push_cpu;
1891b6366f04SSteven Rostedt 	int cpu;
1892b6366f04SSteven Rostedt 
1893b6366f04SSteven Rostedt 	cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1894b6366f04SSteven Rostedt 
1895b6366f04SSteven Rostedt 	/*
1896b6366f04SSteven Rostedt 	 * If the previous cpu is less than the rq's CPU, then it already
1897b6366f04SSteven Rostedt 	 * passed the end of the mask, and has started from the beginning.
1898b6366f04SSteven Rostedt 	 * We end if the next CPU is greater or equal to rq's CPU.
1899b6366f04SSteven Rostedt 	 */
1900b6366f04SSteven Rostedt 	if (prev_cpu < rq->cpu) {
1901b6366f04SSteven Rostedt 		if (cpu >= rq->cpu)
1902b6366f04SSteven Rostedt 			return nr_cpu_ids;
1903b6366f04SSteven Rostedt 
1904b6366f04SSteven Rostedt 	} else if (cpu >= nr_cpu_ids) {
1905b6366f04SSteven Rostedt 		/*
1906b6366f04SSteven Rostedt 		 * We passed the end of the mask, start at the beginning.
1907b6366f04SSteven Rostedt 		 * If the result is greater or equal to the rq's CPU, then
1908b6366f04SSteven Rostedt 		 * the loop is finished.
1909b6366f04SSteven Rostedt 		 */
1910b6366f04SSteven Rostedt 		cpu = cpumask_first(rq->rd->rto_mask);
1911b6366f04SSteven Rostedt 		if (cpu >= rq->cpu)
1912b6366f04SSteven Rostedt 			return nr_cpu_ids;
1913b6366f04SSteven Rostedt 	}
1914b6366f04SSteven Rostedt 	rq->rt.push_cpu = cpu;
1915b6366f04SSteven Rostedt 
1916b6366f04SSteven Rostedt 	/* Return cpu to let the caller know if the loop is finished or not */
1917b6366f04SSteven Rostedt 	return cpu;
1918b6366f04SSteven Rostedt }
1919b6366f04SSteven Rostedt 
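/*
 * Illustration (standalone user-space sketch, not kernel code): the
 * circular scan of rto_next_cpu() above. Starting after our own CPU,
 * the walk wraps over the overload mask exactly once and terminates
 * before coming back around. A uint64_t stands in for rto_mask; all
 * demo_* names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_NR_CPUS 8

/* next set bit strictly after 'prev', or DEMO_NR_CPUS if none */
static int demo_next_set(uint64_t mask, int prev)
{
	for (int cpu = prev + 1; cpu < DEMO_NR_CPUS; cpu++)
		if (mask & (1ULL << cpu))
			return cpu;
	return DEMO_NR_CPUS;
}

static int demo_rto_next_cpu(uint64_t mask, int my_cpu, int prev_cpu)
{
	int cpu = demo_next_set(mask, prev_cpu);

	if (prev_cpu < my_cpu) {
		/* already wrapped once: stop when we reach our own CPU */
		if (cpu >= my_cpu)
			return DEMO_NR_CPUS;
	} else if (cpu >= DEMO_NR_CPUS) {
		/* fell off the end: wrap to the start, same stop rule */
		cpu = demo_next_set(mask, -1);
		if (cpu >= my_cpu)
			return DEMO_NR_CPUS;
	}
	return cpu;
}

int main(void)
{
	uint64_t rto_mask = (1ULL << 1) | (1ULL << 5);  /* CPUs 1, 5 overloaded */
	int cpu = 3;                                    /* start at rq->cpu */

	while ((cpu = demo_rto_next_cpu(rto_mask, 3, cpu)) < DEMO_NR_CPUS)
		printf("check cpu %d\n", cpu);          /* prints 5, then 1 */
	return 0;
}
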
1920b6366f04SSteven Rostedt static int find_next_push_cpu(struct rq *rq)
1921b6366f04SSteven Rostedt {
1922b6366f04SSteven Rostedt 	struct rq *next_rq;
1923b6366f04SSteven Rostedt 	int cpu;
1924b6366f04SSteven Rostedt 
1925b6366f04SSteven Rostedt 	while (1) {
1926b6366f04SSteven Rostedt 		cpu = rto_next_cpu(rq);
1927b6366f04SSteven Rostedt 		if (cpu >= nr_cpu_ids)
1928b6366f04SSteven Rostedt 			break;
1929b6366f04SSteven Rostedt 		next_rq = cpu_rq(cpu);
1930b6366f04SSteven Rostedt 
1931b6366f04SSteven Rostedt 		/* Make sure the next rq can push to this rq */
1932b6366f04SSteven Rostedt 		if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1933b6366f04SSteven Rostedt 			break;
1934b6366f04SSteven Rostedt 	}
1935b6366f04SSteven Rostedt 
1936b6366f04SSteven Rostedt 	return cpu;
1937b6366f04SSteven Rostedt }
1938b6366f04SSteven Rostedt 
1939b6366f04SSteven Rostedt #define RT_PUSH_IPI_EXECUTING		1
1940b6366f04SSteven Rostedt #define RT_PUSH_IPI_RESTART		2
1941b6366f04SSteven Rostedt 
19423e777f99SSteven Rostedt (VMware) /*
19433e777f99SSteven Rostedt (VMware)  * When a high priority task schedules out from a CPU and a lower priority
19443e777f99SSteven Rostedt (VMware)  * task is scheduled in, a check is made to see if there are any RT tasks
19453e777f99SSteven Rostedt (VMware)  * on other CPUs that are waiting to run because a higher priority RT task
19463e777f99SSteven Rostedt (VMware)  * is currently running on its CPU. In this case, the CPU with multiple RT
19473e777f99SSteven Rostedt (VMware)  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
19483e777f99SSteven Rostedt (VMware)  * up that may be able to run one of its non-running queued RT tasks.
19493e777f99SSteven Rostedt (VMware)  *
19503e777f99SSteven Rostedt (VMware)  * On large CPU boxes, there's the case that several CPUs could schedule
19513e777f99SSteven Rostedt (VMware)  * a lower priority task at the same time, in which case it will look for
19523e777f99SSteven Rostedt (VMware)  * any overloaded CPUs that it could pull a task from. To do this, the runqueue
19533e777f99SSteven Rostedt (VMware)  * lock must be taken from that overloaded CPU. Having tens of CPUs all fighting
19543e777f99SSteven Rostedt (VMware)  * for a single overloaded CPU's runqueue lock can produce a large latency.
19553e777f99SSteven Rostedt (VMware)  * (This has actually been observed on large boxes running cyclictest).
19563e777f99SSteven Rostedt (VMware)  * Instead of taking the runqueue lock of the overloaded CPU, each of the
19573e777f99SSteven Rostedt (VMware)  * CPUs that scheduled a lower priority task simply sends an IPI to the
19583e777f99SSteven Rostedt (VMware)  * overloaded CPU. An IPI is much cheaper than taking a runqueue lock with
19593e777f99SSteven Rostedt (VMware)  * lots of contention. The overloaded CPU will look to push its non-running
19603e777f99SSteven Rostedt (VMware)  * RT task off, and if it does, it can then ignore the other IPIs coming
19613e777f99SSteven Rostedt (VMware)  * in, and just pass those IPIs off to any other overloaded CPU.
19623e777f99SSteven Rostedt (VMware)  *
19633e777f99SSteven Rostedt (VMware)  * When a CPU schedules a lower priority task, it only sends an IPI to
19643e777f99SSteven Rostedt (VMware)  * the "next" CPU that has overloaded RT tasks. This prevents IPI storms,
19653e777f99SSteven Rostedt (VMware)  * as having 10 CPUs scheduling lower priority tasks and 10 CPUs with
19663e777f99SSteven Rostedt (VMware)  * RT overloaded tasks would cause 100 IPIs to go out at once.
19673e777f99SSteven Rostedt (VMware)  *
19683e777f99SSteven Rostedt (VMware)  * The overloaded RT CPU, when receiving an IPI, will try to push off its
19693e777f99SSteven Rostedt (VMware)  * overloaded RT tasks and then send an IPI to the next CPU that has
19703e777f99SSteven Rostedt (VMware)  * overloaded RT tasks. This stops when all CPUs with overloaded RT tasks
19713e777f99SSteven Rostedt (VMware)  * have completed. Just because a CPU may have pushed off its own overloaded
19723e777f99SSteven Rostedt (VMware)  * RT task does not mean it should stop sending the IPI around to other
19733e777f99SSteven Rostedt (VMware)  * overloaded CPUs. There may be another RT task waiting to run on one of
19743e777f99SSteven Rostedt (VMware)  * those CPUs that are of higher priority than the one that was just
19753e777f99SSteven Rostedt (VMware)  * pushed.
19763e777f99SSteven Rostedt (VMware)  *
19773e777f99SSteven Rostedt (VMware)  * An optimization that could possibly be made is to make a CPU array similar
19783e777f99SSteven Rostedt (VMware)  * to the cpupri array mask of all running RT tasks, but for the overloaded
19793e777f99SSteven Rostedt (VMware)  * case, then the IPI could be sent to only the CPU with the highest priority
19803e777f99SSteven Rostedt (VMware)  * RT task waiting, and that CPU could send off further IPIs to the CPU with
19813e777f99SSteven Rostedt (VMware)  * the next highest waiting task. Since the overloaded case is much less likely
19823e777f99SSteven Rostedt (VMware)  * to happen, the complexity of this implementation may not be worth it.
19833e777f99SSteven Rostedt (VMware)  * Instead, just send an IPI around to all overloaded CPUs.
19843e777f99SSteven Rostedt (VMware)  *
19853e777f99SSteven Rostedt (VMware)  * The rq->rt.push_flags holds the status of the IPI that is going around.
19863e777f99SSteven Rostedt (VMware)  * A run queue can only send out a single IPI at a time. The possible flags
19873e777f99SSteven Rostedt (VMware)  * for rq->rt.push_flags are:
19883e777f99SSteven Rostedt (VMware)  *
19893e777f99SSteven Rostedt (VMware)  *    (None or zero):		No IPI is going around for the current rq
19903e777f99SSteven Rostedt (VMware)  *    RT_PUSH_IPI_EXECUTING:	An IPI for the rq is being passed around
19913e777f99SSteven Rostedt (VMware)  *    RT_PUSH_IPI_RESTART:	The priority of the running task for the rq
19923e777f99SSteven Rostedt (VMware)  *				has changed, and the IPI should restart
19933e777f99SSteven Rostedt (VMware)  *				circulating the overloaded CPUs again.
19943e777f99SSteven Rostedt (VMware)  *
19953e777f99SSteven Rostedt (VMware)  * rq->rt.push_cpu contains the CPU that is being sent the IPI. It is updated
19963e777f99SSteven Rostedt (VMware)  * before sending to the next CPU.
19973e777f99SSteven Rostedt (VMware)  *
19983e777f99SSteven Rostedt (VMware)  * Instead of having all CPUs that schedule a lower priority task send
19993e777f99SSteven Rostedt (VMware)  * an IPI to the same "first" CPU in the RT overload mask, they send it
20003e777f99SSteven Rostedt (VMware)  * to the next overloaded CPU after their own CPU. This helps distribute
20013e777f99SSteven Rostedt (VMware)  * the work when there's more than one overloaded CPU and multiple CPUs
20023e777f99SSteven Rostedt (VMware)  * scheduling in lower priority tasks.
20033e777f99SSteven Rostedt (VMware)  *
20043e777f99SSteven Rostedt (VMware)  * When an rq schedules a lower priority task than what was currently
20053e777f99SSteven Rostedt (VMware)  * running, the next CPU with overloaded RT tasks is examined first.
20063e777f99SSteven Rostedt (VMware)  * That is, if CPU 1 and 5 are overloaded, and CPU 3 schedules a lower
20073e777f99SSteven Rostedt (VMware)  * priority task, it will send an IPI first to CPU 5, then CPU 5 will
20083e777f99SSteven Rostedt (VMware)  * send to CPU 1 if it is still overloaded. CPU 1 will clear the
20093e777f99SSteven Rostedt (VMware)  * rq->rt.push_flags if RT_PUSH_IPI_RESTART is not set.
20103e777f99SSteven Rostedt (VMware)  *
20113e777f99SSteven Rostedt (VMware)  * The first CPU to notice IPI_RESTART is set, will clear that flag and then
20123e777f99SSteven Rostedt (VMware)  * send an IPI to the next overloaded CPU after the rq->cpu and not the next
20133e777f99SSteven Rostedt (VMware)  * CPU after push_cpu. That is, if CPU 1, 4 and 5 are overloaded when CPU 3
20143e777f99SSteven Rostedt (VMware)  * schedules a lower priority task, and the IPI_RESTART gets set while the
20153e777f99SSteven Rostedt (VMware)  * handling is being done on CPU 5, it will clear the flag and send it back to
20163e777f99SSteven Rostedt (VMware)  * CPU 4 instead of CPU 1.
20173e777f99SSteven Rostedt (VMware)  *
20183e777f99SSteven Rostedt (VMware)  * Note, the above logic can be disabled by turning off the sched_feature
20193e777f99SSteven Rostedt (VMware)  * RT_PUSH_IPI. Then the rq lock of the overloaded CPU will simply be
20203e777f99SSteven Rostedt (VMware)  * taken by the CPU requesting a pull and the waiting RT task will be pulled
20213e777f99SSteven Rostedt (VMware)  * by that CPU. This may be fine for machines with few CPUs.
20223e777f99SSteven Rostedt (VMware)  */
2023b6366f04SSteven Rostedt static void tell_cpu_to_push(struct rq *rq)
2024b6366f04SSteven Rostedt {
2025b6366f04SSteven Rostedt 	int cpu;
2026b6366f04SSteven Rostedt 
2027b6366f04SSteven Rostedt 	if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
2028b6366f04SSteven Rostedt 		raw_spin_lock(&rq->rt.push_lock);
2029b6366f04SSteven Rostedt 		/* Make sure it's still executing */
2030b6366f04SSteven Rostedt 		if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
2031b6366f04SSteven Rostedt 			/*
2032b6366f04SSteven Rostedt 			 * Tell the IPI to restart the loop as things have
2033b6366f04SSteven Rostedt 			 * changed since it started.
2034b6366f04SSteven Rostedt 			 */
2035b6366f04SSteven Rostedt 			rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
2036b6366f04SSteven Rostedt 			raw_spin_unlock(&rq->rt.push_lock);
2037b6366f04SSteven Rostedt 			return;
2038b6366f04SSteven Rostedt 		}
2039b6366f04SSteven Rostedt 		raw_spin_unlock(&rq->rt.push_lock);
2040b6366f04SSteven Rostedt 	}
2041b6366f04SSteven Rostedt 
2042b6366f04SSteven Rostedt 	/* When we get here, there's no IPI going around */
2043b6366f04SSteven Rostedt 
2044b6366f04SSteven Rostedt 	rq->rt.push_cpu = rq->cpu;
2045b6366f04SSteven Rostedt 	cpu = find_next_push_cpu(rq);
2046b6366f04SSteven Rostedt 	if (cpu >= nr_cpu_ids)
2047b6366f04SSteven Rostedt 		return;
2048b6366f04SSteven Rostedt 
2049b6366f04SSteven Rostedt 	rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
2050b6366f04SSteven Rostedt 
2051b6366f04SSteven Rostedt 	irq_work_queue_on(&rq->rt.push_work, cpu);
2052b6366f04SSteven Rostedt }
2053b6366f04SSteven Rostedt 
2054b6366f04SSteven Rostedt /* Called from hardirq context */
2055b6366f04SSteven Rostedt static void try_to_push_tasks(void *arg)
2056b6366f04SSteven Rostedt {
2057b6366f04SSteven Rostedt 	struct rt_rq *rt_rq = arg;
2058b6366f04SSteven Rostedt 	struct rq *rq, *src_rq;
2059b6366f04SSteven Rostedt 	int this_cpu;
2060b6366f04SSteven Rostedt 	int cpu;
2061b6366f04SSteven Rostedt 
2062b6366f04SSteven Rostedt 	this_cpu = rt_rq->push_cpu;
2063b6366f04SSteven Rostedt 
2064b6366f04SSteven Rostedt 	/* Paranoid check */
2065b6366f04SSteven Rostedt 	BUG_ON(this_cpu != smp_processor_id());
2066b6366f04SSteven Rostedt 
2067b6366f04SSteven Rostedt 	rq = cpu_rq(this_cpu);
2068b6366f04SSteven Rostedt 	src_rq = rq_of_rt_rq(rt_rq);
2069b6366f04SSteven Rostedt 
2070b6366f04SSteven Rostedt again:
2071b6366f04SSteven Rostedt 	if (has_pushable_tasks(rq)) {
2072b6366f04SSteven Rostedt 		raw_spin_lock(&rq->lock);
2073b6366f04SSteven Rostedt 		push_rt_task(rq);
2074b6366f04SSteven Rostedt 		raw_spin_unlock(&rq->lock);
2075b6366f04SSteven Rostedt 	}
2076b6366f04SSteven Rostedt 
2077b6366f04SSteven Rostedt 	/* Pass the IPI to the next rt overloaded queue */
2078b6366f04SSteven Rostedt 	raw_spin_lock(&rt_rq->push_lock);
2079b6366f04SSteven Rostedt 	/*
2080b6366f04SSteven Rostedt 	 * If the source queue changed since the IPI went out,
2081b6366f04SSteven Rostedt 	 * we need to restart the search from that CPU again.
2082b6366f04SSteven Rostedt 	 */
2083b6366f04SSteven Rostedt 	if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
2084b6366f04SSteven Rostedt 		rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
2085b6366f04SSteven Rostedt 		rt_rq->push_cpu = src_rq->cpu;
2086b6366f04SSteven Rostedt 	}
2087b6366f04SSteven Rostedt 
2088b6366f04SSteven Rostedt 	cpu = find_next_push_cpu(src_rq);
2089b6366f04SSteven Rostedt 
2090b6366f04SSteven Rostedt 	if (cpu >= nr_cpu_ids)
2091b6366f04SSteven Rostedt 		rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
2092b6366f04SSteven Rostedt 	raw_spin_unlock(&rt_rq->push_lock);
2093b6366f04SSteven Rostedt 
2094b6366f04SSteven Rostedt 	if (cpu >= nr_cpu_ids)
2095b6366f04SSteven Rostedt 		return;
2096b6366f04SSteven Rostedt 
2097b6366f04SSteven Rostedt 	/*
2098b6366f04SSteven Rostedt 	 * It is possible that a restart caused this CPU to be
2099b6366f04SSteven Rostedt 	 * chosen again. Don't bother with an IPI, just see if we
2100b6366f04SSteven Rostedt 	 * have more to push.
2101b6366f04SSteven Rostedt 	 */
2102b6366f04SSteven Rostedt 	if (unlikely(cpu == rq->cpu))
2103b6366f04SSteven Rostedt 		goto again;
2104b6366f04SSteven Rostedt 
2105b6366f04SSteven Rostedt 	/* Try the next RT overloaded CPU */
2106b6366f04SSteven Rostedt 	irq_work_queue_on(&rt_rq->push_work, cpu);
2107b6366f04SSteven Rostedt }
2108b6366f04SSteven Rostedt 
2109b6366f04SSteven Rostedt static void push_irq_work_func(struct irq_work *work)
2110b6366f04SSteven Rostedt {
2111b6366f04SSteven Rostedt 	struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
2112b6366f04SSteven Rostedt 
2113b6366f04SSteven Rostedt 	try_to_push_tasks(rt_rq);
2114b6366f04SSteven Rostedt }
2115b6366f04SSteven Rostedt #endif /* HAVE_RT_PUSH_IPI */
2116b6366f04SSteven Rostedt 
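/*
 * Illustration (standalone user-space sketch, not kernel code): the
 * push_flags handshake used by tell_cpu_to_push() and
 * try_to_push_tasks() above, with the IPI replaced by direct calls and
 * the push_lock elided. The DEMO_* values and demo_* names are
 * hypothetical.
 */
#include <stdio.h>

#define DEMO_EXECUTING 1
#define DEMO_RESTART   2

static int demo_flags;

static void demo_tell_cpu_to_push(void)
{
	if (demo_flags & DEMO_EXECUTING) {
		/* an IPI is already circulating: just ask it to restart */
		demo_flags |= DEMO_RESTART;
		printf("restart requested\n");
		return;
	}
	demo_flags = DEMO_EXECUTING;
	printf("IPI chain started\n");
}

static void demo_ipi_handler(int more_cpus_to_visit)
{
	if (demo_flags & DEMO_RESTART) {
		demo_flags &= ~DEMO_RESTART;    /* rewind to the source CPU */
		printf("chain rewound\n");
	}
	if (!more_cpus_to_visit)
		demo_flags &= ~DEMO_EXECUTING;  /* chain is complete */
}

int main(void)
{
	demo_tell_cpu_to_push();                /* starts the chain    */
	demo_tell_cpu_to_push();                /* piggybacks restart  */
	demo_ipi_handler(1);                    /* clears the restart  */
	demo_ipi_handler(0);                    /* chain finishes      */
	printf("flags=%d\n", demo_flags);       /* 0 */
	return 0;
}
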
21178046d680SPeter Zijlstra static void pull_rt_task(struct rq *this_rq)
2118391e43daSPeter Zijlstra {
21198046d680SPeter Zijlstra 	int this_cpu = this_rq->cpu, cpu;
21208046d680SPeter Zijlstra 	bool resched = false;
2121391e43daSPeter Zijlstra 	struct task_struct *p;
2122391e43daSPeter Zijlstra 	struct rq *src_rq;
2123391e43daSPeter Zijlstra 
2124391e43daSPeter Zijlstra 	if (likely(!rt_overloaded(this_rq)))
21258046d680SPeter Zijlstra 		return;
2126391e43daSPeter Zijlstra 
21277c3f2ab7SPeter Zijlstra 	/*
21287c3f2ab7SPeter Zijlstra 	 * Match the barrier from rt_set_overloaded; this guarantees that if we
21297c3f2ab7SPeter Zijlstra 	 * see overloaded we must also see the rto_mask bit.
21307c3f2ab7SPeter Zijlstra 	 */
21317c3f2ab7SPeter Zijlstra 	smp_rmb();
21327c3f2ab7SPeter Zijlstra 
2133b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
2134b6366f04SSteven Rostedt 	if (sched_feat(RT_PUSH_IPI)) {
2135b6366f04SSteven Rostedt 		tell_cpu_to_push(this_rq);
21368046d680SPeter Zijlstra 		return;
2137b6366f04SSteven Rostedt 	}
2138b6366f04SSteven Rostedt #endif
2139b6366f04SSteven Rostedt 
2140391e43daSPeter Zijlstra 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
2141391e43daSPeter Zijlstra 		if (this_cpu == cpu)
2142391e43daSPeter Zijlstra 			continue;
2143391e43daSPeter Zijlstra 
2144391e43daSPeter Zijlstra 		src_rq = cpu_rq(cpu);
2145391e43daSPeter Zijlstra 
2146391e43daSPeter Zijlstra 		/*
2147391e43daSPeter Zijlstra 		 * Don't bother taking the src_rq->lock if the next highest
2148391e43daSPeter Zijlstra 		 * task is known to be lower-priority than our current task.
2149391e43daSPeter Zijlstra 		 * This may look racy, but if this value is about to go
2150391e43daSPeter Zijlstra 		 * logically higher, the src_rq will push this task away.
2151391e43daSPeter Zijlstra 		 * And if it's going logically lower, we do not care.
2152391e43daSPeter Zijlstra 		 */
2153391e43daSPeter Zijlstra 		if (src_rq->rt.highest_prio.next >=
2154391e43daSPeter Zijlstra 		    this_rq->rt.highest_prio.curr)
2155391e43daSPeter Zijlstra 			continue;
2156391e43daSPeter Zijlstra 
2157391e43daSPeter Zijlstra 		/*
2158391e43daSPeter Zijlstra 		 * We can potentially drop this_rq's lock in
2159391e43daSPeter Zijlstra 		 * double_lock_balance, and another CPU could
2160391e43daSPeter Zijlstra 		 * alter this_rq
2161391e43daSPeter Zijlstra 		 */
2162391e43daSPeter Zijlstra 		double_lock_balance(this_rq, src_rq);
2163391e43daSPeter Zijlstra 
2164391e43daSPeter Zijlstra 		/*
2165e23ee747SKirill Tkhai 		 * We can only pull a task that is pushable
2166e23ee747SKirill Tkhai 		 * on its rq, and no others.
2167391e43daSPeter Zijlstra 		 */
2168e23ee747SKirill Tkhai 		p = pick_highest_pushable_task(src_rq, this_cpu);
2169391e43daSPeter Zijlstra 
2170391e43daSPeter Zijlstra 		/*
2171391e43daSPeter Zijlstra 		 * Do we have an RT task that preempts
2172391e43daSPeter Zijlstra 		 * the to-be-scheduled task?
2173391e43daSPeter Zijlstra 		 */
2174391e43daSPeter Zijlstra 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2175391e43daSPeter Zijlstra 			WARN_ON(p == src_rq->curr);
2176da0c1e65SKirill Tkhai 			WARN_ON(!task_on_rq_queued(p));
2177391e43daSPeter Zijlstra 
2178391e43daSPeter Zijlstra 			/*
2179391e43daSPeter Zijlstra 			 * There's a chance that p is higher in priority
2180391e43daSPeter Zijlstra 			 * than what's currently running on its CPU.
2181391e43daSPeter Zijlstra 			 * This just means that p is waking up and hasn't
2182391e43daSPeter Zijlstra 			 * had a chance to schedule yet. We only pull
2183391e43daSPeter Zijlstra 			 * p if it is lower in priority than the
2184391e43daSPeter Zijlstra 			 * current task on the run queue.
2185391e43daSPeter Zijlstra 			 */
2186391e43daSPeter Zijlstra 			if (p->prio < src_rq->curr->prio)
2187391e43daSPeter Zijlstra 				goto skip;
2188391e43daSPeter Zijlstra 
21898046d680SPeter Zijlstra 			resched = true;
2190391e43daSPeter Zijlstra 
2191391e43daSPeter Zijlstra 			deactivate_task(src_rq, p, 0);
2192391e43daSPeter Zijlstra 			set_task_cpu(p, this_cpu);
2193391e43daSPeter Zijlstra 			activate_task(this_rq, p, 0);
2194391e43daSPeter Zijlstra 			/*
2195391e43daSPeter Zijlstra 			 * We continue with the search, just in
2196391e43daSPeter Zijlstra 			 * case there's an even higher prio task
2197391e43daSPeter Zijlstra 			 * in another runqueue. (low likelihood
2198391e43daSPeter Zijlstra 			 * but possible)
2199391e43daSPeter Zijlstra 			 */
2200391e43daSPeter Zijlstra 		}
2201391e43daSPeter Zijlstra skip:
2202391e43daSPeter Zijlstra 		double_unlock_balance(this_rq, src_rq);
2203391e43daSPeter Zijlstra 	}
2204391e43daSPeter Zijlstra 
22058046d680SPeter Zijlstra 	if (resched)
22068046d680SPeter Zijlstra 		resched_curr(this_rq);
2207391e43daSPeter Zijlstra }
2208391e43daSPeter Zijlstra 
2209391e43daSPeter Zijlstra /*
2210391e43daSPeter Zijlstra  * If we are not running and we are not going to reschedule soon, we should
2211391e43daSPeter Zijlstra  * try to push tasks away now.
2212391e43daSPeter Zijlstra  */
2213391e43daSPeter Zijlstra static void task_woken_rt(struct rq *rq, struct task_struct *p)
2214391e43daSPeter Zijlstra {
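	/*
	 * A worked example with illustrative numbers (lower ->prio value
	 * means higher priority): rq->curr is an RT task at prio 10 and p
	 * wakes at prio 20 with nr_cpus_allowed > 1. Since curr->prio <=
	 * p->prio, p cannot preempt curr here, so we try to push p to
	 * another CPU rather than leave it queued behind curr.
	 */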
2215391e43daSPeter Zijlstra 	if (!task_running(rq, p) &&
2216391e43daSPeter Zijlstra 	    !test_tsk_need_resched(rq->curr) &&
22174b53a341SIngo Molnar 	    p->nr_cpus_allowed > 1 &&
22181baca4ceSJuri Lelli 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
22194b53a341SIngo Molnar 	    (rq->curr->nr_cpus_allowed < 2 ||
2220391e43daSPeter Zijlstra 	     rq->curr->prio <= p->prio))
2221391e43daSPeter Zijlstra 		push_rt_tasks(rq);
2222391e43daSPeter Zijlstra }
2223391e43daSPeter Zijlstra 
2224391e43daSPeter Zijlstra /* Assumes rq->lock is held */
2225391e43daSPeter Zijlstra static void rq_online_rt(struct rq *rq)
2226391e43daSPeter Zijlstra {
2227391e43daSPeter Zijlstra 	if (rq->rt.overloaded)
2228391e43daSPeter Zijlstra 		rt_set_overload(rq);
2229391e43daSPeter Zijlstra 
2230391e43daSPeter Zijlstra 	__enable_runtime(rq);
2231391e43daSPeter Zijlstra 
2232391e43daSPeter Zijlstra 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2233391e43daSPeter Zijlstra }
2234391e43daSPeter Zijlstra 
2235391e43daSPeter Zijlstra /* Assumes rq->lock is held */
2236391e43daSPeter Zijlstra static void rq_offline_rt(struct rq *rq)
2237391e43daSPeter Zijlstra {
2238391e43daSPeter Zijlstra 	if (rq->rt.overloaded)
2239391e43daSPeter Zijlstra 		rt_clear_overload(rq);
2240391e43daSPeter Zijlstra 
2241391e43daSPeter Zijlstra 	__disable_runtime(rq);
2242391e43daSPeter Zijlstra 
2243391e43daSPeter Zijlstra 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2244391e43daSPeter Zijlstra }
2245391e43daSPeter Zijlstra 
2246391e43daSPeter Zijlstra /*
2247391e43daSPeter Zijlstra  * When switching from the RT queue, we bring ourselves to a position
2248391e43daSPeter Zijlstra  * where we might want to pull RT tasks from other runqueues.
2249391e43daSPeter Zijlstra  */
2250391e43daSPeter Zijlstra static void switched_from_rt(struct rq *rq, struct task_struct *p)
2251391e43daSPeter Zijlstra {
2252391e43daSPeter Zijlstra 	/*
2253391e43daSPeter Zijlstra 	 * If there are other RT tasks then we will reschedule
2254391e43daSPeter Zijlstra 	 * and the scheduling of the other RT tasks will handle
2255391e43daSPeter Zijlstra 	 * the balancing. But if we are the last RT task
2256391e43daSPeter Zijlstra 	 * we may need to handle the pulling of RT tasks
2257391e43daSPeter Zijlstra 	 * now.
2258391e43daSPeter Zijlstra 	 */
2259da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
22601158ddb5SKirill Tkhai 		return;
22611158ddb5SKirill Tkhai 
2262fd7a4bedSPeter Zijlstra 	queue_pull_task(rq);
2263391e43daSPeter Zijlstra }
2264391e43daSPeter Zijlstra 
226511c785b7SLi Zefan void __init init_sched_rt_class(void)
2266391e43daSPeter Zijlstra {
2267391e43daSPeter Zijlstra 	unsigned int i;
2268391e43daSPeter Zijlstra 
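	/*
	 * local_cpu_mask is assumed to be the per-CPU scratch mask that
	 * find_lowest_rq() hands to cpupri_find(); allocating it on each
	 * CPU's own node keeps that hot lookup path node-local.
	 */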
2269391e43daSPeter Zijlstra 	for_each_possible_cpu(i) {
2270391e43daSPeter Zijlstra 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2271391e43daSPeter Zijlstra 					GFP_KERNEL, cpu_to_node(i));
2272391e43daSPeter Zijlstra 	}
2273391e43daSPeter Zijlstra }
2274391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2275391e43daSPeter Zijlstra 
2276391e43daSPeter Zijlstra /*
2277391e43daSPeter Zijlstra  * When switching a task to RT, we may overload the runqueue
2278391e43daSPeter Zijlstra  * with RT tasks. In this case we try to push them off to
2279391e43daSPeter Zijlstra  * other runqueues.
2280391e43daSPeter Zijlstra  */
2281391e43daSPeter Zijlstra static void switched_to_rt(struct rq *rq, struct task_struct *p)
2282391e43daSPeter Zijlstra {
2283391e43daSPeter Zijlstra 	/*
2284391e43daSPeter Zijlstra 	 * If we are already running, then there's nothing
2285391e43daSPeter Zijlstra 	 * that needs to be done. But if we are not running
2286391e43daSPeter Zijlstra 	 * we may need to preempt the currently running task.
2287391e43daSPeter Zijlstra 	 * If that currently running task is also an RT task
2288391e43daSPeter Zijlstra 	 * then see if we can move to another run queue.
2289391e43daSPeter Zijlstra 	 */
2290da0c1e65SKirill Tkhai 	if (task_on_rq_queued(p) && rq->curr != p) {
2291391e43daSPeter Zijlstra #ifdef CONFIG_SMP
22924b53a341SIngo Molnar 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2293fd7a4bedSPeter Zijlstra 			queue_push_tasks(rq);
2294619bd4a7SSebastian Andrzej Siewior #endif /* CONFIG_SMP */
2295fd7a4bedSPeter Zijlstra 		if (p->prio < rq->curr->prio)
22968875125eSKirill Tkhai 			resched_curr(rq);
2297391e43daSPeter Zijlstra 	}
2298391e43daSPeter Zijlstra }
2299391e43daSPeter Zijlstra 
2300391e43daSPeter Zijlstra /*
2301391e43daSPeter Zijlstra  * Priority of the task has changed. This may cause
2302391e43daSPeter Zijlstra  * us to initiate a push or pull.
2303391e43daSPeter Zijlstra  */
2304391e43daSPeter Zijlstra static void
2305391e43daSPeter Zijlstra prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2306391e43daSPeter Zijlstra {
2307da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p))
2308391e43daSPeter Zijlstra 		return;
2309391e43daSPeter Zijlstra 
2310391e43daSPeter Zijlstra 	if (rq->curr == p) {
2311391e43daSPeter Zijlstra #ifdef CONFIG_SMP
2312391e43daSPeter Zijlstra 		/*
2313391e43daSPeter Zijlstra 		 * If our priority decreases while running, we
2314391e43daSPeter Zijlstra 		 * may need to pull tasks to this runqueue.
2315391e43daSPeter Zijlstra 		 */
2316391e43daSPeter Zijlstra 		if (oldprio < p->prio)
2317fd7a4bedSPeter Zijlstra 			queue_pull_task(rq);
2318fd7a4bedSPeter Zijlstra 
2319391e43daSPeter Zijlstra 		/*
2320391e43daSPeter Zijlstra 		 * If there's a higher priority task waiting to run
2321fd7a4bedSPeter Zijlstra 		 * then reschedule.
2322391e43daSPeter Zijlstra 		 */
2323fd7a4bedSPeter Zijlstra 		if (p->prio > rq->rt.highest_prio.curr)
23248875125eSKirill Tkhai 			resched_curr(rq);
2325391e43daSPeter Zijlstra #else
2326391e43daSPeter Zijlstra 		/* For UP simply resched on drop of prio */
2327391e43daSPeter Zijlstra 		if (oldprio < p->prio)
23288875125eSKirill Tkhai 			resched_curr(rq);
2329391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2330391e43daSPeter Zijlstra 	} else {
2331391e43daSPeter Zijlstra 		/*
2332391e43daSPeter Zijlstra 		 * This task is not running, but if it is
2333391e43daSPeter Zijlstra 		 * higher in priority than the currently running task
2334391e43daSPeter Zijlstra 		 * then reschedule.
2335391e43daSPeter Zijlstra 		 */
2336391e43daSPeter Zijlstra 		if (p->prio < rq->curr->prio)
23378875125eSKirill Tkhai 			resched_curr(rq);
2338391e43daSPeter Zijlstra 	}
2339391e43daSPeter Zijlstra }
2340391e43daSPeter Zijlstra 
2341b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS
2342391e43daSPeter Zijlstra static void watchdog(struct rq *rq, struct task_struct *p)
2343391e43daSPeter Zijlstra {
2344391e43daSPeter Zijlstra 	unsigned long soft, hard;
2345391e43daSPeter Zijlstra 
2346391e43daSPeter Zijlstra 	/* max may change after cur was read; this will be fixed next tick */
2347391e43daSPeter Zijlstra 	soft = task_rlimit(p, RLIMIT_RTTIME);
2348391e43daSPeter Zijlstra 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
2349391e43daSPeter Zijlstra 
2350391e43daSPeter Zijlstra 	if (soft != RLIM_INFINITY) {
2351391e43daSPeter Zijlstra 		unsigned long next;
2352391e43daSPeter Zijlstra 
235357d2aa00SYing Xue 		if (p->rt.watchdog_stamp != jiffies) {
2354391e43daSPeter Zijlstra 			p->rt.timeout++;
235557d2aa00SYing Xue 			p->rt.watchdog_stamp = jiffies;
235657d2aa00SYing Xue 		}
235757d2aa00SYing Xue 
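		/*
		 * Convert the microsecond rlimit into scheduler ticks.
		 * Illustrative arithmetic, assuming HZ=1000: one tick is
		 * USEC_PER_SEC/HZ = 1000us, so a 2s soft limit (2000000us)
		 * yields next = 2000 ticks; once p->rt.timeout exceeds
		 * that, the posix-cpu-timers code enforces RLIMIT_RTTIME.
		 */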
2358391e43daSPeter Zijlstra 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2359391e43daSPeter Zijlstra 		if (p->rt.timeout > next)
2360391e43daSPeter Zijlstra 			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2361391e43daSPeter Zijlstra 	}
2362391e43daSPeter Zijlstra }
2363b18b6a9cSNicolas Pitre #else
2364b18b6a9cSNicolas Pitre static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2365b18b6a9cSNicolas Pitre #endif
2366391e43daSPeter Zijlstra 
2367391e43daSPeter Zijlstra static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2368391e43daSPeter Zijlstra {
2369454c7999SColin Cross 	struct sched_rt_entity *rt_se = &p->rt;
2370454c7999SColin Cross 
2371391e43daSPeter Zijlstra 	update_curr_rt(rq);
2372391e43daSPeter Zijlstra 
2373391e43daSPeter Zijlstra 	watchdog(rq, p);
2374391e43daSPeter Zijlstra 
2375391e43daSPeter Zijlstra 	/*
2376391e43daSPeter Zijlstra 	 * RR tasks need a special form of timeslice management.
2377391e43daSPeter Zijlstra 	 * FIFO tasks have no timeslices.
2378391e43daSPeter Zijlstra 	 */
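	/*
	 * Illustrative numbers: with HZ=1000 the default timeslice is
	 * RR_TIMESLICE = 100 jiffies (100ms); the countdown below takes
	 * one jiffy off per tick.
	 */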
2379391e43daSPeter Zijlstra 	if (p->policy != SCHED_RR)
2380391e43daSPeter Zijlstra 		return;
2381391e43daSPeter Zijlstra 
2382391e43daSPeter Zijlstra 	if (--p->rt.time_slice)
2383391e43daSPeter Zijlstra 		return;
2384391e43daSPeter Zijlstra 
2385ce0dbbbbSClark Williams 	p->rt.time_slice = sched_rr_timeslice;
2386391e43daSPeter Zijlstra 
2387391e43daSPeter Zijlstra 	/*
2388e9aa39bbSLi Bin 	 * Requeue to the end of the queue if we (and all of our ancestors) are
2389e9aa39bbSLi Bin 	 * not the only element on the queue.
2390391e43daSPeter Zijlstra 	 */
2391454c7999SColin Cross 	for_each_sched_rt_entity(rt_se) {
2392454c7999SColin Cross 		if (rt_se->run_list.prev != rt_se->run_list.next) {
2393391e43daSPeter Zijlstra 			requeue_task_rt(rq, p, 0);
23948aa6f0ebSKirill Tkhai 			resched_curr(rq);
2395454c7999SColin Cross 			return;
2396454c7999SColin Cross 		}
2397391e43daSPeter Zijlstra 	}
2398391e43daSPeter Zijlstra }
2399391e43daSPeter Zijlstra 
2400391e43daSPeter Zijlstra static void set_curr_task_rt(struct rq *rq)
2401391e43daSPeter Zijlstra {
2402391e43daSPeter Zijlstra 	struct task_struct *p = rq->curr;
2403391e43daSPeter Zijlstra 
240478becc27SFrederic Weisbecker 	p->se.exec_start = rq_clock_task(rq);
2405391e43daSPeter Zijlstra 
2406391e43daSPeter Zijlstra 	/* The running task is never eligible for pushing */
2407391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
2408391e43daSPeter Zijlstra }
2409391e43daSPeter Zijlstra 
2410391e43daSPeter Zijlstra static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2411391e43daSPeter Zijlstra {
2412391e43daSPeter Zijlstra 	/*
2413391e43daSPeter Zijlstra 	 * Time slice is 0 for SCHED_FIFO tasks
2414391e43daSPeter Zijlstra 	 */
2415391e43daSPeter Zijlstra 	if (task->policy == SCHED_RR)
2416ce0dbbbbSClark Williams 		return sched_rr_timeslice;
2417391e43daSPeter Zijlstra 	else
2418391e43daSPeter Zijlstra 		return 0;
2419391e43daSPeter Zijlstra }
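/*
 * This is what sys_sched_rr_get_interval() ultimately reports. Illustrative
 * values: with HZ=250 a SCHED_RR task reports the default sched_rr_timeslice
 * of 25 jiffies (100ms), while a SCHED_FIFO task reports 0.
 */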
2420391e43daSPeter Zijlstra 
2421391e43daSPeter Zijlstra const struct sched_class rt_sched_class = {
2422391e43daSPeter Zijlstra 	.next			= &fair_sched_class,
2423391e43daSPeter Zijlstra 	.enqueue_task		= enqueue_task_rt,
2424391e43daSPeter Zijlstra 	.dequeue_task		= dequeue_task_rt,
2425391e43daSPeter Zijlstra 	.yield_task		= yield_task_rt,
2426391e43daSPeter Zijlstra 
2427391e43daSPeter Zijlstra 	.check_preempt_curr	= check_preempt_curr_rt,
2428391e43daSPeter Zijlstra 
2429391e43daSPeter Zijlstra 	.pick_next_task		= pick_next_task_rt,
2430391e43daSPeter Zijlstra 	.put_prev_task		= put_prev_task_rt,
2431391e43daSPeter Zijlstra 
2432391e43daSPeter Zijlstra #ifdef CONFIG_SMP
2433391e43daSPeter Zijlstra 	.select_task_rq		= select_task_rq_rt,
2434391e43daSPeter Zijlstra 
24356c37067eSPeter Zijlstra 	.set_cpus_allowed       = set_cpus_allowed_common,
2436391e43daSPeter Zijlstra 	.rq_online              = rq_online_rt,
2437391e43daSPeter Zijlstra 	.rq_offline             = rq_offline_rt,
2438391e43daSPeter Zijlstra 	.task_woken		= task_woken_rt,
2439391e43daSPeter Zijlstra 	.switched_from		= switched_from_rt,
2440391e43daSPeter Zijlstra #endif
2441391e43daSPeter Zijlstra 
2442391e43daSPeter Zijlstra 	.set_curr_task          = set_curr_task_rt,
2443391e43daSPeter Zijlstra 	.task_tick		= task_tick_rt,
2444391e43daSPeter Zijlstra 
2445391e43daSPeter Zijlstra 	.get_rr_interval	= get_rr_interval_rt,
2446391e43daSPeter Zijlstra 
2447391e43daSPeter Zijlstra 	.prio_changed		= prio_changed_rt,
2448391e43daSPeter Zijlstra 	.switched_to		= switched_to_rt,
24496e998916SStanislaw Gruszka 
24506e998916SStanislaw Gruszka 	.update_curr		= update_curr_rt,
2451391e43daSPeter Zijlstra };
2452391e43daSPeter Zijlstra 
24538887cd99SNicolas Pitre #ifdef CONFIG_RT_GROUP_SCHED
24548887cd99SNicolas Pitre /*
24558887cd99SNicolas Pitre  * Ensure that the real-time constraints are schedulable.
24568887cd99SNicolas Pitre  */
24578887cd99SNicolas Pitre static DEFINE_MUTEX(rt_constraints_mutex);
24588887cd99SNicolas Pitre 
24598887cd99SNicolas Pitre /* Must be called with tasklist_lock held */
24608887cd99SNicolas Pitre static inline int tg_has_rt_tasks(struct task_group *tg)
24618887cd99SNicolas Pitre {
24628887cd99SNicolas Pitre 	struct task_struct *g, *p;
24638887cd99SNicolas Pitre 
24648887cd99SNicolas Pitre 	/*
24658887cd99SNicolas Pitre 	 * Autogroups do not have RT tasks; see autogroup_create().
24668887cd99SNicolas Pitre 	 */
24678887cd99SNicolas Pitre 	if (task_group_is_autogroup(tg))
24688887cd99SNicolas Pitre 		return 0;
24698887cd99SNicolas Pitre 
24708887cd99SNicolas Pitre 	for_each_process_thread(g, p) {
24718887cd99SNicolas Pitre 		if (rt_task(p) && task_group(p) == tg)
24728887cd99SNicolas Pitre 			return 1;
24738887cd99SNicolas Pitre 	}
24748887cd99SNicolas Pitre 
24758887cd99SNicolas Pitre 	return 0;
24768887cd99SNicolas Pitre }
24778887cd99SNicolas Pitre 
24788887cd99SNicolas Pitre struct rt_schedulable_data {
24798887cd99SNicolas Pitre 	struct task_group *tg;
24808887cd99SNicolas Pitre 	u64 rt_period;
24818887cd99SNicolas Pitre 	u64 rt_runtime;
24828887cd99SNicolas Pitre };
24838887cd99SNicolas Pitre 
24848887cd99SNicolas Pitre static int tg_rt_schedulable(struct task_group *tg, void *data)
24858887cd99SNicolas Pitre {
24868887cd99SNicolas Pitre 	struct rt_schedulable_data *d = data;
24878887cd99SNicolas Pitre 	struct task_group *child;
24888887cd99SNicolas Pitre 	unsigned long total, sum = 0;
24898887cd99SNicolas Pitre 	u64 period, runtime;
24908887cd99SNicolas Pitre 
24918887cd99SNicolas Pitre 	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
24928887cd99SNicolas Pitre 	runtime = tg->rt_bandwidth.rt_runtime;
24938887cd99SNicolas Pitre 
24948887cd99SNicolas Pitre 	if (tg == d->tg) {
24958887cd99SNicolas Pitre 		period = d->rt_period;
24968887cd99SNicolas Pitre 		runtime = d->rt_runtime;
24978887cd99SNicolas Pitre 	}
24988887cd99SNicolas Pitre 
24998887cd99SNicolas Pitre 	/*
25008887cd99SNicolas Pitre 	 * Cannot have more runtime than the period.
25018887cd99SNicolas Pitre 	 */
25028887cd99SNicolas Pitre 	if (runtime > period && runtime != RUNTIME_INF)
25038887cd99SNicolas Pitre 		return -EINVAL;
25048887cd99SNicolas Pitre 
25058887cd99SNicolas Pitre 	/*
25068887cd99SNicolas Pitre 	 * Ensure we don't starve existing RT tasks.
25078887cd99SNicolas Pitre 	 */
25088887cd99SNicolas Pitre 	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
25098887cd99SNicolas Pitre 		return -EBUSY;
25108887cd99SNicolas Pitre 
25118887cd99SNicolas Pitre 	total = to_ratio(period, runtime);
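	/*
	 * to_ratio() is assumed to be the fixed-point helper from
	 * kernel/sched/core.c, returning runtime << BW_SHIFT / period
	 * with BW_SHIFT = 20. Worked example: the default global budget
	 * of 950000us runtime per 1000000us period gives roughly
	 * 0.95 * 2^20 = 996147.
	 */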
25128887cd99SNicolas Pitre 
25138887cd99SNicolas Pitre 	/*
25148887cd99SNicolas Pitre 	 * Nobody can have more than the global setting allows.
25158887cd99SNicolas Pitre 	 */
25168887cd99SNicolas Pitre 	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
25178887cd99SNicolas Pitre 		return -EINVAL;
25188887cd99SNicolas Pitre 
25198887cd99SNicolas Pitre 	/*
25208887cd99SNicolas Pitre 	 * The sum of our children's runtime should not exceed our own.
25218887cd99SNicolas Pitre 	 */
25228887cd99SNicolas Pitre 	list_for_each_entry_rcu(child, &tg->children, siblings) {
25238887cd99SNicolas Pitre 		period = ktime_to_ns(child->rt_bandwidth.rt_period);
25248887cd99SNicolas Pitre 		runtime = child->rt_bandwidth.rt_runtime;
25258887cd99SNicolas Pitre 
25268887cd99SNicolas Pitre 		if (child == d->tg) {
25278887cd99SNicolas Pitre 			period = d->rt_period;
25288887cd99SNicolas Pitre 			runtime = d->rt_runtime;
25298887cd99SNicolas Pitre 		}
25308887cd99SNicolas Pitre 
25318887cd99SNicolas Pitre 		sum += to_ratio(period, runtime);
25328887cd99SNicolas Pitre 	}
25338887cd99SNicolas Pitre 
25348887cd99SNicolas Pitre 	if (sum > total)
25358887cd99SNicolas Pitre 		return -EINVAL;
25368887cd99SNicolas Pitre 
25378887cd99SNicolas Pitre 	return 0;
25388887cd99SNicolas Pitre }
25398887cd99SNicolas Pitre 
25408887cd99SNicolas Pitre static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
25418887cd99SNicolas Pitre {
25428887cd99SNicolas Pitre 	int ret;
25438887cd99SNicolas Pitre 
25448887cd99SNicolas Pitre 	struct rt_schedulable_data data = {
25458887cd99SNicolas Pitre 		.tg = tg,
25468887cd99SNicolas Pitre 		.rt_period = period,
25478887cd99SNicolas Pitre 		.rt_runtime = runtime,
25488887cd99SNicolas Pitre 	};
25498887cd99SNicolas Pitre 
25508887cd99SNicolas Pitre 	rcu_read_lock();
25518887cd99SNicolas Pitre 	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
25528887cd99SNicolas Pitre 	rcu_read_unlock();
25538887cd99SNicolas Pitre 
25548887cd99SNicolas Pitre 	return ret;
25558887cd99SNicolas Pitre }
25568887cd99SNicolas Pitre 
25578887cd99SNicolas Pitre static int tg_set_rt_bandwidth(struct task_group *tg,
25588887cd99SNicolas Pitre 		u64 rt_period, u64 rt_runtime)
25598887cd99SNicolas Pitre {
25608887cd99SNicolas Pitre 	int i, err = 0;
25618887cd99SNicolas Pitre 
25628887cd99SNicolas Pitre 	/*
25638887cd99SNicolas Pitre 	 * Disallowing the root group RT runtime is BAD; it would prevent the
25648887cd99SNicolas Pitre 	 * kernel from creating (and/or operating) RT threads.
25658887cd99SNicolas Pitre 	 */
25668887cd99SNicolas Pitre 	if (tg == &root_task_group && rt_runtime == 0)
25678887cd99SNicolas Pitre 		return -EINVAL;
25688887cd99SNicolas Pitre 
25698887cd99SNicolas Pitre 	/* No period doesn't make any sense. */
25708887cd99SNicolas Pitre 	/* A zero period doesn't make any sense. */
25718887cd99SNicolas Pitre 		return -EINVAL;
25728887cd99SNicolas Pitre 
25738887cd99SNicolas Pitre 	mutex_lock(&rt_constraints_mutex);
25748887cd99SNicolas Pitre 	read_lock(&tasklist_lock);
25758887cd99SNicolas Pitre 	err = __rt_schedulable(tg, rt_period, rt_runtime);
25768887cd99SNicolas Pitre 	if (err)
25778887cd99SNicolas Pitre 		goto unlock;
25788887cd99SNicolas Pitre 
25798887cd99SNicolas Pitre 	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
25808887cd99SNicolas Pitre 	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
25818887cd99SNicolas Pitre 	tg->rt_bandwidth.rt_runtime = rt_runtime;
25828887cd99SNicolas Pitre 
25838887cd99SNicolas Pitre 	for_each_possible_cpu(i) {
25848887cd99SNicolas Pitre 		struct rt_rq *rt_rq = tg->rt_rq[i];
25858887cd99SNicolas Pitre 
25868887cd99SNicolas Pitre 		raw_spin_lock(&rt_rq->rt_runtime_lock);
25878887cd99SNicolas Pitre 		rt_rq->rt_runtime = rt_runtime;
25888887cd99SNicolas Pitre 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
25898887cd99SNicolas Pitre 	}
25908887cd99SNicolas Pitre 	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
25918887cd99SNicolas Pitre unlock:
25928887cd99SNicolas Pitre 	read_unlock(&tasklist_lock);
25938887cd99SNicolas Pitre 	mutex_unlock(&rt_constraints_mutex);
25948887cd99SNicolas Pitre 
25958887cd99SNicolas Pitre 	return err;
25968887cd99SNicolas Pitre }
25978887cd99SNicolas Pitre 
25988887cd99SNicolas Pitre int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
25998887cd99SNicolas Pitre {
26008887cd99SNicolas Pitre 	u64 rt_runtime, rt_period;
26018887cd99SNicolas Pitre 
26028887cd99SNicolas Pitre 	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
26038887cd99SNicolas Pitre 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
26048887cd99SNicolas Pitre 	if (rt_runtime_us < 0)
26058887cd99SNicolas Pitre 		rt_runtime = RUNTIME_INF;
26068887cd99SNicolas Pitre 
26078887cd99SNicolas Pitre 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
26088887cd99SNicolas Pitre }
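/*
 * Typically reached through the cgroup-v1 cpu controller's cpu.rt_runtime_us
 * file (assumed plumbing). Example: writing 500000 stores 500000us *
 * NSEC_PER_USEC = 500ms of runtime per period; writing -1 selects
 * RUNTIME_INF, i.e. no group throttling.
 */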
26098887cd99SNicolas Pitre 
26108887cd99SNicolas Pitre long sched_group_rt_runtime(struct task_group *tg)
26118887cd99SNicolas Pitre {
26128887cd99SNicolas Pitre 	u64 rt_runtime_us;
26138887cd99SNicolas Pitre 
26148887cd99SNicolas Pitre 	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
26158887cd99SNicolas Pitre 		return -1;
26168887cd99SNicolas Pitre 
26178887cd99SNicolas Pitre 	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
26188887cd99SNicolas Pitre 	do_div(rt_runtime_us, NSEC_PER_USEC);
26198887cd99SNicolas Pitre 	return rt_runtime_us;
26208887cd99SNicolas Pitre }
26218887cd99SNicolas Pitre 
26228887cd99SNicolas Pitre int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
26238887cd99SNicolas Pitre {
26248887cd99SNicolas Pitre 	u64 rt_runtime, rt_period;
26258887cd99SNicolas Pitre 
26268887cd99SNicolas Pitre 	rt_period = rt_period_us * NSEC_PER_USEC;
26278887cd99SNicolas Pitre 	rt_runtime = tg->rt_bandwidth.rt_runtime;
26288887cd99SNicolas Pitre 
26298887cd99SNicolas Pitre 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
26308887cd99SNicolas Pitre }
26318887cd99SNicolas Pitre 
26328887cd99SNicolas Pitre long sched_group_rt_period(struct task_group *tg)
26338887cd99SNicolas Pitre {
26348887cd99SNicolas Pitre 	u64 rt_period_us;
26358887cd99SNicolas Pitre 
26368887cd99SNicolas Pitre 	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
26378887cd99SNicolas Pitre 	do_div(rt_period_us, NSEC_PER_USEC);
26388887cd99SNicolas Pitre 	return rt_period_us;
26398887cd99SNicolas Pitre }
26408887cd99SNicolas Pitre 
26418887cd99SNicolas Pitre static int sched_rt_global_constraints(void)
26428887cd99SNicolas Pitre {
26438887cd99SNicolas Pitre 	int ret = 0;
26448887cd99SNicolas Pitre 
26458887cd99SNicolas Pitre 	mutex_lock(&rt_constraints_mutex);
26468887cd99SNicolas Pitre 	read_lock(&tasklist_lock);
26478887cd99SNicolas Pitre 	ret = __rt_schedulable(NULL, 0, 0);
26488887cd99SNicolas Pitre 	read_unlock(&tasklist_lock);
26498887cd99SNicolas Pitre 	mutex_unlock(&rt_constraints_mutex);
26508887cd99SNicolas Pitre 
26518887cd99SNicolas Pitre 	return ret;
26528887cd99SNicolas Pitre }
26538887cd99SNicolas Pitre 
26548887cd99SNicolas Pitre int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
26558887cd99SNicolas Pitre {
26568887cd99SNicolas Pitre 	/* Don't accept real-time tasks when there is no way for them to run */
26578887cd99SNicolas Pitre 	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
26588887cd99SNicolas Pitre 		return 0;
26598887cd99SNicolas Pitre 
26608887cd99SNicolas Pitre 	return 1;
26618887cd99SNicolas Pitre }
26628887cd99SNicolas Pitre 
26638887cd99SNicolas Pitre #else /* !CONFIG_RT_GROUP_SCHED */
26648887cd99SNicolas Pitre static int sched_rt_global_constraints(void)
26658887cd99SNicolas Pitre {
26668887cd99SNicolas Pitre 	unsigned long flags;
26678887cd99SNicolas Pitre 	int i;
26688887cd99SNicolas Pitre 
26698887cd99SNicolas Pitre 	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
26708887cd99SNicolas Pitre 	for_each_possible_cpu(i) {
26718887cd99SNicolas Pitre 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
26728887cd99SNicolas Pitre 
26738887cd99SNicolas Pitre 		raw_spin_lock(&rt_rq->rt_runtime_lock);
26748887cd99SNicolas Pitre 		rt_rq->rt_runtime = global_rt_runtime();
26758887cd99SNicolas Pitre 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
26768887cd99SNicolas Pitre 	}
26778887cd99SNicolas Pitre 	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
26788887cd99SNicolas Pitre 
26798887cd99SNicolas Pitre 	return 0;
26808887cd99SNicolas Pitre }
26818887cd99SNicolas Pitre #endif /* CONFIG_RT_GROUP_SCHED */
26828887cd99SNicolas Pitre 
26838887cd99SNicolas Pitre static int sched_rt_global_validate(void)
26848887cd99SNicolas Pitre {
26858887cd99SNicolas Pitre 	if (sysctl_sched_rt_period <= 0)
26868887cd99SNicolas Pitre 		return -EINVAL;
26878887cd99SNicolas Pitre 
26888887cd99SNicolas Pitre 	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
26898887cd99SNicolas Pitre 		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
26908887cd99SNicolas Pitre 		return -EINVAL;
26918887cd99SNicolas Pitre 
26928887cd99SNicolas Pitre 	return 0;
26938887cd99SNicolas Pitre }
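/*
 * Example against the usual defaults (sched_rt_period_us = 1000000,
 * sched_rt_runtime_us = 950000): the defaults pass, a runtime of 2000000us
 * with a 1000000us period fails with -EINVAL, and -1 (RUNTIME_INF) is
 * always accepted as "no throttling".
 */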
26948887cd99SNicolas Pitre 
26958887cd99SNicolas Pitre static void sched_rt_do_global(void)
26968887cd99SNicolas Pitre {
26978887cd99SNicolas Pitre 	def_rt_bandwidth.rt_runtime = global_rt_runtime();
26988887cd99SNicolas Pitre 	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
26998887cd99SNicolas Pitre }
27008887cd99SNicolas Pitre 
27018887cd99SNicolas Pitre int sched_rt_handler(struct ctl_table *table, int write,
27028887cd99SNicolas Pitre 		void __user *buffer, size_t *lenp,
27038887cd99SNicolas Pitre 		loff_t *ppos)
27048887cd99SNicolas Pitre {
27058887cd99SNicolas Pitre 	int old_period, old_runtime;
27068887cd99SNicolas Pitre 	static DEFINE_MUTEX(mutex);
27078887cd99SNicolas Pitre 	int ret;
27088887cd99SNicolas Pitre 
27098887cd99SNicolas Pitre 	mutex_lock(&mutex);
27108887cd99SNicolas Pitre 	old_period = sysctl_sched_rt_period;
27118887cd99SNicolas Pitre 	old_runtime = sysctl_sched_rt_runtime;
27128887cd99SNicolas Pitre 
27138887cd99SNicolas Pitre 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
27148887cd99SNicolas Pitre 
27158887cd99SNicolas Pitre 	if (!ret && write) {
27168887cd99SNicolas Pitre 		ret = sched_rt_global_validate();
27178887cd99SNicolas Pitre 		if (ret)
27188887cd99SNicolas Pitre 			goto undo;
27198887cd99SNicolas Pitre 
27208887cd99SNicolas Pitre 		ret = sched_dl_global_validate();
27218887cd99SNicolas Pitre 		if (ret)
27228887cd99SNicolas Pitre 			goto undo;
27238887cd99SNicolas Pitre 
27248887cd99SNicolas Pitre 		ret = sched_rt_global_constraints();
27258887cd99SNicolas Pitre 		if (ret)
27268887cd99SNicolas Pitre 			goto undo;
27278887cd99SNicolas Pitre 
27288887cd99SNicolas Pitre 		sched_rt_do_global();
27298887cd99SNicolas Pitre 		sched_dl_do_global();
27308887cd99SNicolas Pitre 	}
27318887cd99SNicolas Pitre 	if (0) {
27328887cd99SNicolas Pitre undo:
27338887cd99SNicolas Pitre 		sysctl_sched_rt_period = old_period;
27348887cd99SNicolas Pitre 		sysctl_sched_rt_runtime = old_runtime;
27358887cd99SNicolas Pitre 	}
27368887cd99SNicolas Pitre 	mutex_unlock(&mutex);
27378887cd99SNicolas Pitre 
27388887cd99SNicolas Pitre 	return ret;
27398887cd99SNicolas Pitre }
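/*
 * This handler sits behind /proc/sys/kernel/sched_rt_period_us and
 * /proc/sys/kernel/sched_rt_runtime_us. Example: writing 900000 to the
 * runtime file caps the RT class at 90% of each period; any value the
 * validators reject is rolled back via the undo: label above.
 */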
27408887cd99SNicolas Pitre 
27418887cd99SNicolas Pitre int sched_rr_handler(struct ctl_table *table, int write,
27428887cd99SNicolas Pitre 		void __user *buffer, size_t *lenp,
27438887cd99SNicolas Pitre 		loff_t *ppos)
27448887cd99SNicolas Pitre {
27458887cd99SNicolas Pitre 	int ret;
27468887cd99SNicolas Pitre 	static DEFINE_MUTEX(mutex);
27478887cd99SNicolas Pitre 
27488887cd99SNicolas Pitre 	mutex_lock(&mutex);
27498887cd99SNicolas Pitre 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
27508887cd99SNicolas Pitre 	/*
27518887cd99SNicolas Pitre 	 * Make sure that internally we keep the timeslice in jiffies.
27528887cd99SNicolas Pitre 	 * Also, writing zero resets the timeslice to default:
27538887cd99SNicolas Pitre 	 */
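	/*
	 * Illustrative conversion, assuming HZ=250: writing 100 (ms)
	 * yields sched_rr_timeslice = msecs_to_jiffies(100) = 25 jiffies,
	 * while writing zero or a negative value falls back to the
	 * RR_TIMESLICE default.
	 */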
27548887cd99SNicolas Pitre 	if (!ret && write) {
27558887cd99SNicolas Pitre 		sched_rr_timeslice =
27568887cd99SNicolas Pitre 			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
27578887cd99SNicolas Pitre 			msecs_to_jiffies(sysctl_sched_rr_timeslice);
27588887cd99SNicolas Pitre 	}
27598887cd99SNicolas Pitre 	mutex_unlock(&mutex);
27608887cd99SNicolas Pitre 	return ret;
27618887cd99SNicolas Pitre }
27628887cd99SNicolas Pitre 
2763391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
2764391e43daSPeter Zijlstra extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2765391e43daSPeter Zijlstra 
2766391e43daSPeter Zijlstra void print_rt_stats(struct seq_file *m, int cpu)
2767391e43daSPeter Zijlstra {
2768391e43daSPeter Zijlstra 	rt_rq_iter_t iter;
2769391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
2770391e43daSPeter Zijlstra 
2771391e43daSPeter Zijlstra 	rcu_read_lock();
2772391e43daSPeter Zijlstra 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2773391e43daSPeter Zijlstra 		print_rt_rq(m, cpu, rt_rq);
2774391e43daSPeter Zijlstra 	rcu_read_unlock();
2775391e43daSPeter Zijlstra }
2776391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_DEBUG */
2777