xref: /openbmc/linux/kernel/sched/rt.c (revision 3b23dc52)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
4  * policies)
5  */
6 #include "sched.h"
7 
8 int sched_rr_timeslice = RR_TIMESLICE;
9 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
10 
11 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
12 
13 struct rt_bandwidth def_rt_bandwidth;
14 
15 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
16 {
17 	struct rt_bandwidth *rt_b =
18 		container_of(timer, struct rt_bandwidth, rt_period_timer);
19 	int idle = 0;
20 	int overrun;
21 
22 	raw_spin_lock(&rt_b->rt_runtime_lock);
23 	for (;;) {
24 		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
25 		if (!overrun)
26 			break;
27 
28 		raw_spin_unlock(&rt_b->rt_runtime_lock);
29 		idle = do_sched_rt_period_timer(rt_b, overrun);
30 		raw_spin_lock(&rt_b->rt_runtime_lock);
31 	}
32 	if (idle)
33 		rt_b->rt_period_active = 0;
34 	raw_spin_unlock(&rt_b->rt_runtime_lock);
35 
36 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
37 }
38 
39 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
40 {
41 	rt_b->rt_period = ns_to_ktime(period);
42 	rt_b->rt_runtime = runtime;
43 
44 	raw_spin_lock_init(&rt_b->rt_runtime_lock);
45 
46 	hrtimer_init(&rt_b->rt_period_timer,
47 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
48 	rt_b->rt_period_timer.function = sched_rt_period_timer;
49 }
50 
51 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
52 {
53 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
54 		return;
55 
56 	raw_spin_lock(&rt_b->rt_runtime_lock);
57 	if (!rt_b->rt_period_active) {
58 		rt_b->rt_period_active = 1;
59 		/*
60 		 * SCHED_DEADLINE updates the bandwidth, as a run away
61 		 * RT task with a DL task could hog a CPU. But DL does
62 		 * not reset the period. If a deadline task was running
63 		 * without an RT task running, it can cause RT tasks to
64 		 * throttle when they start up. Kick the timer right away
65 		 * to update the period.
66 		 */
67 		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
68 		hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
69 	}
70 	raw_spin_unlock(&rt_b->rt_runtime_lock);
71 }
72 
73 void init_rt_rq(struct rt_rq *rt_rq)
74 {
75 	struct rt_prio_array *array;
76 	int i;
77 
78 	array = &rt_rq->active;
79 	for (i = 0; i < MAX_RT_PRIO; i++) {
80 		INIT_LIST_HEAD(array->queue + i);
81 		__clear_bit(i, array->bitmap);
82 	}
83 	/* delimiter for bitsearch: */
84 	__set_bit(MAX_RT_PRIO, array->bitmap);
85 
86 #if defined CONFIG_SMP
87 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
88 	rt_rq->highest_prio.next = MAX_RT_PRIO;
89 	rt_rq->rt_nr_migratory = 0;
90 	rt_rq->overloaded = 0;
91 	plist_head_init(&rt_rq->pushable_tasks);
92 #endif /* CONFIG_SMP */
93 	/* We start is dequeued state, because no RT tasks are queued */
94 	rt_rq->rt_queued = 0;
95 
96 	rt_rq->rt_time = 0;
97 	rt_rq->rt_throttled = 0;
98 	rt_rq->rt_runtime = 0;
99 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
100 }
101 
102 #ifdef CONFIG_RT_GROUP_SCHED
103 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
104 {
105 	hrtimer_cancel(&rt_b->rt_period_timer);
106 }
107 
108 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
109 
110 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
111 {
112 #ifdef CONFIG_SCHED_DEBUG
113 	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
114 #endif
115 	return container_of(rt_se, struct task_struct, rt);
116 }
117 
118 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
119 {
120 	return rt_rq->rq;
121 }
122 
123 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
124 {
125 	return rt_se->rt_rq;
126 }
127 
128 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
129 {
130 	struct rt_rq *rt_rq = rt_se->rt_rq;
131 
132 	return rt_rq->rq;
133 }
134 
135 void free_rt_sched_group(struct task_group *tg)
136 {
137 	int i;
138 
139 	if (tg->rt_se)
140 		destroy_rt_bandwidth(&tg->rt_bandwidth);
141 
142 	for_each_possible_cpu(i) {
143 		if (tg->rt_rq)
144 			kfree(tg->rt_rq[i]);
145 		if (tg->rt_se)
146 			kfree(tg->rt_se[i]);
147 	}
148 
149 	kfree(tg->rt_rq);
150 	kfree(tg->rt_se);
151 }
152 
153 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
154 		struct sched_rt_entity *rt_se, int cpu,
155 		struct sched_rt_entity *parent)
156 {
157 	struct rq *rq = cpu_rq(cpu);
158 
159 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
160 	rt_rq->rt_nr_boosted = 0;
161 	rt_rq->rq = rq;
162 	rt_rq->tg = tg;
163 
164 	tg->rt_rq[cpu] = rt_rq;
165 	tg->rt_se[cpu] = rt_se;
166 
167 	if (!rt_se)
168 		return;
169 
170 	if (!parent)
171 		rt_se->rt_rq = &rq->rt;
172 	else
173 		rt_se->rt_rq = parent->my_q;
174 
175 	rt_se->my_q = rt_rq;
176 	rt_se->parent = parent;
177 	INIT_LIST_HEAD(&rt_se->run_list);
178 }
179 
180 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
181 {
182 	struct rt_rq *rt_rq;
183 	struct sched_rt_entity *rt_se;
184 	int i;
185 
186 	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
187 	if (!tg->rt_rq)
188 		goto err;
189 	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
190 	if (!tg->rt_se)
191 		goto err;
192 
193 	init_rt_bandwidth(&tg->rt_bandwidth,
194 			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
195 
196 	for_each_possible_cpu(i) {
197 		rt_rq = kzalloc_node(sizeof(struct rt_rq),
198 				     GFP_KERNEL, cpu_to_node(i));
199 		if (!rt_rq)
200 			goto err;
201 
202 		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
203 				     GFP_KERNEL, cpu_to_node(i));
204 		if (!rt_se)
205 			goto err_free_rq;
206 
207 		init_rt_rq(rt_rq);
208 		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
209 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
210 	}
211 
212 	return 1;
213 
214 err_free_rq:
215 	kfree(rt_rq);
216 err:
217 	return 0;
218 }
219 
220 #else /* CONFIG_RT_GROUP_SCHED */
221 
222 #define rt_entity_is_task(rt_se) (1)
223 
224 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
225 {
226 	return container_of(rt_se, struct task_struct, rt);
227 }
228 
229 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
230 {
231 	return container_of(rt_rq, struct rq, rt);
232 }
233 
234 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
235 {
236 	struct task_struct *p = rt_task_of(rt_se);
237 
238 	return task_rq(p);
239 }
240 
241 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
242 {
243 	struct rq *rq = rq_of_rt_se(rt_se);
244 
245 	return &rq->rt;
246 }
247 
248 void free_rt_sched_group(struct task_group *tg) { }
249 
250 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
251 {
252 	return 1;
253 }
254 #endif /* CONFIG_RT_GROUP_SCHED */
255 
256 #ifdef CONFIG_SMP
257 
258 static void pull_rt_task(struct rq *this_rq);
259 
260 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
261 {
262 	/* Try to pull RT tasks here if we lower this rq's prio */
263 	return rq->rt.highest_prio.curr > prev->prio;
264 }
265 
266 static inline int rt_overloaded(struct rq *rq)
267 {
268 	return atomic_read(&rq->rd->rto_count);
269 }
270 
271 static inline void rt_set_overload(struct rq *rq)
272 {
273 	if (!rq->online)
274 		return;
275 
276 	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
277 	/*
278 	 * Make sure the mask is visible before we set
279 	 * the overload count. That is checked to determine
280 	 * if we should look at the mask. It would be a shame
281 	 * if we looked at the mask, but the mask was not
282 	 * updated yet.
283 	 *
284 	 * Matched by the barrier in pull_rt_task().
285 	 */
286 	smp_wmb();
287 	atomic_inc(&rq->rd->rto_count);
288 }
289 
290 static inline void rt_clear_overload(struct rq *rq)
291 {
292 	if (!rq->online)
293 		return;
294 
295 	/* the order here really doesn't matter */
296 	atomic_dec(&rq->rd->rto_count);
297 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
298 }
299 
300 static void update_rt_migration(struct rt_rq *rt_rq)
301 {
302 	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
303 		if (!rt_rq->overloaded) {
304 			rt_set_overload(rq_of_rt_rq(rt_rq));
305 			rt_rq->overloaded = 1;
306 		}
307 	} else if (rt_rq->overloaded) {
308 		rt_clear_overload(rq_of_rt_rq(rt_rq));
309 		rt_rq->overloaded = 0;
310 	}
311 }
312 
313 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
314 {
315 	struct task_struct *p;
316 
317 	if (!rt_entity_is_task(rt_se))
318 		return;
319 
320 	p = rt_task_of(rt_se);
321 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
322 
323 	rt_rq->rt_nr_total++;
324 	if (p->nr_cpus_allowed > 1)
325 		rt_rq->rt_nr_migratory++;
326 
327 	update_rt_migration(rt_rq);
328 }
329 
330 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
331 {
332 	struct task_struct *p;
333 
334 	if (!rt_entity_is_task(rt_se))
335 		return;
336 
337 	p = rt_task_of(rt_se);
338 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
339 
340 	rt_rq->rt_nr_total--;
341 	if (p->nr_cpus_allowed > 1)
342 		rt_rq->rt_nr_migratory--;
343 
344 	update_rt_migration(rt_rq);
345 }
346 
347 static inline int has_pushable_tasks(struct rq *rq)
348 {
349 	return !plist_head_empty(&rq->rt.pushable_tasks);
350 }
351 
352 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
353 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
354 
355 static void push_rt_tasks(struct rq *);
356 static void pull_rt_task(struct rq *);
357 
358 static inline void rt_queue_push_tasks(struct rq *rq)
359 {
360 	if (!has_pushable_tasks(rq))
361 		return;
362 
363 	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
364 }
365 
366 static inline void rt_queue_pull_task(struct rq *rq)
367 {
368 	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
369 }
370 
371 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
372 {
373 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
374 	plist_node_init(&p->pushable_tasks, p->prio);
375 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
376 
377 	/* Update the highest prio pushable task */
378 	if (p->prio < rq->rt.highest_prio.next)
379 		rq->rt.highest_prio.next = p->prio;
380 }
381 
382 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
383 {
384 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
385 
386 	/* Update the new highest prio pushable task */
387 	if (has_pushable_tasks(rq)) {
388 		p = plist_first_entry(&rq->rt.pushable_tasks,
389 				      struct task_struct, pushable_tasks);
390 		rq->rt.highest_prio.next = p->prio;
391 	} else
392 		rq->rt.highest_prio.next = MAX_RT_PRIO;
393 }
394 
395 #else
396 
397 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
398 {
399 }
400 
401 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
402 {
403 }
404 
405 static inline
406 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
407 {
408 }
409 
410 static inline
411 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
412 {
413 }
414 
415 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
416 {
417 	return false;
418 }
419 
420 static inline void pull_rt_task(struct rq *this_rq)
421 {
422 }
423 
424 static inline void rt_queue_push_tasks(struct rq *rq)
425 {
426 }
427 #endif /* CONFIG_SMP */
428 
429 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
430 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
431 
432 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
433 {
434 	return rt_se->on_rq;
435 }
436 
437 #ifdef CONFIG_RT_GROUP_SCHED
438 
439 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
440 {
441 	if (!rt_rq->tg)
442 		return RUNTIME_INF;
443 
444 	return rt_rq->rt_runtime;
445 }
446 
447 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
448 {
449 	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
450 }
451 
452 typedef struct task_group *rt_rq_iter_t;
453 
454 static inline struct task_group *next_task_group(struct task_group *tg)
455 {
456 	do {
457 		tg = list_entry_rcu(tg->list.next,
458 			typeof(struct task_group), list);
459 	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
460 
461 	if (&tg->list == &task_groups)
462 		tg = NULL;
463 
464 	return tg;
465 }
466 
467 #define for_each_rt_rq(rt_rq, iter, rq)					\
468 	for (iter = container_of(&task_groups, typeof(*iter), list);	\
469 		(iter = next_task_group(iter)) &&			\
470 		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
471 
472 #define for_each_sched_rt_entity(rt_se) \
473 	for (; rt_se; rt_se = rt_se->parent)
474 
475 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
476 {
477 	return rt_se->my_q;
478 }
479 
480 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
481 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
482 
483 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
484 {
485 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
486 	struct rq *rq = rq_of_rt_rq(rt_rq);
487 	struct sched_rt_entity *rt_se;
488 
489 	int cpu = cpu_of(rq);
490 
491 	rt_se = rt_rq->tg->rt_se[cpu];
492 
493 	if (rt_rq->rt_nr_running) {
494 		if (!rt_se)
495 			enqueue_top_rt_rq(rt_rq);
496 		else if (!on_rt_rq(rt_se))
497 			enqueue_rt_entity(rt_se, 0);
498 
499 		if (rt_rq->highest_prio.curr < curr->prio)
500 			resched_curr(rq);
501 	}
502 }
503 
504 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
505 {
506 	struct sched_rt_entity *rt_se;
507 	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
508 
509 	rt_se = rt_rq->tg->rt_se[cpu];
510 
511 	if (!rt_se) {
512 		dequeue_top_rt_rq(rt_rq);
513 		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
514 		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
515 	}
516 	else if (on_rt_rq(rt_se))
517 		dequeue_rt_entity(rt_se, 0);
518 }
519 
520 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
521 {
522 	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
523 }
524 
525 static int rt_se_boosted(struct sched_rt_entity *rt_se)
526 {
527 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
528 	struct task_struct *p;
529 
530 	if (rt_rq)
531 		return !!rt_rq->rt_nr_boosted;
532 
533 	p = rt_task_of(rt_se);
534 	return p->prio != p->normal_prio;
535 }
536 
537 #ifdef CONFIG_SMP
538 static inline const struct cpumask *sched_rt_period_mask(void)
539 {
540 	return this_rq()->rd->span;
541 }
542 #else
543 static inline const struct cpumask *sched_rt_period_mask(void)
544 {
545 	return cpu_online_mask;
546 }
547 #endif
548 
549 static inline
550 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
551 {
552 	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
553 }
554 
555 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
556 {
557 	return &rt_rq->tg->rt_bandwidth;
558 }
559 
560 #else /* !CONFIG_RT_GROUP_SCHED */
561 
562 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
563 {
564 	return rt_rq->rt_runtime;
565 }
566 
567 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
568 {
569 	return ktime_to_ns(def_rt_bandwidth.rt_period);
570 }
571 
572 typedef struct rt_rq *rt_rq_iter_t;
573 
574 #define for_each_rt_rq(rt_rq, iter, rq) \
575 	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
576 
577 #define for_each_sched_rt_entity(rt_se) \
578 	for (; rt_se; rt_se = NULL)
579 
580 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
581 {
582 	return NULL;
583 }
584 
585 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
586 {
587 	struct rq *rq = rq_of_rt_rq(rt_rq);
588 
589 	if (!rt_rq->rt_nr_running)
590 		return;
591 
592 	enqueue_top_rt_rq(rt_rq);
593 	resched_curr(rq);
594 }
595 
596 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
597 {
598 	dequeue_top_rt_rq(rt_rq);
599 }
600 
601 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
602 {
603 	return rt_rq->rt_throttled;
604 }
605 
606 static inline const struct cpumask *sched_rt_period_mask(void)
607 {
608 	return cpu_online_mask;
609 }
610 
611 static inline
612 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
613 {
614 	return &cpu_rq(cpu)->rt;
615 }
616 
617 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
618 {
619 	return &def_rt_bandwidth;
620 }
621 
622 #endif /* CONFIG_RT_GROUP_SCHED */
623 
624 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
625 {
626 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
627 
628 	return (hrtimer_active(&rt_b->rt_period_timer) ||
629 		rt_rq->rt_time < rt_b->rt_runtime);
630 }
631 
632 #ifdef CONFIG_SMP
633 /*
634  * We ran out of runtime, see if we can borrow some from our neighbours.
635  */
636 static void do_balance_runtime(struct rt_rq *rt_rq)
637 {
638 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
639 	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
640 	int i, weight;
641 	u64 rt_period;
642 
643 	weight = cpumask_weight(rd->span);
644 
645 	raw_spin_lock(&rt_b->rt_runtime_lock);
646 	rt_period = ktime_to_ns(rt_b->rt_period);
647 	for_each_cpu(i, rd->span) {
648 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
649 		s64 diff;
650 
651 		if (iter == rt_rq)
652 			continue;
653 
654 		raw_spin_lock(&iter->rt_runtime_lock);
655 		/*
656 		 * Either all rqs have inf runtime and there's nothing to steal
657 		 * or __disable_runtime() below sets a specific rq to inf to
658 		 * indicate its been disabled and disalow stealing.
659 		 */
660 		if (iter->rt_runtime == RUNTIME_INF)
661 			goto next;
662 
663 		/*
664 		 * From runqueues with spare time, take 1/n part of their
665 		 * spare time, but no more than our period.
666 		 */
667 		diff = iter->rt_runtime - iter->rt_time;
668 		if (diff > 0) {
669 			diff = div_u64((u64)diff, weight);
670 			if (rt_rq->rt_runtime + diff > rt_period)
671 				diff = rt_period - rt_rq->rt_runtime;
672 			iter->rt_runtime -= diff;
673 			rt_rq->rt_runtime += diff;
674 			if (rt_rq->rt_runtime == rt_period) {
675 				raw_spin_unlock(&iter->rt_runtime_lock);
676 				break;
677 			}
678 		}
679 next:
680 		raw_spin_unlock(&iter->rt_runtime_lock);
681 	}
682 	raw_spin_unlock(&rt_b->rt_runtime_lock);
683 }
684 
685 /*
686  * Ensure this RQ takes back all the runtime it lend to its neighbours.
687  */
688 static void __disable_runtime(struct rq *rq)
689 {
690 	struct root_domain *rd = rq->rd;
691 	rt_rq_iter_t iter;
692 	struct rt_rq *rt_rq;
693 
694 	if (unlikely(!scheduler_running))
695 		return;
696 
697 	for_each_rt_rq(rt_rq, iter, rq) {
698 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
699 		s64 want;
700 		int i;
701 
702 		raw_spin_lock(&rt_b->rt_runtime_lock);
703 		raw_spin_lock(&rt_rq->rt_runtime_lock);
704 		/*
705 		 * Either we're all inf and nobody needs to borrow, or we're
706 		 * already disabled and thus have nothing to do, or we have
707 		 * exactly the right amount of runtime to take out.
708 		 */
709 		if (rt_rq->rt_runtime == RUNTIME_INF ||
710 				rt_rq->rt_runtime == rt_b->rt_runtime)
711 			goto balanced;
712 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
713 
714 		/*
715 		 * Calculate the difference between what we started out with
716 		 * and what we current have, that's the amount of runtime
717 		 * we lend and now have to reclaim.
718 		 */
719 		want = rt_b->rt_runtime - rt_rq->rt_runtime;
720 
721 		/*
722 		 * Greedy reclaim, take back as much as we can.
723 		 */
724 		for_each_cpu(i, rd->span) {
725 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
726 			s64 diff;
727 
728 			/*
729 			 * Can't reclaim from ourselves or disabled runqueues.
730 			 */
731 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
732 				continue;
733 
734 			raw_spin_lock(&iter->rt_runtime_lock);
735 			if (want > 0) {
736 				diff = min_t(s64, iter->rt_runtime, want);
737 				iter->rt_runtime -= diff;
738 				want -= diff;
739 			} else {
740 				iter->rt_runtime -= want;
741 				want -= want;
742 			}
743 			raw_spin_unlock(&iter->rt_runtime_lock);
744 
745 			if (!want)
746 				break;
747 		}
748 
749 		raw_spin_lock(&rt_rq->rt_runtime_lock);
750 		/*
751 		 * We cannot be left wanting - that would mean some runtime
752 		 * leaked out of the system.
753 		 */
754 		BUG_ON(want);
755 balanced:
756 		/*
757 		 * Disable all the borrow logic by pretending we have inf
758 		 * runtime - in which case borrowing doesn't make sense.
759 		 */
760 		rt_rq->rt_runtime = RUNTIME_INF;
761 		rt_rq->rt_throttled = 0;
762 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
763 		raw_spin_unlock(&rt_b->rt_runtime_lock);
764 
765 		/* Make rt_rq available for pick_next_task() */
766 		sched_rt_rq_enqueue(rt_rq);
767 	}
768 }
769 
770 static void __enable_runtime(struct rq *rq)
771 {
772 	rt_rq_iter_t iter;
773 	struct rt_rq *rt_rq;
774 
775 	if (unlikely(!scheduler_running))
776 		return;
777 
778 	/*
779 	 * Reset each runqueue's bandwidth settings
780 	 */
781 	for_each_rt_rq(rt_rq, iter, rq) {
782 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
783 
784 		raw_spin_lock(&rt_b->rt_runtime_lock);
785 		raw_spin_lock(&rt_rq->rt_runtime_lock);
786 		rt_rq->rt_runtime = rt_b->rt_runtime;
787 		rt_rq->rt_time = 0;
788 		rt_rq->rt_throttled = 0;
789 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
790 		raw_spin_unlock(&rt_b->rt_runtime_lock);
791 	}
792 }
793 
794 static void balance_runtime(struct rt_rq *rt_rq)
795 {
796 	if (!sched_feat(RT_RUNTIME_SHARE))
797 		return;
798 
799 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
800 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
801 		do_balance_runtime(rt_rq);
802 		raw_spin_lock(&rt_rq->rt_runtime_lock);
803 	}
804 }
805 #else /* !CONFIG_SMP */
806 static inline void balance_runtime(struct rt_rq *rt_rq) {}
807 #endif /* CONFIG_SMP */
808 
809 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
810 {
811 	int i, idle = 1, throttled = 0;
812 	const struct cpumask *span;
813 
814 	span = sched_rt_period_mask();
815 #ifdef CONFIG_RT_GROUP_SCHED
816 	/*
817 	 * FIXME: isolated CPUs should really leave the root task group,
818 	 * whether they are isolcpus or were isolated via cpusets, lest
819 	 * the timer run on a CPU which does not service all runqueues,
820 	 * potentially leaving other CPUs indefinitely throttled.  If
821 	 * isolation is really required, the user will turn the throttle
822 	 * off to kill the perturbations it causes anyway.  Meanwhile,
823 	 * this maintains functionality for boot and/or troubleshooting.
824 	 */
825 	if (rt_b == &root_task_group.rt_bandwidth)
826 		span = cpu_online_mask;
827 #endif
828 	for_each_cpu(i, span) {
829 		int enqueue = 0;
830 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
831 		struct rq *rq = rq_of_rt_rq(rt_rq);
832 		int skip;
833 
834 		/*
835 		 * When span == cpu_online_mask, taking each rq->lock
836 		 * can be time-consuming. Try to avoid it when possible.
837 		 */
838 		raw_spin_lock(&rt_rq->rt_runtime_lock);
839 		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
840 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
841 		if (skip)
842 			continue;
843 
844 		raw_spin_lock(&rq->lock);
845 		update_rq_clock(rq);
846 
847 		if (rt_rq->rt_time) {
848 			u64 runtime;
849 
850 			raw_spin_lock(&rt_rq->rt_runtime_lock);
851 			if (rt_rq->rt_throttled)
852 				balance_runtime(rt_rq);
853 			runtime = rt_rq->rt_runtime;
854 			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
855 			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
856 				rt_rq->rt_throttled = 0;
857 				enqueue = 1;
858 
859 				/*
860 				 * When we're idle and a woken (rt) task is
861 				 * throttled check_preempt_curr() will set
862 				 * skip_update and the time between the wakeup
863 				 * and this unthrottle will get accounted as
864 				 * 'runtime'.
865 				 */
866 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
867 					rq_clock_cancel_skipupdate(rq);
868 			}
869 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
870 				idle = 0;
871 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
872 		} else if (rt_rq->rt_nr_running) {
873 			idle = 0;
874 			if (!rt_rq_throttled(rt_rq))
875 				enqueue = 1;
876 		}
877 		if (rt_rq->rt_throttled)
878 			throttled = 1;
879 
880 		if (enqueue)
881 			sched_rt_rq_enqueue(rt_rq);
882 		raw_spin_unlock(&rq->lock);
883 	}
884 
885 	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
886 		return 1;
887 
888 	return idle;
889 }
890 
891 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
892 {
893 #ifdef CONFIG_RT_GROUP_SCHED
894 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
895 
896 	if (rt_rq)
897 		return rt_rq->highest_prio.curr;
898 #endif
899 
900 	return rt_task_of(rt_se)->prio;
901 }
902 
903 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
904 {
905 	u64 runtime = sched_rt_runtime(rt_rq);
906 
907 	if (rt_rq->rt_throttled)
908 		return rt_rq_throttled(rt_rq);
909 
910 	if (runtime >= sched_rt_period(rt_rq))
911 		return 0;
912 
913 	balance_runtime(rt_rq);
914 	runtime = sched_rt_runtime(rt_rq);
915 	if (runtime == RUNTIME_INF)
916 		return 0;
917 
918 	if (rt_rq->rt_time > runtime) {
919 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
920 
921 		/*
922 		 * Don't actually throttle groups that have no runtime assigned
923 		 * but accrue some time due to boosting.
924 		 */
925 		if (likely(rt_b->rt_runtime)) {
926 			rt_rq->rt_throttled = 1;
927 			printk_deferred_once("sched: RT throttling activated\n");
928 		} else {
929 			/*
930 			 * In case we did anyway, make it go away,
931 			 * replenishment is a joke, since it will replenish us
932 			 * with exactly 0 ns.
933 			 */
934 			rt_rq->rt_time = 0;
935 		}
936 
937 		if (rt_rq_throttled(rt_rq)) {
938 			sched_rt_rq_dequeue(rt_rq);
939 			return 1;
940 		}
941 	}
942 
943 	return 0;
944 }
945 
946 /*
947  * Update the current task's runtime statistics. Skip current tasks that
948  * are not in our scheduling class.
949  */
950 static void update_curr_rt(struct rq *rq)
951 {
952 	struct task_struct *curr = rq->curr;
953 	struct sched_rt_entity *rt_se = &curr->rt;
954 	u64 delta_exec;
955 	u64 now;
956 
957 	if (curr->sched_class != &rt_sched_class)
958 		return;
959 
960 	now = rq_clock_task(rq);
961 	delta_exec = now - curr->se.exec_start;
962 	if (unlikely((s64)delta_exec <= 0))
963 		return;
964 
965 	schedstat_set(curr->se.statistics.exec_max,
966 		      max(curr->se.statistics.exec_max, delta_exec));
967 
968 	curr->se.sum_exec_runtime += delta_exec;
969 	account_group_exec_runtime(curr, delta_exec);
970 
971 	curr->se.exec_start = now;
972 	cgroup_account_cputime(curr, delta_exec);
973 
974 	sched_rt_avg_update(rq, delta_exec);
975 
976 	if (!rt_bandwidth_enabled())
977 		return;
978 
979 	for_each_sched_rt_entity(rt_se) {
980 		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
981 
982 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
983 			raw_spin_lock(&rt_rq->rt_runtime_lock);
984 			rt_rq->rt_time += delta_exec;
985 			if (sched_rt_runtime_exceeded(rt_rq))
986 				resched_curr(rq);
987 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
988 		}
989 	}
990 }
991 
992 static void
993 dequeue_top_rt_rq(struct rt_rq *rt_rq)
994 {
995 	struct rq *rq = rq_of_rt_rq(rt_rq);
996 
997 	BUG_ON(&rq->rt != rt_rq);
998 
999 	if (!rt_rq->rt_queued)
1000 		return;
1001 
1002 	BUG_ON(!rq->nr_running);
1003 
1004 	sub_nr_running(rq, rt_rq->rt_nr_running);
1005 	rt_rq->rt_queued = 0;
1006 
1007 }
1008 
1009 static void
1010 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1011 {
1012 	struct rq *rq = rq_of_rt_rq(rt_rq);
1013 
1014 	BUG_ON(&rq->rt != rt_rq);
1015 
1016 	if (rt_rq->rt_queued)
1017 		return;
1018 
1019 	if (rt_rq_throttled(rt_rq))
1020 		return;
1021 
1022 	if (rt_rq->rt_nr_running) {
1023 		add_nr_running(rq, rt_rq->rt_nr_running);
1024 		rt_rq->rt_queued = 1;
1025 	}
1026 
1027 	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1028 	cpufreq_update_util(rq, 0);
1029 }
1030 
1031 #if defined CONFIG_SMP
1032 
1033 static void
1034 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1035 {
1036 	struct rq *rq = rq_of_rt_rq(rt_rq);
1037 
1038 #ifdef CONFIG_RT_GROUP_SCHED
1039 	/*
1040 	 * Change rq's cpupri only if rt_rq is the top queue.
1041 	 */
1042 	if (&rq->rt != rt_rq)
1043 		return;
1044 #endif
1045 	if (rq->online && prio < prev_prio)
1046 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1047 }
1048 
1049 static void
1050 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1051 {
1052 	struct rq *rq = rq_of_rt_rq(rt_rq);
1053 
1054 #ifdef CONFIG_RT_GROUP_SCHED
1055 	/*
1056 	 * Change rq's cpupri only if rt_rq is the top queue.
1057 	 */
1058 	if (&rq->rt != rt_rq)
1059 		return;
1060 #endif
1061 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1062 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1063 }
1064 
1065 #else /* CONFIG_SMP */
1066 
1067 static inline
1068 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1069 static inline
1070 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1071 
1072 #endif /* CONFIG_SMP */
1073 
1074 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1075 static void
1076 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1077 {
1078 	int prev_prio = rt_rq->highest_prio.curr;
1079 
1080 	if (prio < prev_prio)
1081 		rt_rq->highest_prio.curr = prio;
1082 
1083 	inc_rt_prio_smp(rt_rq, prio, prev_prio);
1084 }
1085 
1086 static void
1087 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1088 {
1089 	int prev_prio = rt_rq->highest_prio.curr;
1090 
1091 	if (rt_rq->rt_nr_running) {
1092 
1093 		WARN_ON(prio < prev_prio);
1094 
1095 		/*
1096 		 * This may have been our highest task, and therefore
1097 		 * we may have some recomputation to do
1098 		 */
1099 		if (prio == prev_prio) {
1100 			struct rt_prio_array *array = &rt_rq->active;
1101 
1102 			rt_rq->highest_prio.curr =
1103 				sched_find_first_bit(array->bitmap);
1104 		}
1105 
1106 	} else
1107 		rt_rq->highest_prio.curr = MAX_RT_PRIO;
1108 
1109 	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1110 }
1111 
1112 #else
1113 
1114 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1115 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1116 
1117 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1118 
1119 #ifdef CONFIG_RT_GROUP_SCHED
1120 
1121 static void
1122 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1123 {
1124 	if (rt_se_boosted(rt_se))
1125 		rt_rq->rt_nr_boosted++;
1126 
1127 	if (rt_rq->tg)
1128 		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1129 }
1130 
1131 static void
1132 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1133 {
1134 	if (rt_se_boosted(rt_se))
1135 		rt_rq->rt_nr_boosted--;
1136 
1137 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1138 }
1139 
1140 #else /* CONFIG_RT_GROUP_SCHED */
1141 
1142 static void
1143 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1144 {
1145 	start_rt_bandwidth(&def_rt_bandwidth);
1146 }
1147 
1148 static inline
1149 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1150 
1151 #endif /* CONFIG_RT_GROUP_SCHED */
1152 
1153 static inline
1154 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1155 {
1156 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1157 
1158 	if (group_rq)
1159 		return group_rq->rt_nr_running;
1160 	else
1161 		return 1;
1162 }
1163 
1164 static inline
1165 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1166 {
1167 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1168 	struct task_struct *tsk;
1169 
1170 	if (group_rq)
1171 		return group_rq->rr_nr_running;
1172 
1173 	tsk = rt_task_of(rt_se);
1174 
1175 	return (tsk->policy == SCHED_RR) ? 1 : 0;
1176 }
1177 
1178 static inline
1179 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1180 {
1181 	int prio = rt_se_prio(rt_se);
1182 
1183 	WARN_ON(!rt_prio(prio));
1184 	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1185 	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1186 
1187 	inc_rt_prio(rt_rq, prio);
1188 	inc_rt_migration(rt_se, rt_rq);
1189 	inc_rt_group(rt_se, rt_rq);
1190 }
1191 
1192 static inline
1193 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1194 {
1195 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1196 	WARN_ON(!rt_rq->rt_nr_running);
1197 	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1198 	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1199 
1200 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1201 	dec_rt_migration(rt_se, rt_rq);
1202 	dec_rt_group(rt_se, rt_rq);
1203 }
1204 
1205 /*
1206  * Change rt_se->run_list location unless SAVE && !MOVE
1207  *
1208  * assumes ENQUEUE/DEQUEUE flags match
1209  */
1210 static inline bool move_entity(unsigned int flags)
1211 {
1212 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1213 		return false;
1214 
1215 	return true;
1216 }
1217 
1218 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1219 {
1220 	list_del_init(&rt_se->run_list);
1221 
1222 	if (list_empty(array->queue + rt_se_prio(rt_se)))
1223 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1224 
1225 	rt_se->on_list = 0;
1226 }
1227 
1228 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1229 {
1230 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1231 	struct rt_prio_array *array = &rt_rq->active;
1232 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1233 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1234 
1235 	/*
1236 	 * Don't enqueue the group if its throttled, or when empty.
1237 	 * The latter is a consequence of the former when a child group
1238 	 * get throttled and the current group doesn't have any other
1239 	 * active members.
1240 	 */
1241 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1242 		if (rt_se->on_list)
1243 			__delist_rt_entity(rt_se, array);
1244 		return;
1245 	}
1246 
1247 	if (move_entity(flags)) {
1248 		WARN_ON_ONCE(rt_se->on_list);
1249 		if (flags & ENQUEUE_HEAD)
1250 			list_add(&rt_se->run_list, queue);
1251 		else
1252 			list_add_tail(&rt_se->run_list, queue);
1253 
1254 		__set_bit(rt_se_prio(rt_se), array->bitmap);
1255 		rt_se->on_list = 1;
1256 	}
1257 	rt_se->on_rq = 1;
1258 
1259 	inc_rt_tasks(rt_se, rt_rq);
1260 }
1261 
1262 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1263 {
1264 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1265 	struct rt_prio_array *array = &rt_rq->active;
1266 
1267 	if (move_entity(flags)) {
1268 		WARN_ON_ONCE(!rt_se->on_list);
1269 		__delist_rt_entity(rt_se, array);
1270 	}
1271 	rt_se->on_rq = 0;
1272 
1273 	dec_rt_tasks(rt_se, rt_rq);
1274 }
1275 
1276 /*
1277  * Because the prio of an upper entry depends on the lower
1278  * entries, we must remove entries top - down.
1279  */
1280 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1281 {
1282 	struct sched_rt_entity *back = NULL;
1283 
1284 	for_each_sched_rt_entity(rt_se) {
1285 		rt_se->back = back;
1286 		back = rt_se;
1287 	}
1288 
1289 	dequeue_top_rt_rq(rt_rq_of_se(back));
1290 
1291 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1292 		if (on_rt_rq(rt_se))
1293 			__dequeue_rt_entity(rt_se, flags);
1294 	}
1295 }
1296 
1297 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1298 {
1299 	struct rq *rq = rq_of_rt_se(rt_se);
1300 
1301 	dequeue_rt_stack(rt_se, flags);
1302 	for_each_sched_rt_entity(rt_se)
1303 		__enqueue_rt_entity(rt_se, flags);
1304 	enqueue_top_rt_rq(&rq->rt);
1305 }
1306 
1307 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1308 {
1309 	struct rq *rq = rq_of_rt_se(rt_se);
1310 
1311 	dequeue_rt_stack(rt_se, flags);
1312 
1313 	for_each_sched_rt_entity(rt_se) {
1314 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1315 
1316 		if (rt_rq && rt_rq->rt_nr_running)
1317 			__enqueue_rt_entity(rt_se, flags);
1318 	}
1319 	enqueue_top_rt_rq(&rq->rt);
1320 }
1321 
1322 /*
1323  * Adding/removing a task to/from a priority array:
1324  */
1325 static void
1326 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1327 {
1328 	struct sched_rt_entity *rt_se = &p->rt;
1329 
1330 	if (flags & ENQUEUE_WAKEUP)
1331 		rt_se->timeout = 0;
1332 
1333 	enqueue_rt_entity(rt_se, flags);
1334 
1335 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1336 		enqueue_pushable_task(rq, p);
1337 }
1338 
1339 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1340 {
1341 	struct sched_rt_entity *rt_se = &p->rt;
1342 
1343 	update_curr_rt(rq);
1344 	dequeue_rt_entity(rt_se, flags);
1345 
1346 	dequeue_pushable_task(rq, p);
1347 }
1348 
1349 /*
1350  * Put task to the head or the end of the run list without the overhead of
1351  * dequeue followed by enqueue.
1352  */
1353 static void
1354 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1355 {
1356 	if (on_rt_rq(rt_se)) {
1357 		struct rt_prio_array *array = &rt_rq->active;
1358 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1359 
1360 		if (head)
1361 			list_move(&rt_se->run_list, queue);
1362 		else
1363 			list_move_tail(&rt_se->run_list, queue);
1364 	}
1365 }
1366 
1367 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1368 {
1369 	struct sched_rt_entity *rt_se = &p->rt;
1370 	struct rt_rq *rt_rq;
1371 
1372 	for_each_sched_rt_entity(rt_se) {
1373 		rt_rq = rt_rq_of_se(rt_se);
1374 		requeue_rt_entity(rt_rq, rt_se, head);
1375 	}
1376 }
1377 
1378 static void yield_task_rt(struct rq *rq)
1379 {
1380 	requeue_task_rt(rq, rq->curr, 0);
1381 }
1382 
1383 #ifdef CONFIG_SMP
1384 static int find_lowest_rq(struct task_struct *task);
1385 
1386 static int
1387 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1388 {
1389 	struct task_struct *curr;
1390 	struct rq *rq;
1391 
1392 	/* For anything but wake ups, just return the task_cpu */
1393 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1394 		goto out;
1395 
1396 	rq = cpu_rq(cpu);
1397 
1398 	rcu_read_lock();
1399 	curr = READ_ONCE(rq->curr); /* unlocked access */
1400 
1401 	/*
1402 	 * If the current task on @p's runqueue is an RT task, then
1403 	 * try to see if we can wake this RT task up on another
1404 	 * runqueue. Otherwise simply start this RT task
1405 	 * on its current runqueue.
1406 	 *
1407 	 * We want to avoid overloading runqueues. If the woken
1408 	 * task is a higher priority, then it will stay on this CPU
1409 	 * and the lower prio task should be moved to another CPU.
1410 	 * Even though this will probably make the lower prio task
1411 	 * lose its cache, we do not want to bounce a higher task
1412 	 * around just because it gave up its CPU, perhaps for a
1413 	 * lock?
1414 	 *
1415 	 * For equal prio tasks, we just let the scheduler sort it out.
1416 	 *
1417 	 * Otherwise, just let it ride on the affined RQ and the
1418 	 * post-schedule router will push the preempted task away
1419 	 *
1420 	 * This test is optimistic, if we get it wrong the load-balancer
1421 	 * will have to sort it out.
1422 	 */
1423 	if (curr && unlikely(rt_task(curr)) &&
1424 	    (curr->nr_cpus_allowed < 2 ||
1425 	     curr->prio <= p->prio)) {
1426 		int target = find_lowest_rq(p);
1427 
1428 		/*
1429 		 * Don't bother moving it if the destination CPU is
1430 		 * not running a lower priority task.
1431 		 */
1432 		if (target != -1 &&
1433 		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1434 			cpu = target;
1435 	}
1436 	rcu_read_unlock();
1437 
1438 out:
1439 	return cpu;
1440 }
1441 
1442 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1443 {
1444 	/*
1445 	 * Current can't be migrated, useless to reschedule,
1446 	 * let's hope p can move out.
1447 	 */
1448 	if (rq->curr->nr_cpus_allowed == 1 ||
1449 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1450 		return;
1451 
1452 	/*
1453 	 * p is migratable, so let's not schedule it and
1454 	 * see if it is pushed or pulled somewhere else.
1455 	 */
1456 	if (p->nr_cpus_allowed != 1
1457 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
1458 		return;
1459 
1460 	/*
1461 	 * There appear to be other CPUs that can accept
1462 	 * the current task but none can run 'p', so lets reschedule
1463 	 * to try and push the current task away:
1464 	 */
1465 	requeue_task_rt(rq, p, 1);
1466 	resched_curr(rq);
1467 }
1468 
1469 #endif /* CONFIG_SMP */
1470 
1471 /*
1472  * Preempt the current task with a newly woken task if needed:
1473  */
1474 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1475 {
1476 	if (p->prio < rq->curr->prio) {
1477 		resched_curr(rq);
1478 		return;
1479 	}
1480 
1481 #ifdef CONFIG_SMP
1482 	/*
1483 	 * If:
1484 	 *
1485 	 * - the newly woken task is of equal priority to the current task
1486 	 * - the newly woken task is non-migratable while current is migratable
1487 	 * - current will be preempted on the next reschedule
1488 	 *
1489 	 * we should check to see if current can readily move to a different
1490 	 * cpu.  If so, we will reschedule to allow the push logic to try
1491 	 * to move current somewhere else, making room for our non-migratable
1492 	 * task.
1493 	 */
1494 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1495 		check_preempt_equal_prio(rq, p);
1496 #endif
1497 }
1498 
1499 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1500 						   struct rt_rq *rt_rq)
1501 {
1502 	struct rt_prio_array *array = &rt_rq->active;
1503 	struct sched_rt_entity *next = NULL;
1504 	struct list_head *queue;
1505 	int idx;
1506 
1507 	idx = sched_find_first_bit(array->bitmap);
1508 	BUG_ON(idx >= MAX_RT_PRIO);
1509 
1510 	queue = array->queue + idx;
1511 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1512 
1513 	return next;
1514 }
1515 
1516 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1517 {
1518 	struct sched_rt_entity *rt_se;
1519 	struct task_struct *p;
1520 	struct rt_rq *rt_rq  = &rq->rt;
1521 
1522 	do {
1523 		rt_se = pick_next_rt_entity(rq, rt_rq);
1524 		BUG_ON(!rt_se);
1525 		rt_rq = group_rt_rq(rt_se);
1526 	} while (rt_rq);
1527 
1528 	p = rt_task_of(rt_se);
1529 	p->se.exec_start = rq_clock_task(rq);
1530 
1531 	return p;
1532 }
1533 
1534 static struct task_struct *
1535 pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1536 {
1537 	struct task_struct *p;
1538 	struct rt_rq *rt_rq = &rq->rt;
1539 
1540 	if (need_pull_rt_task(rq, prev)) {
1541 		/*
1542 		 * This is OK, because current is on_cpu, which avoids it being
1543 		 * picked for load-balance and preemption/IRQs are still
1544 		 * disabled avoiding further scheduler activity on it and we're
1545 		 * being very careful to re-start the picking loop.
1546 		 */
1547 		rq_unpin_lock(rq, rf);
1548 		pull_rt_task(rq);
1549 		rq_repin_lock(rq, rf);
1550 		/*
1551 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1552 		 * means a dl or stop task can slip in, in which case we need
1553 		 * to re-start task selection.
1554 		 */
1555 		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1556 			     rq->dl.dl_nr_running))
1557 			return RETRY_TASK;
1558 	}
1559 
1560 	/*
1561 	 * We may dequeue prev's rt_rq in put_prev_task().
1562 	 * So, we update time before rt_nr_running check.
1563 	 */
1564 	if (prev->sched_class == &rt_sched_class)
1565 		update_curr_rt(rq);
1566 
1567 	if (!rt_rq->rt_queued)
1568 		return NULL;
1569 
1570 	put_prev_task(rq, prev);
1571 
1572 	p = _pick_next_task_rt(rq);
1573 
1574 	/* The running task is never eligible for pushing */
1575 	dequeue_pushable_task(rq, p);
1576 
1577 	rt_queue_push_tasks(rq);
1578 
1579 	return p;
1580 }
1581 
1582 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1583 {
1584 	update_curr_rt(rq);
1585 
1586 	/*
1587 	 * The previous task needs to be made eligible for pushing
1588 	 * if it is still active
1589 	 */
1590 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1591 		enqueue_pushable_task(rq, p);
1592 }
1593 
1594 #ifdef CONFIG_SMP
1595 
1596 /* Only try algorithms three times */
1597 #define RT_MAX_TRIES 3
1598 
1599 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1600 {
1601 	if (!task_running(rq, p) &&
1602 	    cpumask_test_cpu(cpu, &p->cpus_allowed))
1603 		return 1;
1604 
1605 	return 0;
1606 }
1607 
1608 /*
1609  * Return the highest pushable rq's task, which is suitable to be executed
1610  * on the CPU, NULL otherwise
1611  */
1612 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1613 {
1614 	struct plist_head *head = &rq->rt.pushable_tasks;
1615 	struct task_struct *p;
1616 
1617 	if (!has_pushable_tasks(rq))
1618 		return NULL;
1619 
1620 	plist_for_each_entry(p, head, pushable_tasks) {
1621 		if (pick_rt_task(rq, p, cpu))
1622 			return p;
1623 	}
1624 
1625 	return NULL;
1626 }
1627 
1628 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1629 
1630 static int find_lowest_rq(struct task_struct *task)
1631 {
1632 	struct sched_domain *sd;
1633 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1634 	int this_cpu = smp_processor_id();
1635 	int cpu      = task_cpu(task);
1636 
1637 	/* Make sure the mask is initialized first */
1638 	if (unlikely(!lowest_mask))
1639 		return -1;
1640 
1641 	if (task->nr_cpus_allowed == 1)
1642 		return -1; /* No other targets possible */
1643 
1644 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1645 		return -1; /* No targets found */
1646 
1647 	/*
1648 	 * At this point we have built a mask of CPUs representing the
1649 	 * lowest priority tasks in the system.  Now we want to elect
1650 	 * the best one based on our affinity and topology.
1651 	 *
1652 	 * We prioritize the last CPU that the task executed on since
1653 	 * it is most likely cache-hot in that location.
1654 	 */
1655 	if (cpumask_test_cpu(cpu, lowest_mask))
1656 		return cpu;
1657 
1658 	/*
1659 	 * Otherwise, we consult the sched_domains span maps to figure
1660 	 * out which CPU is logically closest to our hot cache data.
1661 	 */
1662 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1663 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1664 
1665 	rcu_read_lock();
1666 	for_each_domain(cpu, sd) {
1667 		if (sd->flags & SD_WAKE_AFFINE) {
1668 			int best_cpu;
1669 
1670 			/*
1671 			 * "this_cpu" is cheaper to preempt than a
1672 			 * remote processor.
1673 			 */
1674 			if (this_cpu != -1 &&
1675 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1676 				rcu_read_unlock();
1677 				return this_cpu;
1678 			}
1679 
1680 			best_cpu = cpumask_first_and(lowest_mask,
1681 						     sched_domain_span(sd));
1682 			if (best_cpu < nr_cpu_ids) {
1683 				rcu_read_unlock();
1684 				return best_cpu;
1685 			}
1686 		}
1687 	}
1688 	rcu_read_unlock();
1689 
1690 	/*
1691 	 * And finally, if there were no matches within the domains
1692 	 * just give the caller *something* to work with from the compatible
1693 	 * locations.
1694 	 */
1695 	if (this_cpu != -1)
1696 		return this_cpu;
1697 
1698 	cpu = cpumask_any(lowest_mask);
1699 	if (cpu < nr_cpu_ids)
1700 		return cpu;
1701 
1702 	return -1;
1703 }
1704 
1705 /* Will lock the rq it finds */
1706 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1707 {
1708 	struct rq *lowest_rq = NULL;
1709 	int tries;
1710 	int cpu;
1711 
1712 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1713 		cpu = find_lowest_rq(task);
1714 
1715 		if ((cpu == -1) || (cpu == rq->cpu))
1716 			break;
1717 
1718 		lowest_rq = cpu_rq(cpu);
1719 
1720 		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1721 			/*
1722 			 * Target rq has tasks of equal or higher priority,
1723 			 * retrying does not release any lock and is unlikely
1724 			 * to yield a different result.
1725 			 */
1726 			lowest_rq = NULL;
1727 			break;
1728 		}
1729 
1730 		/* if the prio of this runqueue changed, try again */
1731 		if (double_lock_balance(rq, lowest_rq)) {
1732 			/*
1733 			 * We had to unlock the run queue. In
1734 			 * the mean time, task could have
1735 			 * migrated already or had its affinity changed.
1736 			 * Also make sure that it wasn't scheduled on its rq.
1737 			 */
1738 			if (unlikely(task_rq(task) != rq ||
1739 				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
1740 				     task_running(rq, task) ||
1741 				     !rt_task(task) ||
1742 				     !task_on_rq_queued(task))) {
1743 
1744 				double_unlock_balance(rq, lowest_rq);
1745 				lowest_rq = NULL;
1746 				break;
1747 			}
1748 		}
1749 
1750 		/* If this rq is still suitable use it. */
1751 		if (lowest_rq->rt.highest_prio.curr > task->prio)
1752 			break;
1753 
1754 		/* try again */
1755 		double_unlock_balance(rq, lowest_rq);
1756 		lowest_rq = NULL;
1757 	}
1758 
1759 	return lowest_rq;
1760 }
1761 
1762 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1763 {
1764 	struct task_struct *p;
1765 
1766 	if (!has_pushable_tasks(rq))
1767 		return NULL;
1768 
1769 	p = plist_first_entry(&rq->rt.pushable_tasks,
1770 			      struct task_struct, pushable_tasks);
1771 
1772 	BUG_ON(rq->cpu != task_cpu(p));
1773 	BUG_ON(task_current(rq, p));
1774 	BUG_ON(p->nr_cpus_allowed <= 1);
1775 
1776 	BUG_ON(!task_on_rq_queued(p));
1777 	BUG_ON(!rt_task(p));
1778 
1779 	return p;
1780 }
1781 
1782 /*
1783  * If the current CPU has more than one RT task, see if the non
1784  * running task can migrate over to a CPU that is running a task
1785  * of lesser priority.
1786  */
1787 static int push_rt_task(struct rq *rq)
1788 {
1789 	struct task_struct *next_task;
1790 	struct rq *lowest_rq;
1791 	int ret = 0;
1792 
1793 	if (!rq->rt.overloaded)
1794 		return 0;
1795 
1796 	next_task = pick_next_pushable_task(rq);
1797 	if (!next_task)
1798 		return 0;
1799 
1800 retry:
1801 	if (unlikely(next_task == rq->curr)) {
1802 		WARN_ON(1);
1803 		return 0;
1804 	}
1805 
1806 	/*
1807 	 * It's possible that the next_task slipped in of
1808 	 * higher priority than current. If that's the case
1809 	 * just reschedule current.
1810 	 */
1811 	if (unlikely(next_task->prio < rq->curr->prio)) {
1812 		resched_curr(rq);
1813 		return 0;
1814 	}
1815 
1816 	/* We might release rq lock */
1817 	get_task_struct(next_task);
1818 
1819 	/* find_lock_lowest_rq locks the rq if found */
1820 	lowest_rq = find_lock_lowest_rq(next_task, rq);
1821 	if (!lowest_rq) {
1822 		struct task_struct *task;
1823 		/*
1824 		 * find_lock_lowest_rq releases rq->lock
1825 		 * so it is possible that next_task has migrated.
1826 		 *
1827 		 * We need to make sure that the task is still on the same
1828 		 * run-queue and is also still the next task eligible for
1829 		 * pushing.
1830 		 */
1831 		task = pick_next_pushable_task(rq);
1832 		if (task == next_task) {
1833 			/*
1834 			 * The task hasn't migrated, and is still the next
1835 			 * eligible task, but we failed to find a run-queue
1836 			 * to push it to.  Do not retry in this case, since
1837 			 * other CPUs will pull from us when ready.
1838 			 */
1839 			goto out;
1840 		}
1841 
1842 		if (!task)
1843 			/* No more tasks, just exit */
1844 			goto out;
1845 
1846 		/*
1847 		 * Something has shifted, try again.
1848 		 */
1849 		put_task_struct(next_task);
1850 		next_task = task;
1851 		goto retry;
1852 	}
1853 
1854 	deactivate_task(rq, next_task, 0);
1855 	set_task_cpu(next_task, lowest_rq->cpu);
1856 	activate_task(lowest_rq, next_task, 0);
1857 	ret = 1;
1858 
1859 	resched_curr(lowest_rq);
1860 
1861 	double_unlock_balance(rq, lowest_rq);
1862 
1863 out:
1864 	put_task_struct(next_task);
1865 
1866 	return ret;
1867 }
1868 
1869 static void push_rt_tasks(struct rq *rq)
1870 {
1871 	/* push_rt_task will return true if it moved an RT */
1872 	while (push_rt_task(rq))
1873 		;
1874 }
1875 
1876 #ifdef HAVE_RT_PUSH_IPI
1877 
1878 /*
1879  * When a high priority task schedules out from a CPU and a lower priority
1880  * task is scheduled in, a check is made to see if there's any RT tasks
1881  * on other CPUs that are waiting to run because a higher priority RT task
1882  * is currently running on its CPU. In this case, the CPU with multiple RT
1883  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
1884  * up that may be able to run one of its non-running queued RT tasks.
1885  *
1886  * All CPUs with overloaded RT tasks need to be notified as there is currently
1887  * no way to know which of these CPUs have the highest priority task waiting
1888  * to run. Instead of trying to take a spinlock on each of these CPUs,
1889  * which has shown to cause large latency when done on machines with many
1890  * CPUs, sending an IPI to the CPUs to have them push off the overloaded
1891  * RT tasks waiting to run.
1892  *
1893  * Just sending an IPI to each of the CPUs is also an issue, as on large
1894  * count CPU machines, this can cause an IPI storm on a CPU, especially
1895  * if its the only CPU with multiple RT tasks queued, and a large number
1896  * of CPUs scheduling a lower priority task at the same time.
1897  *
1898  * Each root domain has its own irq work function that can iterate over
1899  * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
1900  * tassk must be checked if there's one or many CPUs that are lowering
1901  * their priority, there's a single irq work iterator that will try to
1902  * push off RT tasks that are waiting to run.
1903  *
1904  * When a CPU schedules a lower priority task, it will kick off the
1905  * irq work iterator that will jump to each CPU with overloaded RT tasks.
1906  * As it only takes the first CPU that schedules a lower priority task
1907  * to start the process, the rto_start variable is incremented and if
1908  * the atomic result is one, then that CPU will try to take the rto_lock.
1909  * This prevents high contention on the lock as the process handles all
1910  * CPUs scheduling lower priority tasks.
1911  *
1912  * All CPUs that are scheduling a lower priority task will increment the
1913  * rt_loop_next variable. This will make sure that the irq work iterator
1914  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
1915  * priority task, even if the iterator is in the middle of a scan. Incrementing
1916  * the rt_loop_next will cause the iterator to perform another scan.
1917  *
1918  */
1919 static int rto_next_cpu(struct root_domain *rd)
1920 {
1921 	int next;
1922 	int cpu;
1923 
1924 	/*
1925 	 * When starting the IPI RT pushing, the rto_cpu is set to -1,
1926 	 * rt_next_cpu() will simply return the first CPU found in
1927 	 * the rto_mask.
1928 	 *
1929 	 * If rto_next_cpu() is called with rto_cpu is a valid CPU, it
1930 	 * will return the next CPU found in the rto_mask.
1931 	 *
1932 	 * If there are no more CPUs left in the rto_mask, then a check is made
1933 	 * against rto_loop and rto_loop_next. rto_loop is only updated with
1934 	 * the rto_lock held, but any CPU may increment the rto_loop_next
1935 	 * without any locking.
1936 	 */
1937 	for (;;) {
1938 
1939 		/* When rto_cpu is -1 this acts like cpumask_first() */
1940 		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
1941 
1942 		rd->rto_cpu = cpu;
1943 
1944 		if (cpu < nr_cpu_ids)
1945 			return cpu;
1946 
1947 		rd->rto_cpu = -1;
1948 
1949 		/*
1950 		 * ACQUIRE ensures we see the @rto_mask changes
1951 		 * made prior to the @next value observed.
1952 		 *
1953 		 * Matches WMB in rt_set_overload().
1954 		 */
1955 		next = atomic_read_acquire(&rd->rto_loop_next);
1956 
1957 		if (rd->rto_loop == next)
1958 			break;
1959 
1960 		rd->rto_loop = next;
1961 	}
1962 
1963 	return -1;
1964 }
1965 
1966 static inline bool rto_start_trylock(atomic_t *v)
1967 {
1968 	return !atomic_cmpxchg_acquire(v, 0, 1);
1969 }
1970 
1971 static inline void rto_start_unlock(atomic_t *v)
1972 {
1973 	atomic_set_release(v, 0);
1974 }
1975 
1976 static void tell_cpu_to_push(struct rq *rq)
1977 {
1978 	int cpu = -1;
1979 
1980 	/* Keep the loop going if the IPI is currently active */
1981 	atomic_inc(&rq->rd->rto_loop_next);
1982 
1983 	/* Only one CPU can initiate a loop at a time */
1984 	if (!rto_start_trylock(&rq->rd->rto_loop_start))
1985 		return;
1986 
1987 	raw_spin_lock(&rq->rd->rto_lock);
1988 
1989 	/*
1990 	 * The rto_cpu is updated under the lock, if it has a valid CPU
1991 	 * then the IPI is still running and will continue due to the
1992 	 * update to loop_next, and nothing needs to be done here.
1993 	 * Otherwise it is finishing up and an ipi needs to be sent.
1994 	 */
1995 	if (rq->rd->rto_cpu < 0)
1996 		cpu = rto_next_cpu(rq->rd);
1997 
1998 	raw_spin_unlock(&rq->rd->rto_lock);
1999 
2000 	rto_start_unlock(&rq->rd->rto_loop_start);
2001 
2002 	if (cpu >= 0) {
2003 		/* Make sure the rd does not get freed while pushing */
2004 		sched_get_rd(rq->rd);
2005 		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2006 	}
2007 }
2008 
2009 /* Called from hardirq context */
2010 void rto_push_irq_work_func(struct irq_work *work)
2011 {
2012 	struct root_domain *rd =
2013 		container_of(work, struct root_domain, rto_push_work);
2014 	struct rq *rq;
2015 	int cpu;
2016 
2017 	rq = this_rq();
2018 
2019 	/*
2020 	 * We do not need to grab the lock to check for has_pushable_tasks.
2021 	 * When it gets updated, a check is made if a push is possible.
2022 	 */
2023 	if (has_pushable_tasks(rq)) {
2024 		raw_spin_lock(&rq->lock);
2025 		push_rt_tasks(rq);
2026 		raw_spin_unlock(&rq->lock);
2027 	}
2028 
2029 	raw_spin_lock(&rd->rto_lock);
2030 
2031 	/* Pass the IPI to the next rt overloaded queue */
2032 	cpu = rto_next_cpu(rd);
2033 
2034 	raw_spin_unlock(&rd->rto_lock);
2035 
2036 	if (cpu < 0) {
2037 		sched_put_rd(rd);
2038 		return;
2039 	}
2040 
2041 	/* Try the next RT overloaded CPU */
2042 	irq_work_queue_on(&rd->rto_push_work, cpu);
2043 }
2044 #endif /* HAVE_RT_PUSH_IPI */
2045 
2046 static void pull_rt_task(struct rq *this_rq)
2047 {
2048 	int this_cpu = this_rq->cpu, cpu;
2049 	bool resched = false;
2050 	struct task_struct *p;
2051 	struct rq *src_rq;
2052 	int rt_overload_count = rt_overloaded(this_rq);
2053 
2054 	if (likely(!rt_overload_count))
2055 		return;
2056 
2057 	/*
2058 	 * Match the barrier from rt_set_overloaded; this guarantees that if we
2059 	 * see overloaded we must also see the rto_mask bit.
2060 	 */
2061 	smp_rmb();
2062 
2063 	/* If we are the only overloaded CPU do nothing */
2064 	if (rt_overload_count == 1 &&
2065 	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2066 		return;
2067 
2068 #ifdef HAVE_RT_PUSH_IPI
2069 	if (sched_feat(RT_PUSH_IPI)) {
2070 		tell_cpu_to_push(this_rq);
2071 		return;
2072 	}
2073 #endif
2074 
2075 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
2076 		if (this_cpu == cpu)
2077 			continue;
2078 
2079 		src_rq = cpu_rq(cpu);
2080 
2081 		/*
2082 		 * Don't bother taking the src_rq->lock if the next highest
2083 		 * task is known to be lower-priority than our current task.
2084 		 * This may look racy, but if this value is about to go
2085 		 * logically higher, the src_rq will push this task away.
2086 		 * And if its going logically lower, we do not care
2087 		 */
2088 		if (src_rq->rt.highest_prio.next >=
2089 		    this_rq->rt.highest_prio.curr)
2090 			continue;
2091 
2092 		/*
2093 		 * We can potentially drop this_rq's lock in
2094 		 * double_lock_balance, and another CPU could
2095 		 * alter this_rq
2096 		 */
2097 		double_lock_balance(this_rq, src_rq);
2098 
2099 		/*
2100 		 * We can pull only a task, which is pushable
2101 		 * on its rq, and no others.
2102 		 */
2103 		p = pick_highest_pushable_task(src_rq, this_cpu);
2104 
2105 		/*
2106 		 * Do we have an RT task that preempts
2107 		 * the to-be-scheduled task?
2108 		 */
2109 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2110 			WARN_ON(p == src_rq->curr);
2111 			WARN_ON(!task_on_rq_queued(p));
2112 
2113 			/*
2114 			 * There's a chance that p is higher in priority
2115 			 * than what's currently running on its CPU.
2116 			 * This is just that p is wakeing up and hasn't
2117 			 * had a chance to schedule. We only pull
2118 			 * p if it is lower in priority than the
2119 			 * current task on the run queue
2120 			 */
2121 			if (p->prio < src_rq->curr->prio)
2122 				goto skip;
2123 
2124 			resched = true;
2125 
2126 			deactivate_task(src_rq, p, 0);
2127 			set_task_cpu(p, this_cpu);
2128 			activate_task(this_rq, p, 0);
2129 			/*
2130 			 * We continue with the search, just in
2131 			 * case there's an even higher prio task
2132 			 * in another runqueue. (low likelihood
2133 			 * but possible)
2134 			 */
2135 		}
2136 skip:
2137 		double_unlock_balance(this_rq, src_rq);
2138 	}
2139 
2140 	if (resched)
2141 		resched_curr(this_rq);
2142 }
2143 
2144 /*
2145  * If we are not running and we are not going to reschedule soon, we should
2146  * try to push tasks away now
2147  */
2148 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2149 {
2150 	if (!task_running(rq, p) &&
2151 	    !test_tsk_need_resched(rq->curr) &&
2152 	    p->nr_cpus_allowed > 1 &&
2153 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
2154 	    (rq->curr->nr_cpus_allowed < 2 ||
2155 	     rq->curr->prio <= p->prio))
2156 		push_rt_tasks(rq);
2157 }
2158 
2159 /* Assumes rq->lock is held */
2160 static void rq_online_rt(struct rq *rq)
2161 {
2162 	if (rq->rt.overloaded)
2163 		rt_set_overload(rq);
2164 
2165 	__enable_runtime(rq);
2166 
2167 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2168 }
2169 
2170 /* Assumes rq->lock is held */
2171 static void rq_offline_rt(struct rq *rq)
2172 {
2173 	if (rq->rt.overloaded)
2174 		rt_clear_overload(rq);
2175 
2176 	__disable_runtime(rq);
2177 
2178 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2179 }
2180 
2181 /*
2182  * When switch from the rt queue, we bring ourselves to a position
2183  * that we might want to pull RT tasks from other runqueues.
2184  */
2185 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2186 {
2187 	/*
2188 	 * If there are other RT tasks then we will reschedule
2189 	 * and the scheduling of the other RT tasks will handle
2190 	 * the balancing. But if we are the last RT task
2191 	 * we may need to handle the pulling of RT tasks
2192 	 * now.
2193 	 */
2194 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2195 		return;
2196 
2197 	rt_queue_pull_task(rq);
2198 }
2199 
2200 void __init init_sched_rt_class(void)
2201 {
2202 	unsigned int i;
2203 
2204 	for_each_possible_cpu(i) {
2205 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2206 					GFP_KERNEL, cpu_to_node(i));
2207 	}
2208 }
2209 #endif /* CONFIG_SMP */
2210 
2211 /*
2212  * When switching a task to RT, we may overload the runqueue
2213  * with RT tasks. In this case we try to push them off to
2214  * other runqueues.
2215  */
2216 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2217 {
2218 	/*
2219 	 * If we are already running, then there's nothing
2220 	 * that needs to be done. But if we are not running
2221 	 * we may need to preempt the current running task.
2222 	 * If that current running task is also an RT task
2223 	 * then see if we can move to another run queue.
2224 	 */
2225 	if (task_on_rq_queued(p) && rq->curr != p) {
2226 #ifdef CONFIG_SMP
2227 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2228 			rt_queue_push_tasks(rq);
2229 #endif /* CONFIG_SMP */
2230 		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2231 			resched_curr(rq);
2232 	}
2233 }
2234 
2235 /*
2236  * Priority of the task has changed. This may cause
2237  * us to initiate a push or pull.
2238  */
2239 static void
2240 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2241 {
2242 	if (!task_on_rq_queued(p))
2243 		return;
2244 
2245 	if (rq->curr == p) {
2246 #ifdef CONFIG_SMP
2247 		/*
2248 		 * If our priority decreases while running, we
2249 		 * may need to pull tasks to this runqueue.
2250 		 */
2251 		if (oldprio < p->prio)
2252 			rt_queue_pull_task(rq);
2253 
2254 		/*
2255 		 * If there's a higher priority task waiting to run
2256 		 * then reschedule.
2257 		 */
2258 		if (p->prio > rq->rt.highest_prio.curr)
2259 			resched_curr(rq);
2260 #else
2261 		/* For UP simply resched on drop of prio */
2262 		if (oldprio < p->prio)
2263 			resched_curr(rq);
2264 #endif /* CONFIG_SMP */
2265 	} else {
2266 		/*
2267 		 * This task is not running, but if it is
2268 		 * greater than the current running task
2269 		 * then reschedule.
2270 		 */
2271 		if (p->prio < rq->curr->prio)
2272 			resched_curr(rq);
2273 	}
2274 }
2275 
2276 #ifdef CONFIG_POSIX_TIMERS
2277 static void watchdog(struct rq *rq, struct task_struct *p)
2278 {
2279 	unsigned long soft, hard;
2280 
2281 	/* max may change after cur was read, this will be fixed next tick */
2282 	soft = task_rlimit(p, RLIMIT_RTTIME);
2283 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
2284 
2285 	if (soft != RLIM_INFINITY) {
2286 		unsigned long next;
2287 
2288 		if (p->rt.watchdog_stamp != jiffies) {
2289 			p->rt.timeout++;
2290 			p->rt.watchdog_stamp = jiffies;
2291 		}
2292 
2293 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2294 		if (p->rt.timeout > next)
2295 			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2296 	}
2297 }
2298 #else
2299 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2300 #endif
2301 
2302 /*
2303  * scheduler tick hitting a task of our scheduling class.
2304  *
2305  * NOTE: This function can be called remotely by the tick offload that
2306  * goes along full dynticks. Therefore no local assumption can be made
2307  * and everything must be accessed through the @rq and @curr passed in
2308  * parameters.
2309  */
2310 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2311 {
2312 	struct sched_rt_entity *rt_se = &p->rt;
2313 
2314 	update_curr_rt(rq);
2315 
2316 	watchdog(rq, p);
2317 
2318 	/*
2319 	 * RR tasks need a special form of timeslice management.
2320 	 * FIFO tasks have no timeslices.
2321 	 */
2322 	if (p->policy != SCHED_RR)
2323 		return;
2324 
2325 	if (--p->rt.time_slice)
2326 		return;
2327 
2328 	p->rt.time_slice = sched_rr_timeslice;
2329 
2330 	/*
2331 	 * Requeue to the end of queue if we (and all of our ancestors) are not
2332 	 * the only element on the queue
2333 	 */
2334 	for_each_sched_rt_entity(rt_se) {
2335 		if (rt_se->run_list.prev != rt_se->run_list.next) {
2336 			requeue_task_rt(rq, p, 0);
2337 			resched_curr(rq);
2338 			return;
2339 		}
2340 	}
2341 }
2342 
2343 static void set_curr_task_rt(struct rq *rq)
2344 {
2345 	struct task_struct *p = rq->curr;
2346 
2347 	p->se.exec_start = rq_clock_task(rq);
2348 
2349 	/* The running task is never eligible for pushing */
2350 	dequeue_pushable_task(rq, p);
2351 }
2352 
2353 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2354 {
2355 	/*
2356 	 * Time slice is 0 for SCHED_FIFO tasks
2357 	 */
2358 	if (task->policy == SCHED_RR)
2359 		return sched_rr_timeslice;
2360 	else
2361 		return 0;
2362 }
2363 
2364 const struct sched_class rt_sched_class = {
2365 	.next			= &fair_sched_class,
2366 	.enqueue_task		= enqueue_task_rt,
2367 	.dequeue_task		= dequeue_task_rt,
2368 	.yield_task		= yield_task_rt,
2369 
2370 	.check_preempt_curr	= check_preempt_curr_rt,
2371 
2372 	.pick_next_task		= pick_next_task_rt,
2373 	.put_prev_task		= put_prev_task_rt,
2374 
2375 #ifdef CONFIG_SMP
2376 	.select_task_rq		= select_task_rq_rt,
2377 
2378 	.set_cpus_allowed       = set_cpus_allowed_common,
2379 	.rq_online              = rq_online_rt,
2380 	.rq_offline             = rq_offline_rt,
2381 	.task_woken		= task_woken_rt,
2382 	.switched_from		= switched_from_rt,
2383 #endif
2384 
2385 	.set_curr_task          = set_curr_task_rt,
2386 	.task_tick		= task_tick_rt,
2387 
2388 	.get_rr_interval	= get_rr_interval_rt,
2389 
2390 	.prio_changed		= prio_changed_rt,
2391 	.switched_to		= switched_to_rt,
2392 
2393 	.update_curr		= update_curr_rt,
2394 };
2395 
2396 #ifdef CONFIG_RT_GROUP_SCHED
2397 /*
2398  * Ensure that the real time constraints are schedulable.
2399  */
2400 static DEFINE_MUTEX(rt_constraints_mutex);
2401 
2402 /* Must be called with tasklist_lock held */
2403 static inline int tg_has_rt_tasks(struct task_group *tg)
2404 {
2405 	struct task_struct *g, *p;
2406 
2407 	/*
2408 	 * Autogroups do not have RT tasks; see autogroup_create().
2409 	 */
2410 	if (task_group_is_autogroup(tg))
2411 		return 0;
2412 
2413 	for_each_process_thread(g, p) {
2414 		if (rt_task(p) && task_group(p) == tg)
2415 			return 1;
2416 	}
2417 
2418 	return 0;
2419 }
2420 
2421 struct rt_schedulable_data {
2422 	struct task_group *tg;
2423 	u64 rt_period;
2424 	u64 rt_runtime;
2425 };
2426 
2427 static int tg_rt_schedulable(struct task_group *tg, void *data)
2428 {
2429 	struct rt_schedulable_data *d = data;
2430 	struct task_group *child;
2431 	unsigned long total, sum = 0;
2432 	u64 period, runtime;
2433 
2434 	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2435 	runtime = tg->rt_bandwidth.rt_runtime;
2436 
2437 	if (tg == d->tg) {
2438 		period = d->rt_period;
2439 		runtime = d->rt_runtime;
2440 	}
2441 
2442 	/*
2443 	 * Cannot have more runtime than the period.
2444 	 */
2445 	if (runtime > period && runtime != RUNTIME_INF)
2446 		return -EINVAL;
2447 
2448 	/*
2449 	 * Ensure we don't starve existing RT tasks.
2450 	 */
2451 	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
2452 		return -EBUSY;
2453 
2454 	total = to_ratio(period, runtime);
2455 
2456 	/*
2457 	 * Nobody can have more than the global setting allows.
2458 	 */
2459 	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2460 		return -EINVAL;
2461 
2462 	/*
2463 	 * The sum of our children's runtime should not exceed our own.
2464 	 */
2465 	list_for_each_entry_rcu(child, &tg->children, siblings) {
2466 		period = ktime_to_ns(child->rt_bandwidth.rt_period);
2467 		runtime = child->rt_bandwidth.rt_runtime;
2468 
2469 		if (child == d->tg) {
2470 			period = d->rt_period;
2471 			runtime = d->rt_runtime;
2472 		}
2473 
2474 		sum += to_ratio(period, runtime);
2475 	}
2476 
2477 	if (sum > total)
2478 		return -EINVAL;
2479 
2480 	return 0;
2481 }
2482 
2483 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2484 {
2485 	int ret;
2486 
2487 	struct rt_schedulable_data data = {
2488 		.tg = tg,
2489 		.rt_period = period,
2490 		.rt_runtime = runtime,
2491 	};
2492 
2493 	rcu_read_lock();
2494 	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2495 	rcu_read_unlock();
2496 
2497 	return ret;
2498 }
2499 
2500 static int tg_set_rt_bandwidth(struct task_group *tg,
2501 		u64 rt_period, u64 rt_runtime)
2502 {
2503 	int i, err = 0;
2504 
2505 	/*
2506 	 * Disallowing the root group RT runtime is BAD, it would disallow the
2507 	 * kernel creating (and or operating) RT threads.
2508 	 */
2509 	if (tg == &root_task_group && rt_runtime == 0)
2510 		return -EINVAL;
2511 
2512 	/* No period doesn't make any sense. */
2513 	if (rt_period == 0)
2514 		return -EINVAL;
2515 
2516 	mutex_lock(&rt_constraints_mutex);
2517 	read_lock(&tasklist_lock);
2518 	err = __rt_schedulable(tg, rt_period, rt_runtime);
2519 	if (err)
2520 		goto unlock;
2521 
2522 	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2523 	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2524 	tg->rt_bandwidth.rt_runtime = rt_runtime;
2525 
2526 	for_each_possible_cpu(i) {
2527 		struct rt_rq *rt_rq = tg->rt_rq[i];
2528 
2529 		raw_spin_lock(&rt_rq->rt_runtime_lock);
2530 		rt_rq->rt_runtime = rt_runtime;
2531 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
2532 	}
2533 	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2534 unlock:
2535 	read_unlock(&tasklist_lock);
2536 	mutex_unlock(&rt_constraints_mutex);
2537 
2538 	return err;
2539 }
2540 
2541 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2542 {
2543 	u64 rt_runtime, rt_period;
2544 
2545 	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2546 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2547 	if (rt_runtime_us < 0)
2548 		rt_runtime = RUNTIME_INF;
2549 
2550 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2551 }
2552 
2553 long sched_group_rt_runtime(struct task_group *tg)
2554 {
2555 	u64 rt_runtime_us;
2556 
2557 	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2558 		return -1;
2559 
2560 	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2561 	do_div(rt_runtime_us, NSEC_PER_USEC);
2562 	return rt_runtime_us;
2563 }
2564 
2565 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2566 {
2567 	u64 rt_runtime, rt_period;
2568 
2569 	rt_period = rt_period_us * NSEC_PER_USEC;
2570 	rt_runtime = tg->rt_bandwidth.rt_runtime;
2571 
2572 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2573 }
2574 
2575 long sched_group_rt_period(struct task_group *tg)
2576 {
2577 	u64 rt_period_us;
2578 
2579 	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2580 	do_div(rt_period_us, NSEC_PER_USEC);
2581 	return rt_period_us;
2582 }
2583 
2584 static int sched_rt_global_constraints(void)
2585 {
2586 	int ret = 0;
2587 
2588 	mutex_lock(&rt_constraints_mutex);
2589 	read_lock(&tasklist_lock);
2590 	ret = __rt_schedulable(NULL, 0, 0);
2591 	read_unlock(&tasklist_lock);
2592 	mutex_unlock(&rt_constraints_mutex);
2593 
2594 	return ret;
2595 }
2596 
2597 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2598 {
2599 	/* Don't accept realtime tasks when there is no way for them to run */
2600 	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2601 		return 0;
2602 
2603 	return 1;
2604 }
2605 
2606 #else /* !CONFIG_RT_GROUP_SCHED */
2607 static int sched_rt_global_constraints(void)
2608 {
2609 	unsigned long flags;
2610 	int i;
2611 
2612 	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2613 	for_each_possible_cpu(i) {
2614 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2615 
2616 		raw_spin_lock(&rt_rq->rt_runtime_lock);
2617 		rt_rq->rt_runtime = global_rt_runtime();
2618 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
2619 	}
2620 	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2621 
2622 	return 0;
2623 }
2624 #endif /* CONFIG_RT_GROUP_SCHED */
2625 
2626 static int sched_rt_global_validate(void)
2627 {
2628 	if (sysctl_sched_rt_period <= 0)
2629 		return -EINVAL;
2630 
2631 	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2632 		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
2633 		return -EINVAL;
2634 
2635 	return 0;
2636 }
2637 
2638 static void sched_rt_do_global(void)
2639 {
2640 	def_rt_bandwidth.rt_runtime = global_rt_runtime();
2641 	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
2642 }
2643 
2644 int sched_rt_handler(struct ctl_table *table, int write,
2645 		void __user *buffer, size_t *lenp,
2646 		loff_t *ppos)
2647 {
2648 	int old_period, old_runtime;
2649 	static DEFINE_MUTEX(mutex);
2650 	int ret;
2651 
2652 	mutex_lock(&mutex);
2653 	old_period = sysctl_sched_rt_period;
2654 	old_runtime = sysctl_sched_rt_runtime;
2655 
2656 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2657 
2658 	if (!ret && write) {
2659 		ret = sched_rt_global_validate();
2660 		if (ret)
2661 			goto undo;
2662 
2663 		ret = sched_dl_global_validate();
2664 		if (ret)
2665 			goto undo;
2666 
2667 		ret = sched_rt_global_constraints();
2668 		if (ret)
2669 			goto undo;
2670 
2671 		sched_rt_do_global();
2672 		sched_dl_do_global();
2673 	}
2674 	if (0) {
2675 undo:
2676 		sysctl_sched_rt_period = old_period;
2677 		sysctl_sched_rt_runtime = old_runtime;
2678 	}
2679 	mutex_unlock(&mutex);
2680 
2681 	return ret;
2682 }
2683 
2684 int sched_rr_handler(struct ctl_table *table, int write,
2685 		void __user *buffer, size_t *lenp,
2686 		loff_t *ppos)
2687 {
2688 	int ret;
2689 	static DEFINE_MUTEX(mutex);
2690 
2691 	mutex_lock(&mutex);
2692 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2693 	/*
2694 	 * Make sure that internally we keep jiffies.
2695 	 * Also, writing zero resets the timeslice to default:
2696 	 */
2697 	if (!ret && write) {
2698 		sched_rr_timeslice =
2699 			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
2700 			msecs_to_jiffies(sysctl_sched_rr_timeslice);
2701 	}
2702 	mutex_unlock(&mutex);
2703 
2704 	return ret;
2705 }
2706 
2707 #ifdef CONFIG_SCHED_DEBUG
2708 void print_rt_stats(struct seq_file *m, int cpu)
2709 {
2710 	rt_rq_iter_t iter;
2711 	struct rt_rq *rt_rq;
2712 
2713 	rcu_read_lock();
2714 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2715 		print_rt_rq(m, cpu, rt_rq);
2716 	rcu_read_unlock();
2717 }
2718 #endif /* CONFIG_SCHED_DEBUG */
2719