xref: /openbmc/linux/kernel/sched/rt.c (revision 95e9fd10)
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5 
6 #include "sched.h"
7 
8 #include <linux/slab.h>
9 
10 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
11 
12 struct rt_bandwidth def_rt_bandwidth;
13 
14 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
15 {
16 	struct rt_bandwidth *rt_b =
17 		container_of(timer, struct rt_bandwidth, rt_period_timer);
18 	ktime_t now;
19 	int overrun;
20 	int idle = 0;
21 
22 	for (;;) {
23 		now = hrtimer_cb_get_time(timer);
24 		overrun = hrtimer_forward(timer, now, rt_b->rt_period);
25 
26 		if (!overrun)
27 			break;
28 
29 		idle = do_sched_rt_period_timer(rt_b, overrun);
30 	}
31 
32 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
33 }
34 
35 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
36 {
37 	rt_b->rt_period = ns_to_ktime(period);
38 	rt_b->rt_runtime = runtime;
39 
40 	raw_spin_lock_init(&rt_b->rt_runtime_lock);
41 
42 	hrtimer_init(&rt_b->rt_period_timer,
43 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
44 	rt_b->rt_period_timer.function = sched_rt_period_timer;
45 }
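/*
 * A rough usage sketch, assuming the call site in kernel/sched/core.c: the
 * core scheduler initializes def_rt_bandwidth from the global sysctls at
 * boot, roughly as
 *
 *	init_rt_bandwidth(&def_rt_bandwidth,
 *			  global_rt_period(), global_rt_runtime());
 *
 * which, with the usual defaults of sched_rt_period_us = 1000000 and
 * sched_rt_runtime_us = 950000, allows RT tasks up to 950ms of every 1s
 * period.  Exact call site and defaults can differ between kernel versions.
 */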
46 
47 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
48 {
49 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
50 		return;
51 
52 	if (hrtimer_active(&rt_b->rt_period_timer))
53 		return;
54 
55 	raw_spin_lock(&rt_b->rt_runtime_lock);
56 	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
57 	raw_spin_unlock(&rt_b->rt_runtime_lock);
58 }
59 
60 void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
61 {
62 	struct rt_prio_array *array;
63 	int i;
64 
65 	array = &rt_rq->active;
66 	for (i = 0; i < MAX_RT_PRIO; i++) {
67 		INIT_LIST_HEAD(array->queue + i);
68 		__clear_bit(i, array->bitmap);
69 	}
70 	/* delimiter for bitsearch: */
71 	__set_bit(MAX_RT_PRIO, array->bitmap);
72 
73 #if defined CONFIG_SMP
74 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
75 	rt_rq->highest_prio.next = MAX_RT_PRIO;
76 	rt_rq->rt_nr_migratory = 0;
77 	rt_rq->overloaded = 0;
78 	plist_head_init(&rt_rq->pushable_tasks);
79 #endif
80 
81 	rt_rq->rt_time = 0;
82 	rt_rq->rt_throttled = 0;
83 	rt_rq->rt_runtime = 0;
84 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
85 }
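/*
 * Example of the delimiter bit above: with nothing queued the only set bit
 * is MAX_RT_PRIO, so sched_find_first_bit(array->bitmap) terminates there
 * and an index of MAX_RT_PRIO means "empty".  Whenever rt_nr_running is
 * non-zero, pick_next_rt_entity() can therefore assert idx < MAX_RT_PRIO.
 */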
86 
87 #ifdef CONFIG_RT_GROUP_SCHED
88 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
89 {
90 	hrtimer_cancel(&rt_b->rt_period_timer);
91 }
92 
93 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
94 
95 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
96 {
97 #ifdef CONFIG_SCHED_DEBUG
98 	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
99 #endif
100 	return container_of(rt_se, struct task_struct, rt);
101 }
102 
103 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
104 {
105 	return rt_rq->rq;
106 }
107 
108 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
109 {
110 	return rt_se->rt_rq;
111 }
112 
113 void free_rt_sched_group(struct task_group *tg)
114 {
115 	int i;
116 
117 	if (tg->rt_se)
118 		destroy_rt_bandwidth(&tg->rt_bandwidth);
119 
120 	for_each_possible_cpu(i) {
121 		if (tg->rt_rq)
122 			kfree(tg->rt_rq[i]);
123 		if (tg->rt_se)
124 			kfree(tg->rt_se[i]);
125 	}
126 
127 	kfree(tg->rt_rq);
128 	kfree(tg->rt_se);
129 }
130 
131 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
132 		struct sched_rt_entity *rt_se, int cpu,
133 		struct sched_rt_entity *parent)
134 {
135 	struct rq *rq = cpu_rq(cpu);
136 
137 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
138 	rt_rq->rt_nr_boosted = 0;
139 	rt_rq->rq = rq;
140 	rt_rq->tg = tg;
141 
142 	tg->rt_rq[cpu] = rt_rq;
143 	tg->rt_se[cpu] = rt_se;
144 
145 	if (!rt_se)
146 		return;
147 
148 	if (!parent)
149 		rt_se->rt_rq = &rq->rt;
150 	else
151 		rt_se->rt_rq = parent->my_q;
152 
153 	rt_se->my_q = rt_rq;
154 	rt_se->parent = parent;
155 	INIT_LIST_HEAD(&rt_se->run_list);
156 }
157 
158 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
159 {
160 	struct rt_rq *rt_rq;
161 	struct sched_rt_entity *rt_se;
162 	int i;
163 
164 	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
165 	if (!tg->rt_rq)
166 		goto err;
167 	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
168 	if (!tg->rt_se)
169 		goto err;
170 
171 	init_rt_bandwidth(&tg->rt_bandwidth,
172 			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
173 
174 	for_each_possible_cpu(i) {
175 		rt_rq = kzalloc_node(sizeof(struct rt_rq),
176 				     GFP_KERNEL, cpu_to_node(i));
177 		if (!rt_rq)
178 			goto err;
179 
180 		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
181 				     GFP_KERNEL, cpu_to_node(i));
182 		if (!rt_se)
183 			goto err_free_rq;
184 
185 		init_rt_rq(rt_rq, cpu_rq(i));
186 		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
187 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
188 	}
189 
190 	return 1;
191 
192 err_free_rq:
193 	kfree(rt_rq);
194 err:
195 	return 0;
196 }
197 
198 #else /* CONFIG_RT_GROUP_SCHED */
199 
200 #define rt_entity_is_task(rt_se) (1)
201 
202 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
203 {
204 	return container_of(rt_se, struct task_struct, rt);
205 }
206 
207 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
208 {
209 	return container_of(rt_rq, struct rq, rt);
210 }
211 
212 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
213 {
214 	struct task_struct *p = rt_task_of(rt_se);
215 	struct rq *rq = task_rq(p);
216 
217 	return &rq->rt;
218 }
219 
220 void free_rt_sched_group(struct task_group *tg) { }
221 
222 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
223 {
224 	return 1;
225 }
226 #endif /* CONFIG_RT_GROUP_SCHED */
227 
228 #ifdef CONFIG_SMP
229 
230 static inline int rt_overloaded(struct rq *rq)
231 {
232 	return atomic_read(&rq->rd->rto_count);
233 }
234 
235 static inline void rt_set_overload(struct rq *rq)
236 {
237 	if (!rq->online)
238 		return;
239 
240 	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
241 	/*
242 	 * Make sure the mask is visible before we set
243 	 * the overload count. That is checked to determine
244 	 * if we should look at the mask. It would be a shame
245 	 * if we looked at the mask, but the mask was not
246 	 * updated yet.
247 	 */
248 	wmb();
249 	atomic_inc(&rq->rd->rto_count);
250 }
251 
252 static inline void rt_clear_overload(struct rq *rq)
253 {
254 	if (!rq->online)
255 		return;
256 
257 	/* the order here really doesn't matter */
258 	atomic_dec(&rq->rd->rto_count);
259 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
260 }
261 
262 static void update_rt_migration(struct rt_rq *rt_rq)
263 {
264 	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
265 		if (!rt_rq->overloaded) {
266 			rt_set_overload(rq_of_rt_rq(rt_rq));
267 			rt_rq->overloaded = 1;
268 		}
269 	} else if (rt_rq->overloaded) {
270 		rt_clear_overload(rq_of_rt_rq(rt_rq));
271 		rt_rq->overloaded = 0;
272 	}
273 }
274 
275 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
276 {
277 	struct task_struct *p;
278 
279 	if (!rt_entity_is_task(rt_se))
280 		return;
281 
282 	p = rt_task_of(rt_se);
283 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
284 
285 	rt_rq->rt_nr_total++;
286 	if (p->nr_cpus_allowed > 1)
287 		rt_rq->rt_nr_migratory++;
288 
289 	update_rt_migration(rt_rq);
290 }
291 
292 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
293 {
294 	struct task_struct *p;
295 
296 	if (!rt_entity_is_task(rt_se))
297 		return;
298 
299 	p = rt_task_of(rt_se);
300 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
301 
302 	rt_rq->rt_nr_total--;
303 	if (p->nr_cpus_allowed > 1)
304 		rt_rq->rt_nr_migratory--;
305 
306 	update_rt_migration(rt_rq);
307 }
308 
309 static inline int has_pushable_tasks(struct rq *rq)
310 {
311 	return !plist_head_empty(&rq->rt.pushable_tasks);
312 }
313 
314 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
315 {
316 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
317 	plist_node_init(&p->pushable_tasks, p->prio);
318 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
319 
320 	/* Update the highest prio pushable task */
321 	if (p->prio < rq->rt.highest_prio.next)
322 		rq->rt.highest_prio.next = p->prio;
323 }
324 
325 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
326 {
327 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
328 
329 	/* Update the new highest prio pushable task */
330 	if (has_pushable_tasks(rq)) {
331 		p = plist_first_entry(&rq->rt.pushable_tasks,
332 				      struct task_struct, pushable_tasks);
333 		rq->rt.highest_prio.next = p->prio;
334 	} else
335 		rq->rt.highest_prio.next = MAX_RT_PRIO;
336 }
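/*
 * pushable_tasks is a plist sorted by prio value, so plist_first_entry()
 * is always the most important pushable task.  For example, with pushable
 * tasks of prio 10, 20 and 30 queued, highest_prio.next is 10; dequeueing
 * the prio-10 task drops it to 20, and removing the last one resets it to
 * MAX_RT_PRIO.
 */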
337 
338 #else
339 
340 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
341 {
342 }
343 
344 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
345 {
346 }
347 
348 static inline
349 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
350 {
351 }
352 
353 static inline
354 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
355 {
356 }
357 
358 #endif /* CONFIG_SMP */
359 
360 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
361 {
362 	return !list_empty(&rt_se->run_list);
363 }
364 
365 #ifdef CONFIG_RT_GROUP_SCHED
366 
367 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
368 {
369 	if (!rt_rq->tg)
370 		return RUNTIME_INF;
371 
372 	return rt_rq->rt_runtime;
373 }
374 
375 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
376 {
377 	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
378 }
379 
380 typedef struct task_group *rt_rq_iter_t;
381 
382 static inline struct task_group *next_task_group(struct task_group *tg)
383 {
384 	do {
385 		tg = list_entry_rcu(tg->list.next,
386 			typeof(struct task_group), list);
387 	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
388 
389 	if (&tg->list == &task_groups)
390 		tg = NULL;
391 
392 	return tg;
393 }
394 
395 #define for_each_rt_rq(rt_rq, iter, rq)					\
396 	for (iter = container_of(&task_groups, typeof(*iter), list);	\
397 		(iter = next_task_group(iter)) &&			\
398 		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
399 
400 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
401 {
402 	list_add_rcu(&rt_rq->leaf_rt_rq_list,
403 			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
404 }
405 
406 static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
407 {
408 	list_del_rcu(&rt_rq->leaf_rt_rq_list);
409 }
410 
411 #define for_each_leaf_rt_rq(rt_rq, rq) \
412 	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
413 
414 #define for_each_sched_rt_entity(rt_se) \
415 	for (; rt_se; rt_se = rt_se->parent)
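/*
 * With group scheduling this walks a task's entity chain bottom-up.  A
 * rough sketch, assuming a task T in a child group B nested inside group A:
 *
 *	&T->rt  ->  B's rt_se on A's rt_rq  ->  A's rt_se on the root rt_rq
 *
 * so the enqueue, dequeue and bandwidth-accounting loops below apply at
 * every level of the hierarchy.
 */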
416 
417 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
418 {
419 	return rt_se->my_q;
420 }
421 
422 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
423 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
424 
425 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
426 {
427 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
428 	struct sched_rt_entity *rt_se;
429 
430 	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
431 
432 	rt_se = rt_rq->tg->rt_se[cpu];
433 
434 	if (rt_rq->rt_nr_running) {
435 		if (rt_se && !on_rt_rq(rt_se))
436 			enqueue_rt_entity(rt_se, false);
437 		if (rt_rq->highest_prio.curr < curr->prio)
438 			resched_task(curr);
439 	}
440 }
441 
442 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
443 {
444 	struct sched_rt_entity *rt_se;
445 	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
446 
447 	rt_se = rt_rq->tg->rt_se[cpu];
448 
449 	if (rt_se && on_rt_rq(rt_se))
450 		dequeue_rt_entity(rt_se);
451 }
452 
453 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
454 {
455 	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
456 }
457 
458 static int rt_se_boosted(struct sched_rt_entity *rt_se)
459 {
460 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
461 	struct task_struct *p;
462 
463 	if (rt_rq)
464 		return !!rt_rq->rt_nr_boosted;
465 
466 	p = rt_task_of(rt_se);
467 	return p->prio != p->normal_prio;
468 }
469 
470 #ifdef CONFIG_SMP
471 static inline const struct cpumask *sched_rt_period_mask(void)
472 {
473 	return cpu_rq(smp_processor_id())->rd->span;
474 }
475 #else
476 static inline const struct cpumask *sched_rt_period_mask(void)
477 {
478 	return cpu_online_mask;
479 }
480 #endif
481 
482 static inline
483 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
484 {
485 	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
486 }
487 
488 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
489 {
490 	return &rt_rq->tg->rt_bandwidth;
491 }
492 
493 #else /* !CONFIG_RT_GROUP_SCHED */
494 
495 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
496 {
497 	return rt_rq->rt_runtime;
498 }
499 
500 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
501 {
502 	return ktime_to_ns(def_rt_bandwidth.rt_period);
503 }
504 
505 typedef struct rt_rq *rt_rq_iter_t;
506 
507 #define for_each_rt_rq(rt_rq, iter, rq) \
508 	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
509 
510 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
511 {
512 }
513 
514 static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
515 {
516 }
517 
518 #define for_each_leaf_rt_rq(rt_rq, rq) \
519 	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
520 
521 #define for_each_sched_rt_entity(rt_se) \
522 	for (; rt_se; rt_se = NULL)
523 
524 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
525 {
526 	return NULL;
527 }
528 
529 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
530 {
531 	if (rt_rq->rt_nr_running)
532 		resched_task(rq_of_rt_rq(rt_rq)->curr);
533 }
534 
535 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
536 {
537 }
538 
539 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
540 {
541 	return rt_rq->rt_throttled;
542 }
543 
544 static inline const struct cpumask *sched_rt_period_mask(void)
545 {
546 	return cpu_online_mask;
547 }
548 
549 static inline
550 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
551 {
552 	return &cpu_rq(cpu)->rt;
553 }
554 
555 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
556 {
557 	return &def_rt_bandwidth;
558 }
559 
560 #endif /* CONFIG_RT_GROUP_SCHED */
561 
562 #ifdef CONFIG_SMP
563 /*
564  * We ran out of runtime; see if we can borrow some from our neighbours.
565  */
566 static int do_balance_runtime(struct rt_rq *rt_rq)
567 {
568 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
569 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
570 	int i, weight, more = 0;
571 	u64 rt_period;
572 
573 	weight = cpumask_weight(rd->span);
574 
575 	raw_spin_lock(&rt_b->rt_runtime_lock);
576 	rt_period = ktime_to_ns(rt_b->rt_period);
577 	for_each_cpu(i, rd->span) {
578 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
579 		s64 diff;
580 
581 		if (iter == rt_rq)
582 			continue;
583 
584 		raw_spin_lock(&iter->rt_runtime_lock);
585 		/*
586 		 * Either all rqs have inf runtime and there's nothing to steal
587 		 * or __disable_runtime() below sets a specific rq to inf to
588 		 * indicate it's been disabled and disallow stealing.
589 		 */
590 		if (iter->rt_runtime == RUNTIME_INF)
591 			goto next;
592 
593 		/*
594 		 * From runqueues with spare time, take 1/n part of their
595 		 * spare time, but no more than our period.
596 		 */
597 		diff = iter->rt_runtime - iter->rt_time;
598 		if (diff > 0) {
599 			diff = div_u64((u64)diff, weight);
600 			if (rt_rq->rt_runtime + diff > rt_period)
601 				diff = rt_period - rt_rq->rt_runtime;
602 			iter->rt_runtime -= diff;
603 			rt_rq->rt_runtime += diff;
604 			more = 1;
605 			if (rt_rq->rt_runtime == rt_period) {
606 				raw_spin_unlock(&iter->rt_runtime_lock);
607 				break;
608 			}
609 		}
610 next:
611 		raw_spin_unlock(&iter->rt_runtime_lock);
612 	}
613 	raw_spin_unlock(&rt_b->rt_runtime_lock);
614 
615 	return more;
616 }
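/*
 * Worked example, assuming a 4-CPU root domain and the usual 950ms runtime
 * in a 1000ms period: if this rt_rq has exhausted its 950ms and a neighbour
 * has 200ms spare (rt_runtime - rt_time), we take 200ms / 4 = 50ms from it.
 * Repeating over the other neighbours, our rt_runtime can grow to at most
 * the full 1000ms period, at which point the loop stops early.
 */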
617 
618 /*
619  * Ensure this RQ takes back all the runtime it lent to its neighbours.
620  */
621 static void __disable_runtime(struct rq *rq)
622 {
623 	struct root_domain *rd = rq->rd;
624 	rt_rq_iter_t iter;
625 	struct rt_rq *rt_rq;
626 
627 	if (unlikely(!scheduler_running))
628 		return;
629 
630 	for_each_rt_rq(rt_rq, iter, rq) {
631 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
632 		s64 want;
633 		int i;
634 
635 		raw_spin_lock(&rt_b->rt_runtime_lock);
636 		raw_spin_lock(&rt_rq->rt_runtime_lock);
637 		/*
638 		 * Either we're all inf and nobody needs to borrow, or we're
639 		 * already disabled and thus have nothing to do, or we have
640 		 * exactly the right amount of runtime to take out.
641 		 */
642 		if (rt_rq->rt_runtime == RUNTIME_INF ||
643 				rt_rq->rt_runtime == rt_b->rt_runtime)
644 			goto balanced;
645 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
646 
647 		/*
648 		 * Calculate the difference between what we started out with
649 		 * and what we currently have; that's the amount of runtime
650 		 * we lent out and now have to reclaim.
651 		 */
652 		want = rt_b->rt_runtime - rt_rq->rt_runtime;
653 
654 		/*
655 		 * Greedy reclaim, take back as much as we can.
656 		 */
657 		for_each_cpu(i, rd->span) {
658 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
659 			s64 diff;
660 
661 			/*
662 			 * Can't reclaim from ourselves or disabled runqueues.
663 			 */
664 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
665 				continue;
666 
667 			raw_spin_lock(&iter->rt_runtime_lock);
668 			if (want > 0) {
669 				diff = min_t(s64, iter->rt_runtime, want);
670 				iter->rt_runtime -= diff;
671 				want -= diff;
672 			} else {
673 				iter->rt_runtime -= want;
674 				want -= want;
675 			}
676 			raw_spin_unlock(&iter->rt_runtime_lock);
677 
678 			if (!want)
679 				break;
680 		}
681 
682 		raw_spin_lock(&rt_rq->rt_runtime_lock);
683 		/*
684 		 * We cannot be left wanting - that would mean some runtime
685 		 * leaked out of the system.
686 		 */
687 		BUG_ON(want);
688 balanced:
689 		/*
690 		 * Disable all the borrow logic by pretending we have inf
691 		 * runtime - in which case borrowing doesn't make sense.
692 		 */
693 		rt_rq->rt_runtime = RUNTIME_INF;
694 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
695 		raw_spin_unlock(&rt_b->rt_runtime_lock);
696 	}
697 }
698 
699 static void disable_runtime(struct rq *rq)
700 {
701 	unsigned long flags;
702 
703 	raw_spin_lock_irqsave(&rq->lock, flags);
704 	__disable_runtime(rq);
705 	raw_spin_unlock_irqrestore(&rq->lock, flags);
706 }
707 
708 static void __enable_runtime(struct rq *rq)
709 {
710 	rt_rq_iter_t iter;
711 	struct rt_rq *rt_rq;
712 
713 	if (unlikely(!scheduler_running))
714 		return;
715 
716 	/*
717 	 * Reset each runqueue's bandwidth settings
718 	 */
719 	for_each_rt_rq(rt_rq, iter, rq) {
720 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
721 
722 		raw_spin_lock(&rt_b->rt_runtime_lock);
723 		raw_spin_lock(&rt_rq->rt_runtime_lock);
724 		rt_rq->rt_runtime = rt_b->rt_runtime;
725 		rt_rq->rt_time = 0;
726 		rt_rq->rt_throttled = 0;
727 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
728 		raw_spin_unlock(&rt_b->rt_runtime_lock);
729 	}
730 }
731 
732 static void enable_runtime(struct rq *rq)
733 {
734 	unsigned long flags;
735 
736 	raw_spin_lock_irqsave(&rq->lock, flags);
737 	__enable_runtime(rq);
738 	raw_spin_unlock_irqrestore(&rq->lock, flags);
739 }
740 
741 int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
742 {
743 	int cpu = (int)(long)hcpu;
744 
745 	switch (action) {
746 	case CPU_DOWN_PREPARE:
747 	case CPU_DOWN_PREPARE_FROZEN:
748 		disable_runtime(cpu_rq(cpu));
749 		return NOTIFY_OK;
750 
751 	case CPU_DOWN_FAILED:
752 	case CPU_DOWN_FAILED_FROZEN:
753 	case CPU_ONLINE:
754 	case CPU_ONLINE_FROZEN:
755 		enable_runtime(cpu_rq(cpu));
756 		return NOTIFY_OK;
757 
758 	default:
759 		return NOTIFY_DONE;
760 	}
761 }
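/*
 * A sketch of how this notifier is typically wired up (the exact call site
 * lives in kernel/sched/core.c and varies between versions):
 *
 *	hotcpu_notifier(update_runtime, 0);
 *
 * so a CPU reclaims the runtime it lent out before going down, and has its
 * bandwidth reset from the per-group defaults when it comes back up.
 */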
762 
763 static int balance_runtime(struct rt_rq *rt_rq)
764 {
765 	int more = 0;
766 
767 	if (!sched_feat(RT_RUNTIME_SHARE))
768 		return more;
769 
770 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
771 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
772 		more = do_balance_runtime(rt_rq);
773 		raw_spin_lock(&rt_rq->rt_runtime_lock);
774 	}
775 
776 	return more;
777 }
778 #else /* !CONFIG_SMP */
779 static inline int balance_runtime(struct rt_rq *rt_rq)
780 {
781 	return 0;
782 }
783 #endif /* CONFIG_SMP */
784 
785 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
786 {
787 	int i, idle = 1, throttled = 0;
788 	const struct cpumask *span;
789 
790 	span = sched_rt_period_mask();
791 #ifdef CONFIG_RT_GROUP_SCHED
792 	/*
793 	 * FIXME: isolated CPUs should really leave the root task group,
794 	 * whether they are isolcpus or were isolated via cpusets, lest
795 	 * the timer run on a CPU which does not service all runqueues,
796 	 * potentially leaving other CPUs indefinitely throttled.  If
797 	 * isolation is really required, the user will turn the throttle
798 	 * off to kill the perturbations it causes anyway.  Meanwhile,
799 	 * this maintains functionality for boot and/or troubleshooting.
800 	 */
801 	if (rt_b == &root_task_group.rt_bandwidth)
802 		span = cpu_online_mask;
803 #endif
804 	for_each_cpu(i, span) {
805 		int enqueue = 0;
806 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
807 		struct rq *rq = rq_of_rt_rq(rt_rq);
808 
809 		raw_spin_lock(&rq->lock);
810 		if (rt_rq->rt_time) {
811 			u64 runtime;
812 
813 			raw_spin_lock(&rt_rq->rt_runtime_lock);
814 			if (rt_rq->rt_throttled)
815 				balance_runtime(rt_rq);
816 			runtime = rt_rq->rt_runtime;
817 			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
818 			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
819 				rt_rq->rt_throttled = 0;
820 				enqueue = 1;
821 
822 				/*
823 				 * Force a clock update if the CPU was idle,
824 				 * lest wakeup -> unthrottle time accumulate.
825 				 */
826 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
827 					rq->skip_clock_update = -1;
828 			}
829 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
830 				idle = 0;
831 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
832 		} else if (rt_rq->rt_nr_running) {
833 			idle = 0;
834 			if (!rt_rq_throttled(rt_rq))
835 				enqueue = 1;
836 		}
837 		if (rt_rq->rt_throttled)
838 			throttled = 1;
839 
840 		if (enqueue)
841 			sched_rt_rq_enqueue(rt_rq);
842 		raw_spin_unlock(&rq->lock);
843 	}
844 
845 	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
846 		return 1;
847 
848 	return idle;
849 }
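/*
 * Replenishment example: if the timer expired with overrun == 2 and this
 * rt_rq's runtime is 950ms, up to 2 * 950ms of accumulated rt_time is
 * forgiven in one go; the throttle is lifted once rt_time drops below the
 * runtime and the rt_rq is re-enqueued.  Returning 1 (idle) lets
 * sched_rt_period_timer() above stop the timer until RT activity resumes.
 */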
850 
851 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
852 {
853 #ifdef CONFIG_RT_GROUP_SCHED
854 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
855 
856 	if (rt_rq)
857 		return rt_rq->highest_prio.curr;
858 #endif
859 
860 	return rt_task_of(rt_se)->prio;
861 }
862 
863 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
864 {
865 	u64 runtime = sched_rt_runtime(rt_rq);
866 
867 	if (rt_rq->rt_throttled)
868 		return rt_rq_throttled(rt_rq);
869 
870 	if (runtime >= sched_rt_period(rt_rq))
871 		return 0;
872 
873 	balance_runtime(rt_rq);
874 	runtime = sched_rt_runtime(rt_rq);
875 	if (runtime == RUNTIME_INF)
876 		return 0;
877 
878 	if (rt_rq->rt_time > runtime) {
879 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
880 
881 		/*
882 		 * Don't actually throttle groups that have no runtime assigned
883 		 * but accrue some time due to boosting.
884 		 */
885 		if (likely(rt_b->rt_runtime)) {
886 			static bool once = false;
887 
888 			rt_rq->rt_throttled = 1;
889 
890 			if (!once) {
891 				once = true;
892 				printk_sched("sched: RT throttling activated\n");
893 			}
894 		} else {
895 			/*
896 			 * In case we did anyway, make it go away;
897 			 * replenishment is a joke, since it will replenish us
898 			 * with exactly 0 ns.
899 			 */
900 			rt_rq->rt_time = 0;
901 		}
902 
903 		if (rt_rq_throttled(rt_rq)) {
904 			sched_rt_rq_dequeue(rt_rq);
905 			return 1;
906 		}
907 	}
908 
909 	return 0;
910 }
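/*
 * Throttling example, assuming the default 950ms runtime in a 1000ms
 * period: once this rt_rq accumulates more than 950ms of rt_time within a
 * period, rt_throttled is set and the whole rt_rq is dequeued, leaving the
 * remaining ~5% of the CPU to non-RT tasks until do_sched_rt_period_timer()
 * replenishes the runtime.  Boosted entities (rt_nr_boosted) are
 * deliberately exempted via rt_rq_throttled().
 */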
911 
912 /*
913  * Update the current task's runtime statistics. Skip current tasks that
914  * are not in our scheduling class.
915  */
916 static void update_curr_rt(struct rq *rq)
917 {
918 	struct task_struct *curr = rq->curr;
919 	struct sched_rt_entity *rt_se = &curr->rt;
920 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
921 	u64 delta_exec;
922 
923 	if (curr->sched_class != &rt_sched_class)
924 		return;
925 
926 	delta_exec = rq->clock_task - curr->se.exec_start;
927 	if (unlikely((s64)delta_exec < 0))
928 		delta_exec = 0;
929 
930 	schedstat_set(curr->se.statistics.exec_max,
931 		      max(curr->se.statistics.exec_max, delta_exec));
932 
933 	curr->se.sum_exec_runtime += delta_exec;
934 	account_group_exec_runtime(curr, delta_exec);
935 
936 	curr->se.exec_start = rq->clock_task;
937 	cpuacct_charge(curr, delta_exec);
938 
939 	sched_rt_avg_update(rq, delta_exec);
940 
941 	if (!rt_bandwidth_enabled())
942 		return;
943 
944 	for_each_sched_rt_entity(rt_se) {
945 		rt_rq = rt_rq_of_se(rt_se);
946 
947 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
948 			raw_spin_lock(&rt_rq->rt_runtime_lock);
949 			rt_rq->rt_time += delta_exec;
950 			if (sched_rt_runtime_exceeded(rt_rq))
951 				resched_task(curr);
952 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
953 		}
954 	}
955 }
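/*
 * Note that the bandwidth charge walks the whole hierarchy: the same
 * delta_exec is added to the task's own rt_rq and to every ancestor
 * group's rt_rq, so a child group can be throttled by a parent's limit
 * even when its own rt_runtime is not yet exhausted.
 */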
956 
957 #if defined CONFIG_SMP
958 
959 static void
960 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
961 {
962 	struct rq *rq = rq_of_rt_rq(rt_rq);
963 
964 	if (rq->online && prio < prev_prio)
965 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
966 }
967 
968 static void
969 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
970 {
971 	struct rq *rq = rq_of_rt_rq(rt_rq);
972 
973 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
974 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
975 }
976 
977 #else /* CONFIG_SMP */
978 
979 static inline
980 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
981 static inline
982 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
983 
984 #endif /* CONFIG_SMP */
985 
986 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
987 static void
988 inc_rt_prio(struct rt_rq *rt_rq, int prio)
989 {
990 	int prev_prio = rt_rq->highest_prio.curr;
991 
992 	if (prio < prev_prio)
993 		rt_rq->highest_prio.curr = prio;
994 
995 	inc_rt_prio_smp(rt_rq, prio, prev_prio);
996 }
997 
998 static void
999 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1000 {
1001 	int prev_prio = rt_rq->highest_prio.curr;
1002 
1003 	if (rt_rq->rt_nr_running) {
1004 
1005 		WARN_ON(prio < prev_prio);
1006 
1007 		/*
1008 		 * This may have been our highest task, and therefore
1009 		 * we may have some recomputation to do
1010 		 */
1011 		if (prio == prev_prio) {
1012 			struct rt_prio_array *array = &rt_rq->active;
1013 
1014 			rt_rq->highest_prio.curr =
1015 				sched_find_first_bit(array->bitmap);
1016 		}
1017 
1018 	} else
1019 		rt_rq->highest_prio.curr = MAX_RT_PRIO;
1020 
1021 	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1022 }
1023 
1024 #else
1025 
1026 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1027 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1028 
1029 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1030 
1031 #ifdef CONFIG_RT_GROUP_SCHED
1032 
1033 static void
1034 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1035 {
1036 	if (rt_se_boosted(rt_se))
1037 		rt_rq->rt_nr_boosted++;
1038 
1039 	if (rt_rq->tg)
1040 		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1041 }
1042 
1043 static void
1044 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1045 {
1046 	if (rt_se_boosted(rt_se))
1047 		rt_rq->rt_nr_boosted--;
1048 
1049 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1050 }
1051 
1052 #else /* CONFIG_RT_GROUP_SCHED */
1053 
1054 static void
1055 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1056 {
1057 	start_rt_bandwidth(&def_rt_bandwidth);
1058 }
1059 
1060 static inline
1061 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1062 
1063 #endif /* CONFIG_RT_GROUP_SCHED */
1064 
1065 static inline
1066 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1067 {
1068 	int prio = rt_se_prio(rt_se);
1069 
1070 	WARN_ON(!rt_prio(prio));
1071 	rt_rq->rt_nr_running++;
1072 
1073 	inc_rt_prio(rt_rq, prio);
1074 	inc_rt_migration(rt_se, rt_rq);
1075 	inc_rt_group(rt_se, rt_rq);
1076 }
1077 
1078 static inline
1079 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1080 {
1081 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1082 	WARN_ON(!rt_rq->rt_nr_running);
1083 	rt_rq->rt_nr_running--;
1084 
1085 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1086 	dec_rt_migration(rt_se, rt_rq);
1087 	dec_rt_group(rt_se, rt_rq);
1088 }
1089 
1090 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1091 {
1092 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1093 	struct rt_prio_array *array = &rt_rq->active;
1094 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1095 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1096 
1097 	/*
1098 	 * Don't enqueue the group if it's throttled, or when empty.
1099 	 * The latter is a consequence of the former when a child group
1100 	 * gets throttled and the current group doesn't have any other
1101 	 * active members.
1102 	 */
1103 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1104 		return;
1105 
1106 	if (!rt_rq->rt_nr_running)
1107 		list_add_leaf_rt_rq(rt_rq);
1108 
1109 	if (head)
1110 		list_add(&rt_se->run_list, queue);
1111 	else
1112 		list_add_tail(&rt_se->run_list, queue);
1113 	__set_bit(rt_se_prio(rt_se), array->bitmap);
1114 
1115 	inc_rt_tasks(rt_se, rt_rq);
1116 }
1117 
1118 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1119 {
1120 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1121 	struct rt_prio_array *array = &rt_rq->active;
1122 
1123 	list_del_init(&rt_se->run_list);
1124 	if (list_empty(array->queue + rt_se_prio(rt_se)))
1125 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1126 
1127 	dec_rt_tasks(rt_se, rt_rq);
1128 	if (!rt_rq->rt_nr_running)
1129 		list_del_leaf_rt_rq(rt_rq);
1130 }
1131 
1132 /*
1133  * Because the prio of an upper entry depends on the lower
1134  * entries, we must remove entries top-down.
1135  */
1136 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1137 {
1138 	struct sched_rt_entity *back = NULL;
1139 
1140 	for_each_sched_rt_entity(rt_se) {
1141 		rt_se->back = back;
1142 		back = rt_se;
1143 	}
1144 
1145 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1146 		if (on_rt_rq(rt_se))
1147 			__dequeue_rt_entity(rt_se);
1148 	}
1149 }
1150 
1151 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1152 {
1153 	dequeue_rt_stack(rt_se);
1154 	for_each_sched_rt_entity(rt_se)
1155 		__enqueue_rt_entity(rt_se, head);
1156 }
1157 
1158 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1159 {
1160 	dequeue_rt_stack(rt_se);
1161 
1162 	for_each_sched_rt_entity(rt_se) {
1163 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1164 
1165 		if (rt_rq && rt_rq->rt_nr_running)
1166 			__enqueue_rt_entity(rt_se, false);
1167 	}
1168 }
1169 
1170 /*
1171  * Adding/removing a task to/from a priority array:
1172  */
1173 static void
1174 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1175 {
1176 	struct sched_rt_entity *rt_se = &p->rt;
1177 
1178 	if (flags & ENQUEUE_WAKEUP)
1179 		rt_se->timeout = 0;
1180 
1181 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1182 
1183 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1184 		enqueue_pushable_task(rq, p);
1185 
1186 	inc_nr_running(rq);
1187 }
1188 
1189 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1190 {
1191 	struct sched_rt_entity *rt_se = &p->rt;
1192 
1193 	update_curr_rt(rq);
1194 	dequeue_rt_entity(rt_se);
1195 
1196 	dequeue_pushable_task(rq, p);
1197 
1198 	dec_nr_running(rq);
1199 }
1200 
1201 /*
1202  * Put a task at the head or the end of the run list without the overhead of
1203  * dequeue followed by enqueue.
1204  */
1205 static void
1206 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1207 {
1208 	if (on_rt_rq(rt_se)) {
1209 		struct rt_prio_array *array = &rt_rq->active;
1210 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1211 
1212 		if (head)
1213 			list_move(&rt_se->run_list, queue);
1214 		else
1215 			list_move_tail(&rt_se->run_list, queue);
1216 	}
1217 }
1218 
1219 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1220 {
1221 	struct sched_rt_entity *rt_se = &p->rt;
1222 	struct rt_rq *rt_rq;
1223 
1224 	for_each_sched_rt_entity(rt_se) {
1225 		rt_rq = rt_rq_of_se(rt_se);
1226 		requeue_rt_entity(rt_rq, rt_se, head);
1227 	}
1228 }
1229 
1230 static void yield_task_rt(struct rq *rq)
1231 {
1232 	requeue_task_rt(rq, rq->curr, 0);
1233 }
1234 
1235 #ifdef CONFIG_SMP
1236 static int find_lowest_rq(struct task_struct *task);
1237 
1238 static int
1239 select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
1240 {
1241 	struct task_struct *curr;
1242 	struct rq *rq;
1243 	int cpu;
1244 
1245 	cpu = task_cpu(p);
1246 
1247 	if (p->nr_cpus_allowed == 1)
1248 		goto out;
1249 
1250 	/* For anything but wake ups, just return the task_cpu */
1251 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1252 		goto out;
1253 
1254 	rq = cpu_rq(cpu);
1255 
1256 	rcu_read_lock();
1257 	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1258 
1259 	/*
1260 	 * If the current task on @p's runqueue is an RT task, then
1261 	 * try to see if we can wake this RT task up on another
1262 	 * runqueue. Otherwise simply start this RT task
1263 	 * on its current runqueue.
1264 	 *
1265 	 * We want to avoid overloading runqueues. If the woken
1266 	 * task is a higher priority, then it will stay on this CPU
1267 	 * and the lower prio task should be moved to another CPU.
1268 	 * Even though this will probably make the lower prio task
1269 	 * lose its cache, we do not want to bounce a higher task
1270 	 * around just because it gave up its CPU, perhaps for a
1271 	 * lock?
1272 	 *
1273 	 * For equal prio tasks, we just let the scheduler sort it out.
1274 	 *
1275 	 * Otherwise, just let it ride on the affined RQ and the
1276 	 * post-schedule router will push the preempted task away
1277 	 *
1278 	 * This test is optimistic, if we get it wrong the load-balancer
1279 	 * will have to sort it out.
1280 	 */
1281 	if (curr && unlikely(rt_task(curr)) &&
1282 	    (curr->nr_cpus_allowed < 2 ||
1283 	     curr->prio <= p->prio) &&
1284 	    (p->nr_cpus_allowed > 1)) {
1285 		int target = find_lowest_rq(p);
1286 
1287 		if (target != -1)
1288 			cpu = target;
1289 	}
1290 	rcu_read_unlock();
1291 
1292 out:
1293 	return cpu;
1294 }
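/*
 * Example of the heuristic above (a lower prio value is more important):
 * if the CPU p last ran on is busy with an RT task at prio 5 and p wakes
 * at prio 10, curr->prio <= p->prio holds and p would not preempt, so we
 * try find_lowest_rq() and place p elsewhere.  If p instead wakes at
 * prio 3 and curr is free to migrate, p stays put, preempts curr, and the
 * post-schedule push logic may move curr away.
 */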
1295 
1296 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1297 {
1298 	if (rq->curr->nr_cpus_allowed == 1)
1299 		return;
1300 
1301 	if (p->nr_cpus_allowed != 1
1302 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
1303 		return;
1304 
1305 	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1306 		return;
1307 
1308 	/*
1309 	 * There appear to be other CPUs that can accept
1310 	 * current and none to run 'p', so let's reschedule
1311 	 * to try and push current away:
1312 	 */
1313 	requeue_task_rt(rq, p, 1);
1314 	resched_task(rq->curr);
1315 }
1316 
1317 #endif /* CONFIG_SMP */
1318 
1319 /*
1320  * Preempt the current task with a newly woken task if needed:
1321  */
1322 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1323 {
1324 	if (p->prio < rq->curr->prio) {
1325 		resched_task(rq->curr);
1326 		return;
1327 	}
1328 
1329 #ifdef CONFIG_SMP
1330 	/*
1331 	 * If:
1332 	 *
1333 	 * - the newly woken task is of equal priority to the current task
1334 	 * - the newly woken task is non-migratable while current is migratable
1335 	 * - current will be preempted on the next reschedule
1336 	 *
1337 	 * we should check to see if current can readily move to a different
1338 	 * cpu.  If so, we will reschedule to allow the push logic to try
1339 	 * to move current somewhere else, making room for our non-migratable
1340 	 * task.
1341 	 */
1342 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1343 		check_preempt_equal_prio(rq, p);
1344 #endif
1345 }
1346 
1347 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1348 						   struct rt_rq *rt_rq)
1349 {
1350 	struct rt_prio_array *array = &rt_rq->active;
1351 	struct sched_rt_entity *next = NULL;
1352 	struct list_head *queue;
1353 	int idx;
1354 
1355 	idx = sched_find_first_bit(array->bitmap);
1356 	BUG_ON(idx >= MAX_RT_PRIO);
1357 
1358 	queue = array->queue + idx;
1359 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1360 
1361 	return next;
1362 }
1363 
1364 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1365 {
1366 	struct sched_rt_entity *rt_se;
1367 	struct task_struct *p;
1368 	struct rt_rq *rt_rq;
1369 
1370 	rt_rq = &rq->rt;
1371 
1372 	if (!rt_rq->rt_nr_running)
1373 		return NULL;
1374 
1375 	if (rt_rq_throttled(rt_rq))
1376 		return NULL;
1377 
1378 	do {
1379 		rt_se = pick_next_rt_entity(rq, rt_rq);
1380 		BUG_ON(!rt_se);
1381 		rt_rq = group_rt_rq(rt_se);
1382 	} while (rt_rq);
1383 
1384 	p = rt_task_of(rt_se);
1385 	p->se.exec_start = rq->clock_task;
1386 
1387 	return p;
1388 }
1389 
1390 static struct task_struct *pick_next_task_rt(struct rq *rq)
1391 {
1392 	struct task_struct *p = _pick_next_task_rt(rq);
1393 
1394 	/* The running task is never eligible for pushing */
1395 	if (p)
1396 		dequeue_pushable_task(rq, p);
1397 
1398 #ifdef CONFIG_SMP
1399 	/*
1400 	 * We detect this state here so that we can avoid taking the RQ
1401 	 * lock again later if there is no need to push
1402 	 */
1403 	rq->post_schedule = has_pushable_tasks(rq);
1404 #endif
1405 
1406 	return p;
1407 }
1408 
1409 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1410 {
1411 	update_curr_rt(rq);
1412 
1413 	/*
1414 	 * The previous task needs to be made eligible for pushing
1415 	 * if it is still active
1416 	 */
1417 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1418 		enqueue_pushable_task(rq, p);
1419 }
1420 
1421 #ifdef CONFIG_SMP
1422 
1423 /* Only try algorithms three times */
1424 #define RT_MAX_TRIES 3
1425 
1426 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1427 {
1428 	if (!task_running(rq, p) &&
1429 	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
1430 	    (p->nr_cpus_allowed > 1))
1431 		return 1;
1432 	return 0;
1433 }
1434 
1435 /* Return the second highest RT task, NULL otherwise */
1436 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1437 {
1438 	struct task_struct *next = NULL;
1439 	struct sched_rt_entity *rt_se;
1440 	struct rt_prio_array *array;
1441 	struct rt_rq *rt_rq;
1442 	int idx;
1443 
1444 	for_each_leaf_rt_rq(rt_rq, rq) {
1445 		array = &rt_rq->active;
1446 		idx = sched_find_first_bit(array->bitmap);
1447 next_idx:
1448 		if (idx >= MAX_RT_PRIO)
1449 			continue;
1450 		if (next && next->prio <= idx)
1451 			continue;
1452 		list_for_each_entry(rt_se, array->queue + idx, run_list) {
1453 			struct task_struct *p;
1454 
1455 			if (!rt_entity_is_task(rt_se))
1456 				continue;
1457 
1458 			p = rt_task_of(rt_se);
1459 			if (pick_rt_task(rq, p, cpu)) {
1460 				next = p;
1461 				break;
1462 			}
1463 		}
1464 		if (!next) {
1465 			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1466 			goto next_idx;
1467 		}
1468 	}
1469 
1470 	return next;
1471 }
1472 
1473 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1474 
1475 static int find_lowest_rq(struct task_struct *task)
1476 {
1477 	struct sched_domain *sd;
1478 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1479 	int this_cpu = smp_processor_id();
1480 	int cpu      = task_cpu(task);
1481 
1482 	/* Make sure the mask is initialized first */
1483 	if (unlikely(!lowest_mask))
1484 		return -1;
1485 
1486 	if (task->nr_cpus_allowed == 1)
1487 		return -1; /* No other targets possible */
1488 
1489 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1490 		return -1; /* No targets found */
1491 
1492 	/*
1493 	 * At this point we have built a mask of cpus representing the
1494 	 * lowest priority tasks in the system.  Now we want to elect
1495 	 * the best one based on our affinity and topology.
1496 	 *
1497 	 * We prioritize the last cpu that the task executed on since
1498 	 * it is most likely cache-hot in that location.
1499 	 */
1500 	if (cpumask_test_cpu(cpu, lowest_mask))
1501 		return cpu;
1502 
1503 	/*
1504 	 * Otherwise, we consult the sched_domains span maps to figure
1505 	 * out which cpu is logically closest to our hot cache data.
1506 	 */
1507 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1508 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1509 
1510 	rcu_read_lock();
1511 	for_each_domain(cpu, sd) {
1512 		if (sd->flags & SD_WAKE_AFFINE) {
1513 			int best_cpu;
1514 
1515 			/*
1516 			 * "this_cpu" is cheaper to preempt than a
1517 			 * remote processor.
1518 			 */
1519 			if (this_cpu != -1 &&
1520 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1521 				rcu_read_unlock();
1522 				return this_cpu;
1523 			}
1524 
1525 			best_cpu = cpumask_first_and(lowest_mask,
1526 						     sched_domain_span(sd));
1527 			if (best_cpu < nr_cpu_ids) {
1528 				rcu_read_unlock();
1529 				return best_cpu;
1530 			}
1531 		}
1532 	}
1533 	rcu_read_unlock();
1534 
1535 	/*
1536 	 * And finally, if there were no matches within the domains
1537 	 * just give the caller *something* to work with from the compatible
1538 	 * locations.
1539 	 */
1540 	if (this_cpu != -1)
1541 		return this_cpu;
1542 
1543 	cpu = cpumask_any(lowest_mask);
1544 	if (cpu < nr_cpu_ids)
1545 		return cpu;
1546 	return -1;
1547 }
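/*
 * Selection order of the above, in words: (1) the task's last CPU if it is
 * in lowest_mask, (2) this_cpu, provided it is in lowest_mask and shares an
 * SD_WAKE_AFFINE domain with the task's CPU, (3) the first lowest_mask CPU
 * inside such a domain, (4) this_cpu as a fallback, (5) any lowest_mask
 * CPU, and finally (6) -1 if no suitable CPU exists.
 */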
1548 
1549 /* Will lock the rq it finds */
1550 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1551 {
1552 	struct rq *lowest_rq = NULL;
1553 	int tries;
1554 	int cpu;
1555 
1556 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1557 		cpu = find_lowest_rq(task);
1558 
1559 		if ((cpu == -1) || (cpu == rq->cpu))
1560 			break;
1561 
1562 		lowest_rq = cpu_rq(cpu);
1563 
1564 		/* if the prio of this runqueue changed, try again */
1565 		if (double_lock_balance(rq, lowest_rq)) {
1566 			/*
1567 			 * We had to unlock the run queue. In
1568 			 * the meantime, the task could have
1569 			 * migrated already or had its affinity changed.
1570 			 * Also make sure that it wasn't scheduled on its rq.
1571 			 */
1572 			if (unlikely(task_rq(task) != rq ||
1573 				     !cpumask_test_cpu(lowest_rq->cpu,
1574 						       tsk_cpus_allowed(task)) ||
1575 				     task_running(rq, task) ||
1576 				     !task->on_rq)) {
1577 
1578 				double_unlock_balance(rq, lowest_rq);
1579 				lowest_rq = NULL;
1580 				break;
1581 			}
1582 		}
1583 
1584 		/* If this rq is still suitable use it. */
1585 		if (lowest_rq->rt.highest_prio.curr > task->prio)
1586 			break;
1587 
1588 		/* try again */
1589 		double_unlock_balance(rq, lowest_rq);
1590 		lowest_rq = NULL;
1591 	}
1592 
1593 	return lowest_rq;
1594 }
1595 
1596 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1597 {
1598 	struct task_struct *p;
1599 
1600 	if (!has_pushable_tasks(rq))
1601 		return NULL;
1602 
1603 	p = plist_first_entry(&rq->rt.pushable_tasks,
1604 			      struct task_struct, pushable_tasks);
1605 
1606 	BUG_ON(rq->cpu != task_cpu(p));
1607 	BUG_ON(task_current(rq, p));
1608 	BUG_ON(p->nr_cpus_allowed <= 1);
1609 
1610 	BUG_ON(!p->on_rq);
1611 	BUG_ON(!rt_task(p));
1612 
1613 	return p;
1614 }
1615 
1616 /*
1617  * If the current CPU has more than one RT task, see if the
1618  * non-running task can migrate over to a CPU that is running a task
1619  * of lesser priority.
1620  */
1621 static int push_rt_task(struct rq *rq)
1622 {
1623 	struct task_struct *next_task;
1624 	struct rq *lowest_rq;
1625 	int ret = 0;
1626 
1627 	if (!rq->rt.overloaded)
1628 		return 0;
1629 
1630 	next_task = pick_next_pushable_task(rq);
1631 	if (!next_task)
1632 		return 0;
1633 
1634 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1635 	if (unlikely(task_running(rq, next_task)))
1636 		return 0;
1637 #endif
1638 
1639 retry:
1640 	if (unlikely(next_task == rq->curr)) {
1641 		WARN_ON(1);
1642 		return 0;
1643 	}
1644 
1645 	/*
1646 	 * It's possible that the next_task slipped in at a
1647 	 * higher priority than current. If that's the case,
1648 	 * just reschedule current.
1649 	 */
1650 	if (unlikely(next_task->prio < rq->curr->prio)) {
1651 		resched_task(rq->curr);
1652 		return 0;
1653 	}
1654 
1655 	/* We might release rq lock */
1656 	get_task_struct(next_task);
1657 
1658 	/* find_lock_lowest_rq locks the rq if found */
1659 	lowest_rq = find_lock_lowest_rq(next_task, rq);
1660 	if (!lowest_rq) {
1661 		struct task_struct *task;
1662 		/*
1663 		 * find_lock_lowest_rq releases rq->lock
1664 		 * so it is possible that next_task has migrated.
1665 		 *
1666 		 * We need to make sure that the task is still on the same
1667 		 * run-queue and is also still the next task eligible for
1668 		 * pushing.
1669 		 */
1670 		task = pick_next_pushable_task(rq);
1671 		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1672 			/*
1673 			 * The task hasn't migrated, and is still the next
1674 			 * eligible task, but we failed to find a run-queue
1675 			 * to push it to.  Do not retry in this case, since
1676 			 * other cpus will pull from us when ready.
1677 			 */
1678 			goto out;
1679 		}
1680 
1681 		if (!task)
1682 			/* No more tasks, just exit */
1683 			goto out;
1684 
1685 		/*
1686 		 * Something has shifted, try again.
1687 		 */
1688 		put_task_struct(next_task);
1689 		next_task = task;
1690 		goto retry;
1691 	}
1692 
1693 	deactivate_task(rq, next_task, 0);
1694 	set_task_cpu(next_task, lowest_rq->cpu);
1695 	activate_task(lowest_rq, next_task, 0);
1696 	ret = 1;
1697 
1698 	resched_task(lowest_rq->curr);
1699 
1700 	double_unlock_balance(rq, lowest_rq);
1701 
1702 out:
1703 	put_task_struct(next_task);
1704 
1705 	return ret;
1706 }
1707 
1708 static void push_rt_tasks(struct rq *rq)
1709 {
1710 	/* push_rt_task will return true if it moved an RT */
1711 	while (push_rt_task(rq))
1712 		;
1713 }
1714 
1715 static int pull_rt_task(struct rq *this_rq)
1716 {
1717 	int this_cpu = this_rq->cpu, ret = 0, cpu;
1718 	struct task_struct *p;
1719 	struct rq *src_rq;
1720 
1721 	if (likely(!rt_overloaded(this_rq)))
1722 		return 0;
1723 
1724 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
1725 		if (this_cpu == cpu)
1726 			continue;
1727 
1728 		src_rq = cpu_rq(cpu);
1729 
1730 		/*
1731 		 * Don't bother taking the src_rq->lock if the next highest
1732 		 * task is known to be lower-priority than our current task.
1733 		 * This may look racy, but if this value is about to go
1734 		 * logically higher, the src_rq will push this task away.
1735 		 * And if it's going logically lower, we do not care.
1736 		 */
1737 		if (src_rq->rt.highest_prio.next >=
1738 		    this_rq->rt.highest_prio.curr)
1739 			continue;
1740 
1741 		/*
1742 		 * We can potentially drop this_rq's lock in
1743 		 * double_lock_balance, and another CPU could
1744 		 * alter this_rq
1745 		 */
1746 		double_lock_balance(this_rq, src_rq);
1747 
1748 		/*
1749 		 * Are there still pullable RT tasks?
1750 		 */
1751 		if (src_rq->rt.rt_nr_running <= 1)
1752 			goto skip;
1753 
1754 		p = pick_next_highest_task_rt(src_rq, this_cpu);
1755 
1756 		/*
1757 		 * Do we have an RT task that preempts
1758 		 * the to-be-scheduled task?
1759 		 */
1760 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1761 			WARN_ON(p == src_rq->curr);
1762 			WARN_ON(!p->on_rq);
1763 
1764 			/*
1765 			 * There's a chance that p is higher in priority
1766 			 * than what's currently running on its cpu.
1767 			 * This is just because p is waking up and hasn't
1768 			 * had a chance to schedule. We only pull
1769 			 * p if it is lower in priority than the
1770 			 * current task on the run queue.
1771 			 */
1772 			if (p->prio < src_rq->curr->prio)
1773 				goto skip;
1774 
1775 			ret = 1;
1776 
1777 			deactivate_task(src_rq, p, 0);
1778 			set_task_cpu(p, this_cpu);
1779 			activate_task(this_rq, p, 0);
1780 			/*
1781 			 * We continue with the search, just in
1782 			 * case there's an even higher prio task
1783 			 * in another runqueue. (low likelihood
1784 			 * but possible)
1785 			 */
1786 		}
1787 skip:
1788 		double_unlock_balance(this_rq, src_rq);
1789 	}
1790 
1791 	return ret;
1792 }
1793 
1794 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1795 {
1796 	/* Try to pull RT tasks here if we lower this rq's prio */
1797 	if (rq->rt.highest_prio.curr > prev->prio)
1798 		pull_rt_task(rq);
1799 }
1800 
1801 static void post_schedule_rt(struct rq *rq)
1802 {
1803 	push_rt_tasks(rq);
1804 }
1805 
1806 /*
1807  * If we are not running and we are not going to reschedule soon, we should
1808  * try to push tasks away now
1809  */
1810 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1811 {
1812 	if (!task_running(rq, p) &&
1813 	    !test_tsk_need_resched(rq->curr) &&
1814 	    has_pushable_tasks(rq) &&
1815 	    p->nr_cpus_allowed > 1 &&
1816 	    rt_task(rq->curr) &&
1817 	    (rq->curr->nr_cpus_allowed < 2 ||
1818 	     rq->curr->prio <= p->prio))
1819 		push_rt_tasks(rq);
1820 }
1821 
1822 static void set_cpus_allowed_rt(struct task_struct *p,
1823 				const struct cpumask *new_mask)
1824 {
1825 	struct rq *rq;
1826 	int weight;
1827 
1828 	BUG_ON(!rt_task(p));
1829 
1830 	if (!p->on_rq)
1831 		return;
1832 
1833 	weight = cpumask_weight(new_mask);
1834 
1835 	/*
1836 	 * Only update if the process changed whether it can
1837 	 * migrate or not.
1838 	 */
1839 	if ((p->nr_cpus_allowed > 1) == (weight > 1))
1840 		return;
1841 
1842 	rq = task_rq(p);
1843 
1844 	/*
1845 	 * The process used to be able to migrate OR it can now migrate
1846 	 */
1847 	if (weight <= 1) {
1848 		if (!task_current(rq, p))
1849 			dequeue_pushable_task(rq, p);
1850 		BUG_ON(!rq->rt.rt_nr_migratory);
1851 		rq->rt.rt_nr_migratory--;
1852 	} else {
1853 		if (!task_current(rq, p))
1854 			enqueue_pushable_task(rq, p);
1855 		rq->rt.rt_nr_migratory++;
1856 	}
1857 
1858 	update_rt_migration(&rq->rt);
1859 }
1860 
1861 /* Assumes rq->lock is held */
1862 static void rq_online_rt(struct rq *rq)
1863 {
1864 	if (rq->rt.overloaded)
1865 		rt_set_overload(rq);
1866 
1867 	__enable_runtime(rq);
1868 
1869 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1870 }
1871 
1872 /* Assumes rq->lock is held */
1873 static void rq_offline_rt(struct rq *rq)
1874 {
1875 	if (rq->rt.overloaded)
1876 		rt_clear_overload(rq);
1877 
1878 	__disable_runtime(rq);
1879 
1880 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1881 }
1882 
1883 /*
1884  * When switching away from the rt queue, we bring ourselves to a position
1885  * where we might want to pull RT tasks from other runqueues.
1886  */
1887 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1888 {
1889 	/*
1890 	 * If there are other RT tasks then we will reschedule
1891 	 * and the scheduling of the other RT tasks will handle
1892 	 * the balancing. But if we are the last RT task
1893 	 * we may need to handle the pulling of RT tasks
1894 	 * now.
1895 	 */
1896 	if (p->on_rq && !rq->rt.rt_nr_running)
1897 		pull_rt_task(rq);
1898 }
1899 
1900 void init_sched_rt_class(void)
1901 {
1902 	unsigned int i;
1903 
1904 	for_each_possible_cpu(i) {
1905 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1906 					GFP_KERNEL, cpu_to_node(i));
1907 	}
1908 }
1909 #endif /* CONFIG_SMP */
1910 
1911 /*
1912  * When switching a task to RT, we may overload the runqueue
1913  * with RT tasks. In this case we try to push them off to
1914  * other runqueues.
1915  */
1916 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1917 {
1918 	int check_resched = 1;
1919 
1920 	/*
1921 	 * If we are already running, then there's nothing
1922 	 * that needs to be done. But if we are not running
1923 	 * we may need to preempt the current running task.
1924 	 * If that current running task is also an RT task
1925 	 * then see if we can move to another run queue.
1926 	 */
1927 	if (p->on_rq && rq->curr != p) {
1928 #ifdef CONFIG_SMP
1929 		if (rq->rt.overloaded && push_rt_task(rq) &&
1930 		    /* Don't resched if we changed runqueues */
1931 		    rq != task_rq(p))
1932 			check_resched = 0;
1933 #endif /* CONFIG_SMP */
1934 		if (check_resched && p->prio < rq->curr->prio)
1935 			resched_task(rq->curr);
1936 	}
1937 }
1938 
1939 /*
1940  * Priority of the task has changed. This may cause
1941  * us to initiate a push or pull.
1942  */
1943 static void
1944 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1945 {
1946 	if (!p->on_rq)
1947 		return;
1948 
1949 	if (rq->curr == p) {
1950 #ifdef CONFIG_SMP
1951 		/*
1952 		 * If our priority decreases while running, we
1953 		 * may need to pull tasks to this runqueue.
1954 		 */
1955 		if (oldprio < p->prio)
1956 			pull_rt_task(rq);
1957 		/*
1958 		 * If there's a higher priority task waiting to run
1959 		 * then reschedule. Note, the above pull_rt_task
1960 		 * can release the rq lock and p could migrate.
1961 		 * Only reschedule if p is still on the same runqueue.
1962 		 */
1963 		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1964 			resched_task(p);
1965 #else
1966 		/* For UP simply resched on drop of prio */
1967 		if (oldprio < p->prio)
1968 			resched_task(p);
1969 #endif /* CONFIG_SMP */
1970 	} else {
1971 		/*
1972 		 * This task is not running, but if it is
1973 		 * greater than the current running task
1974 		 * then reschedule.
1975 		 */
1976 		if (p->prio < rq->curr->prio)
1977 			resched_task(rq->curr);
1978 	}
1979 }
1980 
1981 static void watchdog(struct rq *rq, struct task_struct *p)
1982 {
1983 	unsigned long soft, hard;
1984 
1985 	/* max may change after cur was read; this will be fixed next tick */
1986 	soft = task_rlimit(p, RLIMIT_RTTIME);
1987 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
1988 
1989 	if (soft != RLIM_INFINITY) {
1990 		unsigned long next;
1991 
1992 		p->rt.timeout++;
1993 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1994 		if (p->rt.timeout > next)
1995 			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1996 	}
1997 }
1998 
1999 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2000 {
2001 	struct sched_rt_entity *rt_se = &p->rt;
2002 
2003 	update_curr_rt(rq);
2004 
2005 	watchdog(rq, p);
2006 
2007 	/*
2008 	 * RR tasks need a special form of timeslice management.
2009 	 * FIFO tasks have no timeslices.
2010 	 */
2011 	if (p->policy != SCHED_RR)
2012 		return;
2013 
2014 	if (--p->rt.time_slice)
2015 		return;
2016 
2017 	p->rt.time_slice = RR_TIMESLICE;
2018 
2019 	/*
2020 	 * Requeue to the end of the queue if we (and all of our ancestors) are
2021 	 * not the only element on the queue.
2022 	 */
2023 	for_each_sched_rt_entity(rt_se) {
2024 		if (rt_se->run_list.prev != rt_se->run_list.next) {
2025 			requeue_task_rt(rq, p, 0);
2026 			set_tsk_need_resched(p);
2027 			return;
2028 		}
2029 	}
2030 }
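/*
 * RR example: RR_TIMESLICE is 100ms worth of ticks in this era of the
 * kernel (e.g. 100 ticks at HZ=1000).  Two SCHED_RR tasks at the same
 * priority therefore alternate roughly every 100ms, each moving to the
 * tail of its priority queue when its slice runs out; a SCHED_RR task
 * alone on its queue simply has its slice refilled and keeps running, and
 * SCHED_FIFO tasks are never rotated at all.
 */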
2031 
2032 static void set_curr_task_rt(struct rq *rq)
2033 {
2034 	struct task_struct *p = rq->curr;
2035 
2036 	p->se.exec_start = rq->clock_task;
2037 
2038 	/* The running task is never eligible for pushing */
2039 	dequeue_pushable_task(rq, p);
2040 }
2041 
2042 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2043 {
2044 	/*
2045 	 * Time slice is 0 for SCHED_FIFO tasks
2046 	 */
2047 	if (task->policy == SCHED_RR)
2048 		return RR_TIMESLICE;
2049 	else
2050 		return 0;
2051 }
2052 
2053 const struct sched_class rt_sched_class = {
2054 	.next			= &fair_sched_class,
2055 	.enqueue_task		= enqueue_task_rt,
2056 	.dequeue_task		= dequeue_task_rt,
2057 	.yield_task		= yield_task_rt,
2058 
2059 	.check_preempt_curr	= check_preempt_curr_rt,
2060 
2061 	.pick_next_task		= pick_next_task_rt,
2062 	.put_prev_task		= put_prev_task_rt,
2063 
2064 #ifdef CONFIG_SMP
2065 	.select_task_rq		= select_task_rq_rt,
2066 
2067 	.set_cpus_allowed       = set_cpus_allowed_rt,
2068 	.rq_online              = rq_online_rt,
2069 	.rq_offline             = rq_offline_rt,
2070 	.pre_schedule		= pre_schedule_rt,
2071 	.post_schedule		= post_schedule_rt,
2072 	.task_woken		= task_woken_rt,
2073 	.switched_from		= switched_from_rt,
2074 #endif
2075 
2076 	.set_curr_task          = set_curr_task_rt,
2077 	.task_tick		= task_tick_rt,
2078 
2079 	.get_rr_interval	= get_rr_interval_rt,
2080 
2081 	.prio_changed		= prio_changed_rt,
2082 	.switched_to		= switched_to_rt,
2083 };
2084 
2085 #ifdef CONFIG_SCHED_DEBUG
2086 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2087 
2088 void print_rt_stats(struct seq_file *m, int cpu)
2089 {
2090 	rt_rq_iter_t iter;
2091 	struct rt_rq *rt_rq;
2092 
2093 	rcu_read_lock();
2094 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2095 		print_rt_rq(m, cpu, rt_rq);
2096 	rcu_read_unlock();
2097 }
2098 #endif /* CONFIG_SCHED_DEBUG */
2099