1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
4 * policies)
5 */
6
7 int sched_rr_timeslice = RR_TIMESLICE;
8 /* More than 4 hours if BW_SHIFT equals 20. */
9 static const u64 max_rt_runtime = MAX_BW;
10
11 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
12
13 struct rt_bandwidth def_rt_bandwidth;
14
15 /*
16 * period over which we measure -rt task CPU usage in us.
17 * default: 1s
18 */
19 unsigned int sysctl_sched_rt_period = 1000000;
20
21 /*
22 * part of the period that we allow rt tasks to run in us.
23 * default: 0.95s
24 */
25 int sysctl_sched_rt_runtime = 950000;
26
27 #ifdef CONFIG_SYSCTL
28 static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
29 static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
30 size_t *lenp, loff_t *ppos);
31 static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
32 size_t *lenp, loff_t *ppos);
33 static struct ctl_table sched_rt_sysctls[] = {
34 {
35 .procname = "sched_rt_period_us",
36 .data = &sysctl_sched_rt_period,
37 .maxlen = sizeof(unsigned int),
38 .mode = 0644,
39 .proc_handler = sched_rt_handler,
40 .extra1 = SYSCTL_ONE,
41 .extra2 = SYSCTL_INT_MAX,
42 },
43 {
44 .procname = "sched_rt_runtime_us",
45 .data = &sysctl_sched_rt_runtime,
46 .maxlen = sizeof(int),
47 .mode = 0644,
48 .proc_handler = sched_rt_handler,
49 .extra1 = SYSCTL_NEG_ONE,
50 .extra2 = SYSCTL_INT_MAX,
51 },
52 {
53 .procname = "sched_rr_timeslice_ms",
54 .data = &sysctl_sched_rr_timeslice,
55 .maxlen = sizeof(int),
56 .mode = 0644,
57 .proc_handler = sched_rr_handler,
58 },
59 {}
60 };
61
62 static int __init sched_rt_sysctl_init(void)
63 {
64 register_sysctl_init("kernel", sched_rt_sysctls);
65 return 0;
66 }
67 late_initcall(sched_rt_sysctl_init);
68 #endif
69
70 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
71 {
72 struct rt_bandwidth *rt_b =
73 container_of(timer, struct rt_bandwidth, rt_period_timer);
74 int idle = 0;
75 int overrun;
76
77 raw_spin_lock(&rt_b->rt_runtime_lock);
78 for (;;) {
79 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
80 if (!overrun)
81 break;
82
83 raw_spin_unlock(&rt_b->rt_runtime_lock);
84 idle = do_sched_rt_period_timer(rt_b, overrun);
85 raw_spin_lock(&rt_b->rt_runtime_lock);
86 }
87 if (idle)
88 rt_b->rt_period_active = 0;
89 raw_spin_unlock(&rt_b->rt_runtime_lock);
90
91 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
92 }
93
94 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
95 {
96 rt_b->rt_period = ns_to_ktime(period);
97 rt_b->rt_runtime = runtime;
98
99 raw_spin_lock_init(&rt_b->rt_runtime_lock);
100
101 hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
102 HRTIMER_MODE_REL_HARD);
103 rt_b->rt_period_timer.function = sched_rt_period_timer;
104 }
105
106 static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
107 {
108 raw_spin_lock(&rt_b->rt_runtime_lock);
109 if (!rt_b->rt_period_active) {
110 rt_b->rt_period_active = 1;
111 /*
112 * SCHED_DEADLINE updates the bandwidth, as a runaway
113 * RT task with a DL task could hog a CPU. But DL does
114 * not reset the period. If a deadline task was running
115 * without an RT task running, it can cause RT tasks to
116 * throttle when they start up. Kick the timer right away
117 * to update the period.
118 */
119 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
120 hrtimer_start_expires(&rt_b->rt_period_timer,
121 HRTIMER_MODE_ABS_PINNED_HARD);
122 }
123 raw_spin_unlock(&rt_b->rt_runtime_lock);
124 }
125
126 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
127 {
128 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
129 return;
130
131 do_start_rt_bandwidth(rt_b);
132 }
133
134 void init_rt_rq(struct rt_rq *rt_rq)
135 {
136 struct rt_prio_array *array;
137 int i;
138
139 array = &rt_rq->active;
140 for (i = 0; i < MAX_RT_PRIO; i++) {
141 INIT_LIST_HEAD(array->queue + i);
142 __clear_bit(i, array->bitmap);
143 }
144 /* delimiter for bitsearch: */
145 __set_bit(MAX_RT_PRIO, array->bitmap);
146
147 #if defined CONFIG_SMP
148 rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
149 rt_rq->highest_prio.next = MAX_RT_PRIO-1;
150 rt_rq->rt_nr_migratory = 0;
151 rt_rq->overloaded = 0;
152 plist_head_init(&rt_rq->pushable_tasks);
153 #endif /* CONFIG_SMP */
154 /* We start in dequeued state, because no RT tasks are queued */
155 rt_rq->rt_queued = 0;
156
157 rt_rq->rt_time = 0;
158 rt_rq->rt_throttled = 0;
159 rt_rq->rt_runtime = 0;
160 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
161 }
162
163 #ifdef CONFIG_RT_GROUP_SCHED
164 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
165 {
166 hrtimer_cancel(&rt_b->rt_period_timer);
167 }
168
169 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
170
171 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
172 {
173 #ifdef CONFIG_SCHED_DEBUG
174 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
175 #endif
176 return container_of(rt_se, struct task_struct, rt);
177 }
178
179 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180 {
181 return rt_rq->rq;
182 }
183
184 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185 {
186 return rt_se->rt_rq;
187 }
188
189 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
190 {
191 struct rt_rq *rt_rq = rt_se->rt_rq;
192
193 return rt_rq->rq;
194 }
195
196 void unregister_rt_sched_group(struct task_group *tg)
197 {
198 if (tg->rt_se)
199 destroy_rt_bandwidth(&tg->rt_bandwidth);
200
201 }
202
203 void free_rt_sched_group(struct task_group *tg)
204 {
205 int i;
206
207 for_each_possible_cpu(i) {
208 if (tg->rt_rq)
209 kfree(tg->rt_rq[i]);
210 if (tg->rt_se)
211 kfree(tg->rt_se[i]);
212 }
213
214 kfree(tg->rt_rq);
215 kfree(tg->rt_se);
216 }
217
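/*
 * Attach a group's per-CPU rt_rq and scheduling entity to this CPU's
 * runqueue: the entity hangs off the root rt_rq when it has no parent,
 * otherwise off the parent group's queue.
 */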
218 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
219 struct sched_rt_entity *rt_se, int cpu,
220 struct sched_rt_entity *parent)
221 {
222 struct rq *rq = cpu_rq(cpu);
223
224 rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
225 rt_rq->rt_nr_boosted = 0;
226 rt_rq->rq = rq;
227 rt_rq->tg = tg;
228
229 tg->rt_rq[cpu] = rt_rq;
230 tg->rt_se[cpu] = rt_se;
231
232 if (!rt_se)
233 return;
234
235 if (!parent)
236 rt_se->rt_rq = &rq->rt;
237 else
238 rt_se->rt_rq = parent->my_q;
239
240 rt_se->my_q = rt_rq;
241 rt_se->parent = parent;
242 INIT_LIST_HEAD(&rt_se->run_list);
243 }
244
245 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
246 {
247 struct rt_rq *rt_rq;
248 struct sched_rt_entity *rt_se;
249 int i;
250
251 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
252 if (!tg->rt_rq)
253 goto err;
254 tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
255 if (!tg->rt_se)
256 goto err;
257
258 init_rt_bandwidth(&tg->rt_bandwidth,
259 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
260
261 for_each_possible_cpu(i) {
262 rt_rq = kzalloc_node(sizeof(struct rt_rq),
263 GFP_KERNEL, cpu_to_node(i));
264 if (!rt_rq)
265 goto err;
266
267 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
268 GFP_KERNEL, cpu_to_node(i));
269 if (!rt_se)
270 goto err_free_rq;
271
272 init_rt_rq(rt_rq);
273 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
274 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
275 }
276
277 return 1;
278
279 err_free_rq:
280 kfree(rt_rq);
281 err:
282 return 0;
283 }
284
285 #else /* CONFIG_RT_GROUP_SCHED */
286
287 #define rt_entity_is_task(rt_se) (1)
288
289 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
290 {
291 return container_of(rt_se, struct task_struct, rt);
292 }
293
294 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
295 {
296 return container_of(rt_rq, struct rq, rt);
297 }
298
299 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
300 {
301 struct task_struct *p = rt_task_of(rt_se);
302
303 return task_rq(p);
304 }
305
306 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
307 {
308 struct rq *rq = rq_of_rt_se(rt_se);
309
310 return &rq->rt;
311 }
312
313 void unregister_rt_sched_group(struct task_group *tg) { }
314
315 void free_rt_sched_group(struct task_group *tg) { }
316
317 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
318 {
319 return 1;
320 }
321 #endif /* CONFIG_RT_GROUP_SCHED */
322
323 #ifdef CONFIG_SMP
324
325 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
326 {
327 /* Try to pull RT tasks here if we lower this rq's prio */
328 return rq->online && rq->rt.highest_prio.curr > prev->prio;
329 }
330
331 static inline int rt_overloaded(struct rq *rq)
332 {
333 return atomic_read(&rq->rd->rto_count);
334 }
335
336 static inline void rt_set_overload(struct rq *rq)
337 {
338 if (!rq->online)
339 return;
340
341 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
342 /*
343 * Make sure the mask is visible before we set
344 * the overload count. That is checked to determine
345 * if we should look at the mask. It would be a shame
346 * if we looked at the mask, but the mask was not
347 * updated yet.
348 *
349 * Matched by the barrier in pull_rt_task().
350 */
351 smp_wmb();
352 atomic_inc(&rq->rd->rto_count);
353 }
354
355 static inline void rt_clear_overload(struct rq *rq)
356 {
357 if (!rq->online)
358 return;
359
360 /* the order here really doesn't matter */
361 atomic_dec(&rq->rd->rto_count);
362 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
363 }
364
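/*
 * An rq is "RT overloaded" when it has more than one RT task queued and
 * at least one of them can migrate. Overloaded rqs advertise themselves
 * in the root domain (rto_mask/rto_count) so that other CPUs can pull
 * work from them.
 */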
365 static void update_rt_migration(struct rt_rq *rt_rq)
366 {
367 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
368 if (!rt_rq->overloaded) {
369 rt_set_overload(rq_of_rt_rq(rt_rq));
370 rt_rq->overloaded = 1;
371 }
372 } else if (rt_rq->overloaded) {
373 rt_clear_overload(rq_of_rt_rq(rt_rq));
374 rt_rq->overloaded = 0;
375 }
376 }
377
378 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
379 {
380 struct task_struct *p;
381
382 if (!rt_entity_is_task(rt_se))
383 return;
384
385 p = rt_task_of(rt_se);
386 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
387
388 rt_rq->rt_nr_total++;
389 if (p->nr_cpus_allowed > 1)
390 rt_rq->rt_nr_migratory++;
391
392 update_rt_migration(rt_rq);
393 }
394
395 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
396 {
397 struct task_struct *p;
398
399 if (!rt_entity_is_task(rt_se))
400 return;
401
402 p = rt_task_of(rt_se);
403 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
404
405 rt_rq->rt_nr_total--;
406 if (p->nr_cpus_allowed > 1)
407 rt_rq->rt_nr_migratory--;
408
409 update_rt_migration(rt_rq);
410 }
411
412 static inline int has_pushable_tasks(struct rq *rq)
413 {
414 return !plist_head_empty(&rq->rt.pushable_tasks);
415 }
416
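/*
 * Pushing and pulling may have to drop this rq's lock (via
 * double_lock_balance()), so the actual work is deferred through
 * per-CPU balance callbacks instead of running deep inside the
 * scheduler paths that queue it.
 */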
417 static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
418 static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);
419
420 static void push_rt_tasks(struct rq *);
421 static void pull_rt_task(struct rq *);
422
423 static inline void rt_queue_push_tasks(struct rq *rq)
424 {
425 if (!has_pushable_tasks(rq))
426 return;
427
428 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
429 }
430
431 static inline void rt_queue_pull_task(struct rq *rq)
432 {
433 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
434 }
435
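/*
 * Maintain the priority-sorted plist of tasks that could be pushed to
 * another CPU, and keep rt.highest_prio.next in sync with its head.
 */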
436 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
437 {
438 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
439 plist_node_init(&p->pushable_tasks, p->prio);
440 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
441
442 /* Update the highest prio pushable task */
443 if (p->prio < rq->rt.highest_prio.next)
444 rq->rt.highest_prio.next = p->prio;
445 }
446
447 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
448 {
449 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
450
451 /* Update the new highest prio pushable task */
452 if (has_pushable_tasks(rq)) {
453 p = plist_first_entry(&rq->rt.pushable_tasks,
454 struct task_struct, pushable_tasks);
455 rq->rt.highest_prio.next = p->prio;
456 } else {
457 rq->rt.highest_prio.next = MAX_RT_PRIO-1;
458 }
459 }
460
461 #else
462
463 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
464 {
465 }
466
467 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
468 {
469 }
470
471 static inline
472 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
473 {
474 }
475
476 static inline
477 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
478 {
479 }
480
481 static inline void rt_queue_push_tasks(struct rq *rq)
482 {
483 }
484 #endif /* CONFIG_SMP */
485
486 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
487 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
488
489 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
490 {
491 return rt_se->on_rq;
492 }
493
494 #ifdef CONFIG_UCLAMP_TASK
495 /*
496 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
497 * settings.
498 *
499 * This check is only important for heterogeneous systems where uclamp_min value
500 * is higher than the capacity of a @cpu. For non-heterogeneous systems this
501 * function will always return true.
502 *
503 * The function will return true if the capacity of the @cpu is >= the
504 * uclamp_min and false otherwise.
505 *
506 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
507 * > uclamp_max.
508 */
509 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
510 {
511 unsigned int min_cap;
512 unsigned int max_cap;
513 unsigned int cpu_cap;
514
515 /* Only heterogeneous systems can benefit from this check */
516 if (!sched_asym_cpucap_active())
517 return true;
518
519 min_cap = uclamp_eff_value(p, UCLAMP_MIN);
520 max_cap = uclamp_eff_value(p, UCLAMP_MAX);
521
522 cpu_cap = capacity_orig_of(cpu);
523
524 return cpu_cap >= min(min_cap, max_cap);
525 }
526 #else
527 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
528 {
529 return true;
530 }
531 #endif
532
533 #ifdef CONFIG_RT_GROUP_SCHED
534
535 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
536 {
537 if (!rt_rq->tg)
538 return RUNTIME_INF;
539
540 return rt_rq->rt_runtime;
541 }
542
543 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
544 {
545 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
546 }
547
548 typedef struct task_group *rt_rq_iter_t;
549
550 static inline struct task_group *next_task_group(struct task_group *tg)
551 {
552 do {
553 tg = list_entry_rcu(tg->list.next,
554 typeof(struct task_group), list);
555 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
556
557 if (&tg->list == &task_groups)
558 tg = NULL;
559
560 return tg;
561 }
562
563 #define for_each_rt_rq(rt_rq, iter, rq) \
564 for (iter = container_of(&task_groups, typeof(*iter), list); \
565 (iter = next_task_group(iter)) && \
566 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
567
568 #define for_each_sched_rt_entity(rt_se) \
569 for (; rt_se; rt_se = rt_se->parent)
570
571 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
572 {
573 return rt_se->my_q;
574 }
575
576 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
577 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
578
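/*
 * (Re)insert a group rt_rq into the hierarchy, e.g. once its bandwidth
 * has been replenished, and reschedule if it now holds a higher
 * priority task than the one currently running.
 */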
579 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
580 {
581 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
582 struct rq *rq = rq_of_rt_rq(rt_rq);
583 struct sched_rt_entity *rt_se;
584
585 int cpu = cpu_of(rq);
586
587 rt_se = rt_rq->tg->rt_se[cpu];
588
589 if (rt_rq->rt_nr_running) {
590 if (!rt_se)
591 enqueue_top_rt_rq(rt_rq);
592 else if (!on_rt_rq(rt_se))
593 enqueue_rt_entity(rt_se, 0);
594
595 if (rt_rq->highest_prio.curr < curr->prio)
596 resched_curr(rq);
597 }
598 }
599
600 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
601 {
602 struct sched_rt_entity *rt_se;
603 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
604
605 rt_se = rt_rq->tg->rt_se[cpu];
606
607 if (!rt_se) {
608 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
609 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
610 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
611 }
612 else if (on_rt_rq(rt_se))
613 dequeue_rt_entity(rt_se, 0);
614 }
615
616 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
617 {
618 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
619 }
620
621 static int rt_se_boosted(struct sched_rt_entity *rt_se)
622 {
623 struct rt_rq *rt_rq = group_rt_rq(rt_se);
624 struct task_struct *p;
625
626 if (rt_rq)
627 return !!rt_rq->rt_nr_boosted;
628
629 p = rt_task_of(rt_se);
630 return p->prio != p->normal_prio;
631 }
632
633 #ifdef CONFIG_SMP
634 static inline const struct cpumask *sched_rt_period_mask(void)
635 {
636 return this_rq()->rd->span;
637 }
638 #else
639 static inline const struct cpumask *sched_rt_period_mask(void)
640 {
641 return cpu_online_mask;
642 }
643 #endif
644
645 static inline
646 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
647 {
648 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
649 }
650
651 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
652 {
653 return &rt_rq->tg->rt_bandwidth;
654 }
655
656 #else /* !CONFIG_RT_GROUP_SCHED */
657
658 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
659 {
660 return rt_rq->rt_runtime;
661 }
662
663 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
664 {
665 return ktime_to_ns(def_rt_bandwidth.rt_period);
666 }
667
668 typedef struct rt_rq *rt_rq_iter_t;
669
670 #define for_each_rt_rq(rt_rq, iter, rq) \
671 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
672
673 #define for_each_sched_rt_entity(rt_se) \
674 for (; rt_se; rt_se = NULL)
675
676 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
677 {
678 return NULL;
679 }
680
681 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
682 {
683 struct rq *rq = rq_of_rt_rq(rt_rq);
684
685 if (!rt_rq->rt_nr_running)
686 return;
687
688 enqueue_top_rt_rq(rt_rq);
689 resched_curr(rq);
690 }
691
692 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
693 {
694 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
695 }
696
697 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
698 {
699 return rt_rq->rt_throttled;
700 }
701
702 static inline const struct cpumask *sched_rt_period_mask(void)
703 {
704 return cpu_online_mask;
705 }
706
707 static inline
708 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
709 {
710 return &cpu_rq(cpu)->rt;
711 }
712
713 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
714 {
715 return &def_rt_bandwidth;
716 }
717
718 #endif /* CONFIG_RT_GROUP_SCHED */
719
720 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
721 {
722 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
723
724 return (hrtimer_active(&rt_b->rt_period_timer) ||
725 rt_rq->rt_time < rt_b->rt_runtime);
726 }
727
728 #ifdef CONFIG_SMP
729 /*
730 * We ran out of runtime, see if we can borrow some from our neighbours.
731 */
732 static void do_balance_runtime(struct rt_rq *rt_rq)
733 {
734 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
735 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
736 int i, weight;
737 u64 rt_period;
738
739 weight = cpumask_weight(rd->span);
740
741 raw_spin_lock(&rt_b->rt_runtime_lock);
742 rt_period = ktime_to_ns(rt_b->rt_period);
743 for_each_cpu(i, rd->span) {
744 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
745 s64 diff;
746
747 if (iter == rt_rq)
748 continue;
749
750 raw_spin_lock(&iter->rt_runtime_lock);
751 /*
752 * Either all rqs have inf runtime and there's nothing to steal
753 * or __disable_runtime() below sets a specific rq to inf to
754 * indicate it's been disabled and disallow stealing.
755 */
756 if (iter->rt_runtime == RUNTIME_INF)
757 goto next;
758
759 /*
760 * From runqueues with spare time, take 1/n part of their
761 * spare time, but no more than our period.
762 */
763 diff = iter->rt_runtime - iter->rt_time;
764 if (diff > 0) {
765 diff = div_u64((u64)diff, weight);
766 if (rt_rq->rt_runtime + diff > rt_period)
767 diff = rt_period - rt_rq->rt_runtime;
768 iter->rt_runtime -= diff;
769 rt_rq->rt_runtime += diff;
770 if (rt_rq->rt_runtime == rt_period) {
771 raw_spin_unlock(&iter->rt_runtime_lock);
772 break;
773 }
774 }
775 next:
776 raw_spin_unlock(&iter->rt_runtime_lock);
777 }
778 raw_spin_unlock(&rt_b->rt_runtime_lock);
779 }
780
781 /*
782 * Ensure this RQ takes back all the runtime it lent to its neighbours.
783 */
784 static void __disable_runtime(struct rq *rq)
785 {
786 struct root_domain *rd = rq->rd;
787 rt_rq_iter_t iter;
788 struct rt_rq *rt_rq;
789
790 if (unlikely(!scheduler_running))
791 return;
792
793 for_each_rt_rq(rt_rq, iter, rq) {
794 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
795 s64 want;
796 int i;
797
798 raw_spin_lock(&rt_b->rt_runtime_lock);
799 raw_spin_lock(&rt_rq->rt_runtime_lock);
800 /*
801 * Either we're all inf and nobody needs to borrow, or we're
802 * already disabled and thus have nothing to do, or we have
803 * exactly the right amount of runtime to take out.
804 */
805 if (rt_rq->rt_runtime == RUNTIME_INF ||
806 rt_rq->rt_runtime == rt_b->rt_runtime)
807 goto balanced;
808 raw_spin_unlock(&rt_rq->rt_runtime_lock);
809
810 /*
811 * Calculate the difference between what we started out with
812 * and what we currently have, that's the amount of runtime
813 * we lend and now have to reclaim.
814 */
815 want = rt_b->rt_runtime - rt_rq->rt_runtime;
816
817 /*
818 * Greedy reclaim, take back as much as we can.
819 */
820 for_each_cpu(i, rd->span) {
821 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
822 s64 diff;
823
824 /*
825 * Can't reclaim from ourselves or disabled runqueues.
826 */
827 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
828 continue;
829
830 raw_spin_lock(&iter->rt_runtime_lock);
831 if (want > 0) {
832 diff = min_t(s64, iter->rt_runtime, want);
833 iter->rt_runtime -= diff;
834 want -= diff;
835 } else {
836 iter->rt_runtime -= want;
837 want -= want;
838 }
839 raw_spin_unlock(&iter->rt_runtime_lock);
840
841 if (!want)
842 break;
843 }
844
845 raw_spin_lock(&rt_rq->rt_runtime_lock);
846 /*
847 * We cannot be left wanting - that would mean some runtime
848 * leaked out of the system.
849 */
850 WARN_ON_ONCE(want);
851 balanced:
852 /*
853 * Disable all the borrow logic by pretending we have inf
854 * runtime - in which case borrowing doesn't make sense.
855 */
856 rt_rq->rt_runtime = RUNTIME_INF;
857 rt_rq->rt_throttled = 0;
858 raw_spin_unlock(&rt_rq->rt_runtime_lock);
859 raw_spin_unlock(&rt_b->rt_runtime_lock);
860
861 /* Make rt_rq available for pick_next_task() */
862 sched_rt_rq_enqueue(rt_rq);
863 }
864 }
865
866 static void __enable_runtime(struct rq *rq)
867 {
868 rt_rq_iter_t iter;
869 struct rt_rq *rt_rq;
870
871 if (unlikely(!scheduler_running))
872 return;
873
874 /*
875 * Reset each runqueue's bandwidth settings
876 */
877 for_each_rt_rq(rt_rq, iter, rq) {
878 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
879
880 raw_spin_lock(&rt_b->rt_runtime_lock);
881 raw_spin_lock(&rt_rq->rt_runtime_lock);
882 rt_rq->rt_runtime = rt_b->rt_runtime;
883 rt_rq->rt_time = 0;
884 rt_rq->rt_throttled = 0;
885 raw_spin_unlock(&rt_rq->rt_runtime_lock);
886 raw_spin_unlock(&rt_b->rt_runtime_lock);
887 }
888 }
889
890 static void balance_runtime(struct rt_rq *rt_rq)
891 {
892 if (!sched_feat(RT_RUNTIME_SHARE))
893 return;
894
895 if (rt_rq->rt_time > rt_rq->rt_runtime) {
896 raw_spin_unlock(&rt_rq->rt_runtime_lock);
897 do_balance_runtime(rt_rq);
898 raw_spin_lock(&rt_rq->rt_runtime_lock);
899 }
900 }
901 #else /* !CONFIG_SMP */
902 static inline void balance_runtime(struct rt_rq *rt_rq) {}
903 #endif /* CONFIG_SMP */
904
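/*
 * Runs from the rt_period timer: for every rt_rq in the period mask,
 * decay the accumulated rt_time by the replenished runtime and
 * unthrottle / re-enqueue runqueues that are back under their limit.
 * Returns 1 once everything is idle so the timer can be stopped.
 */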
905 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
906 {
907 int i, idle = 1, throttled = 0;
908 const struct cpumask *span;
909
910 span = sched_rt_period_mask();
911 #ifdef CONFIG_RT_GROUP_SCHED
912 /*
913 * FIXME: isolated CPUs should really leave the root task group,
914 * whether they are isolcpus or were isolated via cpusets, lest
915 * the timer run on a CPU which does not service all runqueues,
916 * potentially leaving other CPUs indefinitely throttled. If
917 * isolation is really required, the user will turn the throttle
918 * off to kill the perturbations it causes anyway. Meanwhile,
919 * this maintains functionality for boot and/or troubleshooting.
920 */
921 if (rt_b == &root_task_group.rt_bandwidth)
922 span = cpu_online_mask;
923 #endif
924 for_each_cpu(i, span) {
925 int enqueue = 0;
926 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
927 struct rq *rq = rq_of_rt_rq(rt_rq);
928 struct rq_flags rf;
929 int skip;
930
931 /*
932 * When span == cpu_online_mask, taking each rq->lock
933 * can be time-consuming. Try to avoid it when possible.
934 */
935 raw_spin_lock(&rt_rq->rt_runtime_lock);
936 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
937 rt_rq->rt_runtime = rt_b->rt_runtime;
938 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
939 raw_spin_unlock(&rt_rq->rt_runtime_lock);
940 if (skip)
941 continue;
942
943 rq_lock(rq, &rf);
944 update_rq_clock(rq);
945
946 if (rt_rq->rt_time) {
947 u64 runtime;
948
949 raw_spin_lock(&rt_rq->rt_runtime_lock);
950 if (rt_rq->rt_throttled)
951 balance_runtime(rt_rq);
952 runtime = rt_rq->rt_runtime;
953 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
954 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
955 rt_rq->rt_throttled = 0;
956 enqueue = 1;
957
958 /*
959 * When we're idle and a woken (rt) task is
960 * throttled wakeup_preempt() will set
961 * skip_update and the time between the wakeup
962 * and this unthrottle will get accounted as
963 * 'runtime'.
964 */
965 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
966 rq_clock_cancel_skipupdate(rq);
967 }
968 if (rt_rq->rt_time || rt_rq->rt_nr_running)
969 idle = 0;
970 raw_spin_unlock(&rt_rq->rt_runtime_lock);
971 } else if (rt_rq->rt_nr_running) {
972 idle = 0;
973 if (!rt_rq_throttled(rt_rq))
974 enqueue = 1;
975 }
976 if (rt_rq->rt_throttled)
977 throttled = 1;
978
979 if (enqueue)
980 sched_rt_rq_enqueue(rt_rq);
981 rq_unlock(rq, &rf);
982 }
983
984 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
985 return 1;
986
987 return idle;
988 }
989
990 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
991 {
992 #ifdef CONFIG_RT_GROUP_SCHED
993 struct rt_rq *rt_rq = group_rt_rq(rt_se);
994
995 if (rt_rq)
996 return rt_rq->highest_prio.curr;
997 #endif
998
999 return rt_task_of(rt_se)->prio;
1000 }
1001
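/*
 * Check whether this rt_rq has consumed more than its allotted runtime
 * in the current period; if so, mark it throttled and dequeue it so
 * lower classes can run until the period timer replenishes it.
 */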
1002 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
1003 {
1004 u64 runtime = sched_rt_runtime(rt_rq);
1005
1006 if (rt_rq->rt_throttled)
1007 return rt_rq_throttled(rt_rq);
1008
1009 if (runtime >= sched_rt_period(rt_rq))
1010 return 0;
1011
1012 balance_runtime(rt_rq);
1013 runtime = sched_rt_runtime(rt_rq);
1014 if (runtime == RUNTIME_INF)
1015 return 0;
1016
1017 if (rt_rq->rt_time > runtime) {
1018 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
1019
1020 /*
1021 * Don't actually throttle groups that have no runtime assigned
1022 * but accrue some time due to boosting.
1023 */
1024 if (likely(rt_b->rt_runtime)) {
1025 rt_rq->rt_throttled = 1;
1026 printk_deferred_once("sched: RT throttling activated\n");
1027 } else {
1028 /*
1029 * In case we did anyway, make it go away,
1030 * replenishment is a joke, since it will replenish us
1031 * with exactly 0 ns.
1032 */
1033 rt_rq->rt_time = 0;
1034 }
1035
1036 if (rt_rq_throttled(rt_rq)) {
1037 sched_rt_rq_dequeue(rt_rq);
1038 return 1;
1039 }
1040 }
1041
1042 return 0;
1043 }
1044
1045 /*
1046 * Update the current task's runtime statistics. Skip current tasks that
1047 * are not in our scheduling class.
1048 */
1049 static void update_curr_rt(struct rq *rq)
1050 {
1051 struct task_struct *curr = rq->curr;
1052 struct sched_rt_entity *rt_se = &curr->rt;
1053 s64 delta_exec;
1054
1055 if (curr->sched_class != &rt_sched_class)
1056 return;
1057
1058 delta_exec = update_curr_common(rq);
1059 if (unlikely(delta_exec <= 0))
1060 return;
1061
1062 if (!rt_bandwidth_enabled())
1063 return;
1064
1065 for_each_sched_rt_entity(rt_se) {
1066 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1067 int exceeded;
1068
1069 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
1070 raw_spin_lock(&rt_rq->rt_runtime_lock);
1071 rt_rq->rt_time += delta_exec;
1072 exceeded = sched_rt_runtime_exceeded(rt_rq);
1073 if (exceeded)
1074 resched_curr(rq);
1075 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1076 if (exceeded)
1077 do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
1078 }
1079 }
1080 }
1081
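/*
 * The root rt_rq contributes its rt_nr_running to rq->nr_running only
 * while it is queued; these two helpers remove/add that contribution
 * when the RT class as a whole is throttled, emptied or becomes
 * runnable again.
 */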
1082 static void
1083 dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
1084 {
1085 struct rq *rq = rq_of_rt_rq(rt_rq);
1086
1087 BUG_ON(&rq->rt != rt_rq);
1088
1089 if (!rt_rq->rt_queued)
1090 return;
1091
1092 BUG_ON(!rq->nr_running);
1093
1094 sub_nr_running(rq, count);
1095 rt_rq->rt_queued = 0;
1096
1097 }
1098
1099 static void
1100 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1101 {
1102 struct rq *rq = rq_of_rt_rq(rt_rq);
1103
1104 BUG_ON(&rq->rt != rt_rq);
1105
1106 if (rt_rq->rt_queued)
1107 return;
1108
1109 if (rt_rq_throttled(rt_rq))
1110 return;
1111
1112 if (rt_rq->rt_nr_running) {
1113 add_nr_running(rq, rt_rq->rt_nr_running);
1114 rt_rq->rt_queued = 1;
1115 }
1116
1117 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1118 cpufreq_update_util(rq, 0);
1119 }
1120
1121 #if defined CONFIG_SMP
1122
1123 static void
1124 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1125 {
1126 struct rq *rq = rq_of_rt_rq(rt_rq);
1127
1128 #ifdef CONFIG_RT_GROUP_SCHED
1129 /*
1130 * Change rq's cpupri only if rt_rq is the top queue.
1131 */
1132 if (&rq->rt != rt_rq)
1133 return;
1134 #endif
1135 if (rq->online && prio < prev_prio)
1136 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1137 }
1138
1139 static void
1140 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1141 {
1142 struct rq *rq = rq_of_rt_rq(rt_rq);
1143
1144 #ifdef CONFIG_RT_GROUP_SCHED
1145 /*
1146 * Change rq's cpupri only if rt_rq is the top queue.
1147 */
1148 if (&rq->rt != rt_rq)
1149 return;
1150 #endif
1151 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1152 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1153 }
1154
1155 #else /* CONFIG_SMP */
1156
1157 static inline
1158 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1159 static inline
1160 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1161
1162 #endif /* CONFIG_SMP */
1163
1164 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1165 static void
1166 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1167 {
1168 int prev_prio = rt_rq->highest_prio.curr;
1169
1170 if (prio < prev_prio)
1171 rt_rq->highest_prio.curr = prio;
1172
1173 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1174 }
1175
1176 static void
1177 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1178 {
1179 int prev_prio = rt_rq->highest_prio.curr;
1180
1181 if (rt_rq->rt_nr_running) {
1182
1183 WARN_ON(prio < prev_prio);
1184
1185 /*
1186 * This may have been our highest task, and therefore
1187 * we may have some recomputation to do
1188 */
1189 if (prio == prev_prio) {
1190 struct rt_prio_array *array = &rt_rq->active;
1191
1192 rt_rq->highest_prio.curr =
1193 sched_find_first_bit(array->bitmap);
1194 }
1195
1196 } else {
1197 rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
1198 }
1199
1200 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1201 }
1202
1203 #else
1204
1205 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1206 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1207
1208 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1209
1210 #ifdef CONFIG_RT_GROUP_SCHED
1211
1212 static void
1213 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1214 {
1215 if (rt_se_boosted(rt_se))
1216 rt_rq->rt_nr_boosted++;
1217
1218 if (rt_rq->tg)
1219 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1220 }
1221
1222 static void
1223 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1224 {
1225 if (rt_se_boosted(rt_se))
1226 rt_rq->rt_nr_boosted--;
1227
1228 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1229 }
1230
1231 #else /* CONFIG_RT_GROUP_SCHED */
1232
1233 static void
1234 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1235 {
1236 start_rt_bandwidth(&def_rt_bandwidth);
1237 }
1238
1239 static inline
1240 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1241
1242 #endif /* CONFIG_RT_GROUP_SCHED */
1243
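/*
 * Number of runnable tasks an entity represents: a task entity counts
 * as one, a group entity counts every task queued below it.
 */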
1244 static inline
1245 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1246 {
1247 struct rt_rq *group_rq = group_rt_rq(rt_se);
1248
1249 if (group_rq)
1250 return group_rq->rt_nr_running;
1251 else
1252 return 1;
1253 }
1254
1255 static inline
1256 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1257 {
1258 struct rt_rq *group_rq = group_rt_rq(rt_se);
1259 struct task_struct *tsk;
1260
1261 if (group_rq)
1262 return group_rq->rr_nr_running;
1263
1264 tsk = rt_task_of(rt_se);
1265
1266 return (tsk->policy == SCHED_RR) ? 1 : 0;
1267 }
1268
1269 static inline
1270 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1271 {
1272 int prio = rt_se_prio(rt_se);
1273
1274 WARN_ON(!rt_prio(prio));
1275 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1276 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1277
1278 inc_rt_prio(rt_rq, prio);
1279 inc_rt_migration(rt_se, rt_rq);
1280 inc_rt_group(rt_se, rt_rq);
1281 }
1282
1283 static inline
1284 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1285 {
1286 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1287 WARN_ON(!rt_rq->rt_nr_running);
1288 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1289 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1290
1291 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1292 dec_rt_migration(rt_se, rt_rq);
1293 dec_rt_group(rt_se, rt_rq);
1294 }
1295
1296 /*
1297 * Change rt_se->run_list location unless SAVE && !MOVE
1298 *
1299 * assumes ENQUEUE/DEQUEUE flags match
1300 */
1301 static inline bool move_entity(unsigned int flags)
1302 {
1303 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1304 return false;
1305
1306 return true;
1307 }
1308
1309 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1310 {
1311 list_del_init(&rt_se->run_list);
1312
1313 if (list_empty(array->queue + rt_se_prio(rt_se)))
1314 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1315
1316 rt_se->on_list = 0;
1317 }
1318
1319 static inline struct sched_statistics *
1320 __schedstats_from_rt_se(struct sched_rt_entity *rt_se)
1321 {
1322 #ifdef CONFIG_RT_GROUP_SCHED
1323 /* schedstats is not supported for rt group. */
1324 if (!rt_entity_is_task(rt_se))
1325 return NULL;
1326 #endif
1327
1328 return &rt_task_of(rt_se)->stats;
1329 }
1330
1331 static inline void
1332 update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1333 {
1334 struct sched_statistics *stats;
1335 struct task_struct *p = NULL;
1336
1337 if (!schedstat_enabled())
1338 return;
1339
1340 if (rt_entity_is_task(rt_se))
1341 p = rt_task_of(rt_se);
1342
1343 stats = __schedstats_from_rt_se(rt_se);
1344 if (!stats)
1345 return;
1346
1347 __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
1348 }
1349
1350 static inline void
1351 update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1352 {
1353 struct sched_statistics *stats;
1354 struct task_struct *p = NULL;
1355
1356 if (!schedstat_enabled())
1357 return;
1358
1359 if (rt_entity_is_task(rt_se))
1360 p = rt_task_of(rt_se);
1361
1362 stats = __schedstats_from_rt_se(rt_se);
1363 if (!stats)
1364 return;
1365
1366 __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
1367 }
1368
1369 static inline void
1370 update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1371 int flags)
1372 {
1373 if (!schedstat_enabled())
1374 return;
1375
1376 if (flags & ENQUEUE_WAKEUP)
1377 update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
1378 }
1379
1380 static inline void
1381 update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1382 {
1383 struct sched_statistics *stats;
1384 struct task_struct *p = NULL;
1385
1386 if (!schedstat_enabled())
1387 return;
1388
1389 if (rt_entity_is_task(rt_se))
1390 p = rt_task_of(rt_se);
1391
1392 stats = __schedstats_from_rt_se(rt_se);
1393 if (!stats)
1394 return;
1395
1396 __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
1397 }
1398
1399 static inline void
1400 update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1401 int flags)
1402 {
1403 struct task_struct *p = NULL;
1404
1405 if (!schedstat_enabled())
1406 return;
1407
1408 if (rt_entity_is_task(rt_se))
1409 p = rt_task_of(rt_se);
1410
1411 if ((flags & DEQUEUE_SLEEP) && p) {
1412 unsigned int state;
1413
1414 state = READ_ONCE(p->__state);
1415 if (state & TASK_INTERRUPTIBLE)
1416 __schedstat_set(p->stats.sleep_start,
1417 rq_clock(rq_of_rt_rq(rt_rq)));
1418
1419 if (state & TASK_UNINTERRUPTIBLE)
1420 __schedstat_set(p->stats.block_start,
1421 rq_clock(rq_of_rt_rq(rt_rq)));
1422 }
1423 }
1424
1425 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1426 {
1427 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1428 struct rt_prio_array *array = &rt_rq->active;
1429 struct rt_rq *group_rq = group_rt_rq(rt_se);
1430 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1431
1432 /*
1433 * Don't enqueue the group if it's throttled, or when empty.
1434 * The latter is a consequence of the former when a child group
1435 * gets throttled and the current group doesn't have any other
1436 * active members.
1437 */
1438 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1439 if (rt_se->on_list)
1440 __delist_rt_entity(rt_se, array);
1441 return;
1442 }
1443
1444 if (move_entity(flags)) {
1445 WARN_ON_ONCE(rt_se->on_list);
1446 if (flags & ENQUEUE_HEAD)
1447 list_add(&rt_se->run_list, queue);
1448 else
1449 list_add_tail(&rt_se->run_list, queue);
1450
1451 __set_bit(rt_se_prio(rt_se), array->bitmap);
1452 rt_se->on_list = 1;
1453 }
1454 rt_se->on_rq = 1;
1455
1456 inc_rt_tasks(rt_se, rt_rq);
1457 }
1458
1459 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1460 {
1461 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1462 struct rt_prio_array *array = &rt_rq->active;
1463
1464 if (move_entity(flags)) {
1465 WARN_ON_ONCE(!rt_se->on_list);
1466 __delist_rt_entity(rt_se, array);
1467 }
1468 rt_se->on_rq = 0;
1469
1470 dec_rt_tasks(rt_se, rt_rq);
1471 }
1472
1473 /*
1474 * Because the prio of an upper entry depends on the lower
1475 * entries, we must remove entries top - down.
1476 */
1477 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1478 {
1479 struct sched_rt_entity *back = NULL;
1480 unsigned int rt_nr_running;
1481
1482 for_each_sched_rt_entity(rt_se) {
1483 rt_se->back = back;
1484 back = rt_se;
1485 }
1486
1487 rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
1488
1489 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1490 if (on_rt_rq(rt_se))
1491 __dequeue_rt_entity(rt_se, flags);
1492 }
1493
1494 dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
1495 }
1496
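/*
 * To (de)queue an entity, first take its whole hierarchy off the queues
 * (top-down, see dequeue_rt_stack()), then re-add bottom-up so each
 * group level reflects the new state of its children.
 */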
1497 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1498 {
1499 struct rq *rq = rq_of_rt_se(rt_se);
1500
1501 update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1502
1503 dequeue_rt_stack(rt_se, flags);
1504 for_each_sched_rt_entity(rt_se)
1505 __enqueue_rt_entity(rt_se, flags);
1506 enqueue_top_rt_rq(&rq->rt);
1507 }
1508
1509 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1510 {
1511 struct rq *rq = rq_of_rt_se(rt_se);
1512
1513 update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1514
1515 dequeue_rt_stack(rt_se, flags);
1516
1517 for_each_sched_rt_entity(rt_se) {
1518 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1519
1520 if (rt_rq && rt_rq->rt_nr_running)
1521 __enqueue_rt_entity(rt_se, flags);
1522 }
1523 enqueue_top_rt_rq(&rq->rt);
1524 }
1525
1526 /*
1527 * Adding/removing a task to/from a priority array:
1528 */
1529 static void
1530 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1531 {
1532 struct sched_rt_entity *rt_se = &p->rt;
1533
1534 if (flags & ENQUEUE_WAKEUP)
1535 rt_se->timeout = 0;
1536
1537 check_schedstat_required();
1538 update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);
1539
1540 enqueue_rt_entity(rt_se, flags);
1541
1542 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1543 enqueue_pushable_task(rq, p);
1544 }
1545
1546 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1547 {
1548 struct sched_rt_entity *rt_se = &p->rt;
1549
1550 update_curr_rt(rq);
1551 dequeue_rt_entity(rt_se, flags);
1552
1553 dequeue_pushable_task(rq, p);
1554 }
1555
1556 /*
1557 * Put task to the head or the end of the run list without the overhead of
1558 * dequeue followed by enqueue.
1559 */
1560 static void
1561 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1562 {
1563 if (on_rt_rq(rt_se)) {
1564 struct rt_prio_array *array = &rt_rq->active;
1565 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1566
1567 if (head)
1568 list_move(&rt_se->run_list, queue);
1569 else
1570 list_move_tail(&rt_se->run_list, queue);
1571 }
1572 }
1573
1574 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1575 {
1576 struct sched_rt_entity *rt_se = &p->rt;
1577 struct rt_rq *rt_rq;
1578
1579 for_each_sched_rt_entity(rt_se) {
1580 rt_rq = rt_rq_of_se(rt_se);
1581 requeue_rt_entity(rt_rq, rt_se, head);
1582 }
1583 }
1584
1585 static void yield_task_rt(struct rq *rq)
1586 {
1587 requeue_task_rt(rq, rq->curr, 0);
1588 }
1589
1590 #ifdef CONFIG_SMP
1591 static int find_lowest_rq(struct task_struct *task);
1592
1593 static int
1594 select_task_rq_rt(struct task_struct *p, int cpu, int flags)
1595 {
1596 struct task_struct *curr;
1597 struct rq *rq;
1598 bool test;
1599
1600 /* For anything but wake ups, just return the task_cpu */
1601 if (!(flags & (WF_TTWU | WF_FORK)))
1602 goto out;
1603
1604 rq = cpu_rq(cpu);
1605
1606 rcu_read_lock();
1607 curr = READ_ONCE(rq->curr); /* unlocked access */
1608
1609 /*
1610 * If the current task on @p's runqueue is an RT task, then
1611 * try to see if we can wake this RT task up on another
1612 * runqueue. Otherwise simply start this RT task
1613 * on its current runqueue.
1614 *
1615 * We want to avoid overloading runqueues. If the woken
1616 * task is a higher priority, then it will stay on this CPU
1617 * and the lower prio task should be moved to another CPU.
1618 * Even though this will probably make the lower prio task
1619 * lose its cache, we do not want to bounce a higher task
1620 * around just because it gave up its CPU, perhaps for a
1621 * lock?
1622 *
1623 * For equal prio tasks, we just let the scheduler sort it out.
1624 *
1625 * Otherwise, just let it ride on the affined RQ and the
1626 * post-schedule router will push the preempted task away
1627 *
1628 * This test is optimistic, if we get it wrong the load-balancer
1629 * will have to sort it out.
1630 *
1631 * We take into account the capacity of the CPU to ensure it fits the
1632 * requirement of the task - which is only important on heterogeneous
1633 * systems like big.LITTLE.
1634 */
1635 test = curr &&
1636 unlikely(rt_task(curr)) &&
1637 (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
1638
1639 if (test || !rt_task_fits_capacity(p, cpu)) {
1640 int target = find_lowest_rq(p);
1641
1642 /*
1643 * Bail out if we were forcing a migration to find a better
1644 * fitting CPU but our search failed.
1645 */
1646 if (!test && target != -1 && !rt_task_fits_capacity(p, target))
1647 goto out_unlock;
1648
1649 /*
1650 * Don't bother moving it if the destination CPU is
1651 * not running a lower priority task.
1652 */
1653 if (target != -1 &&
1654 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1655 cpu = target;
1656 }
1657
1658 out_unlock:
1659 rcu_read_unlock();
1660
1661 out:
1662 return cpu;
1663 }
1664
1665 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1666 {
1667 /*
1668 * Current can't be migrated, useless to reschedule,
1669 * let's hope p can move out.
1670 */
1671 if (rq->curr->nr_cpus_allowed == 1 ||
1672 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1673 return;
1674
1675 /*
1676 * p is migratable, so let's not schedule it and
1677 * see if it is pushed or pulled somewhere else.
1678 */
1679 if (p->nr_cpus_allowed != 1 &&
1680 cpupri_find(&rq->rd->cpupri, p, NULL))
1681 return;
1682
1683 /*
1684 * There appear to be other CPUs that can accept
1685 * the current task but none can run 'p', so let's reschedule
1686 * to try and push the current task away:
1687 */
1688 requeue_task_rt(rq, p, 1);
1689 resched_curr(rq);
1690 }
1691
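/*
 * Balance callback for the RT class: if switching away from @p would
 * leave this rq with only lower-priority RT work, try to pull a
 * higher-priority task from another runqueue before picking.
 */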
1692 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1693 {
1694 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1695 /*
1696 * This is OK, because current is on_cpu, which avoids it being
1697 * picked for load-balance and preemption/IRQs are still
1698 * disabled avoiding further scheduler activity on it and we've
1699 * not yet started the picking loop.
1700 */
1701 rq_unpin_lock(rq, rf);
1702 pull_rt_task(rq);
1703 rq_repin_lock(rq, rf);
1704 }
1705
1706 return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1707 }
1708 #endif /* CONFIG_SMP */
1709
1710 /*
1711 * Preempt the current task with a newly woken task if needed:
1712 */
1713 static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
1714 {
1715 if (p->prio < rq->curr->prio) {
1716 resched_curr(rq);
1717 return;
1718 }
1719
1720 #ifdef CONFIG_SMP
1721 /*
1722 * If:
1723 *
1724 * - the newly woken task is of equal priority to the current task
1725 * - the newly woken task is non-migratable while current is migratable
1726 * - current will be preempted on the next reschedule
1727 *
1728 * we should check to see if current can readily move to a different
1729 * cpu. If so, we will reschedule to allow the push logic to try
1730 * to move current somewhere else, making room for our non-migratable
1731 * task.
1732 */
1733 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1734 check_preempt_equal_prio(rq, p);
1735 #endif
1736 }
1737
1738 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1739 {
1740 struct sched_rt_entity *rt_se = &p->rt;
1741 struct rt_rq *rt_rq = &rq->rt;
1742
1743 p->se.exec_start = rq_clock_task(rq);
1744 if (on_rt_rq(&p->rt))
1745 update_stats_wait_end_rt(rt_rq, rt_se);
1746
1747 /* The running task is never eligible for pushing */
1748 dequeue_pushable_task(rq, p);
1749
1750 if (!first)
1751 return;
1752
1753 /*
1754 * If prev task was rt, put_prev_task() has already updated the
1755 * utilization. We only care of the case where we start to schedule a
1756 * rt task
1757 */
1758 if (rq->curr->sched_class != &rt_sched_class)
1759 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1760
1761 rt_queue_push_tasks(rq);
1762 }
1763
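/*
 * Pick the next entity at this level: find the first set bit in the
 * priority bitmap and take the first entity queued at that priority.
 */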
1764 static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
1765 {
1766 struct rt_prio_array *array = &rt_rq->active;
1767 struct sched_rt_entity *next = NULL;
1768 struct list_head *queue;
1769 int idx;
1770
1771 idx = sched_find_first_bit(array->bitmap);
1772 BUG_ON(idx >= MAX_RT_PRIO);
1773
1774 queue = array->queue + idx;
1775 if (SCHED_WARN_ON(list_empty(queue)))
1776 return NULL;
1777 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1778
1779 return next;
1780 }
1781
1782 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1783 {
1784 struct sched_rt_entity *rt_se;
1785 struct rt_rq *rt_rq = &rq->rt;
1786
1787 do {
1788 rt_se = pick_next_rt_entity(rt_rq);
1789 if (unlikely(!rt_se))
1790 return NULL;
1791 rt_rq = group_rt_rq(rt_se);
1792 } while (rt_rq);
1793
1794 return rt_task_of(rt_se);
1795 }
1796
1797 static struct task_struct *pick_task_rt(struct rq *rq)
1798 {
1799 struct task_struct *p;
1800
1801 if (!sched_rt_runnable(rq))
1802 return NULL;
1803
1804 p = _pick_next_task_rt(rq);
1805
1806 return p;
1807 }
1808
1809 static struct task_struct *pick_next_task_rt(struct rq *rq)
1810 {
1811 struct task_struct *p = pick_task_rt(rq);
1812
1813 if (p)
1814 set_next_task_rt(rq, p, true);
1815
1816 return p;
1817 }
1818
1819 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1820 {
1821 struct sched_rt_entity *rt_se = &p->rt;
1822 struct rt_rq *rt_rq = &rq->rt;
1823
1824 if (on_rt_rq(&p->rt))
1825 update_stats_wait_start_rt(rt_rq, rt_se);
1826
1827 update_curr_rt(rq);
1828
1829 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1830
1831 /*
1832 * The previous task needs to be made eligible for pushing
1833 * if it is still active
1834 */
1835 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1836 enqueue_pushable_task(rq, p);
1837 }
1838
1839 #ifdef CONFIG_SMP
1840
1841 /* Only try algorithms three times */
1842 #define RT_MAX_TRIES 3
1843
1844 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1845 {
1846 if (!task_on_cpu(rq, p) &&
1847 cpumask_test_cpu(cpu, &p->cpus_mask))
1848 return 1;
1849
1850 return 0;
1851 }
1852
1853 /*
1854 * Return the highest pushable rq's task, which is suitable to be executed
1855 * on the CPU, NULL otherwise
1856 */
1857 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1858 {
1859 struct plist_head *head = &rq->rt.pushable_tasks;
1860 struct task_struct *p;
1861
1862 if (!has_pushable_tasks(rq))
1863 return NULL;
1864
1865 plist_for_each_entry(p, head, pushable_tasks) {
1866 if (pick_rt_task(rq, p, cpu))
1867 return p;
1868 }
1869
1870 return NULL;
1871 }
1872
1873 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1874
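/*
 * Find a CPU to push @task to: ask cpupri for CPUs running only
 * lower-priority work (honouring capacity fitness on asymmetric
 * systems), then prefer the task's previous CPU, this CPU, and finally
 * anything topologically close in the sched domains.
 */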
1875 static int find_lowest_rq(struct task_struct *task)
1876 {
1877 struct sched_domain *sd;
1878 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1879 int this_cpu = smp_processor_id();
1880 int cpu = task_cpu(task);
1881 int ret;
1882
1883 /* Make sure the mask is initialized first */
1884 if (unlikely(!lowest_mask))
1885 return -1;
1886
1887 if (task->nr_cpus_allowed == 1)
1888 return -1; /* No other targets possible */
1889
1890 /*
1891 * If we're on asym system ensure we consider the different capacities
1892 * of the CPUs when searching for the lowest_mask.
1893 */
1894 if (sched_asym_cpucap_active()) {
1895
1896 ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1897 task, lowest_mask,
1898 rt_task_fits_capacity);
1899 } else {
1900
1901 ret = cpupri_find(&task_rq(task)->rd->cpupri,
1902 task, lowest_mask);
1903 }
1904
1905 if (!ret)
1906 return -1; /* No targets found */
1907
1908 /*
1909 * At this point we have built a mask of CPUs representing the
1910 * lowest priority tasks in the system. Now we want to elect
1911 * the best one based on our affinity and topology.
1912 *
1913 * We prioritize the last CPU that the task executed on since
1914 * it is most likely cache-hot in that location.
1915 */
1916 if (cpumask_test_cpu(cpu, lowest_mask))
1917 return cpu;
1918
1919 /*
1920 * Otherwise, we consult the sched_domains span maps to figure
1921 * out which CPU is logically closest to our hot cache data.
1922 */
1923 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1924 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1925
1926 rcu_read_lock();
1927 for_each_domain(cpu, sd) {
1928 if (sd->flags & SD_WAKE_AFFINE) {
1929 int best_cpu;
1930
1931 /*
1932 * "this_cpu" is cheaper to preempt than a
1933 * remote processor.
1934 */
1935 if (this_cpu != -1 &&
1936 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1937 rcu_read_unlock();
1938 return this_cpu;
1939 }
1940
1941 best_cpu = cpumask_any_and_distribute(lowest_mask,
1942 sched_domain_span(sd));
1943 if (best_cpu < nr_cpu_ids) {
1944 rcu_read_unlock();
1945 return best_cpu;
1946 }
1947 }
1948 }
1949 rcu_read_unlock();
1950
1951 /*
1952 * And finally, if there were no matches within the domains
1953 * just give the caller *something* to work with from the compatible
1954 * locations.
1955 */
1956 if (this_cpu != -1)
1957 return this_cpu;
1958
1959 cpu = cpumask_any_distribute(lowest_mask);
1960 if (cpu < nr_cpu_ids)
1961 return cpu;
1962
1963 return -1;
1964 }
1965
1966 /* Will lock the rq it finds */
1967 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1968 {
1969 struct rq *lowest_rq = NULL;
1970 int tries;
1971 int cpu;
1972
1973 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1974 cpu = find_lowest_rq(task);
1975
1976 if ((cpu == -1) || (cpu == rq->cpu))
1977 break;
1978
1979 lowest_rq = cpu_rq(cpu);
1980
1981 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1982 /*
1983 * Target rq has tasks of equal or higher priority,
1984 * retrying does not release any lock and is unlikely
1985 * to yield a different result.
1986 */
1987 lowest_rq = NULL;
1988 break;
1989 }
1990
1991 /* if the prio of this runqueue changed, try again */
1992 if (double_lock_balance(rq, lowest_rq)) {
1993 /*
1994 * We had to unlock the run queue. In
1995 * the meantime, the task could have
1996 * migrated already or had its affinity changed.
1997 * Also make sure that it wasn't scheduled on its rq.
1998 * It is possible the task was scheduled, set
1999 * "migrate_disabled" and then got preempted, so we must
2000 * check the task migration disable flag here too.
2001 */
2002 if (unlikely(task_rq(task) != rq ||
2003 !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
2004 task_on_cpu(rq, task) ||
2005 !rt_task(task) ||
2006 is_migration_disabled(task) ||
2007 !task_on_rq_queued(task))) {
2008
2009 double_unlock_balance(rq, lowest_rq);
2010 lowest_rq = NULL;
2011 break;
2012 }
2013 }
2014
2015 /* If this rq is still suitable use it. */
2016 if (lowest_rq->rt.highest_prio.curr > task->prio)
2017 break;
2018
2019 /* try again */
2020 double_unlock_balance(rq, lowest_rq);
2021 lowest_rq = NULL;
2022 }
2023
2024 return lowest_rq;
2025 }
2026
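/*
 * Return the highest-priority task on this rq's pushable list, i.e. the
 * best candidate for migration to a lower-priority CPU.
 */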
2027 static struct task_struct *pick_next_pushable_task(struct rq *rq)
2028 {
2029 struct task_struct *p;
2030
2031 if (!has_pushable_tasks(rq))
2032 return NULL;
2033
2034 p = plist_first_entry(&rq->rt.pushable_tasks,
2035 struct task_struct, pushable_tasks);
2036
2037 BUG_ON(rq->cpu != task_cpu(p));
2038 BUG_ON(task_current(rq, p));
2039 BUG_ON(p->nr_cpus_allowed <= 1);
2040
2041 BUG_ON(!task_on_rq_queued(p));
2042 BUG_ON(!rt_task(p));
2043
2044 return p;
2045 }
2046
2047 /*
2048 * If the current CPU has more than one RT task, see if the non-running
2049 * task can migrate over to a CPU that is running a task
2050 * of lesser priority.
2051 */
2052 static int push_rt_task(struct rq *rq, bool pull)
2053 {
2054 struct task_struct *next_task;
2055 struct rq *lowest_rq;
2056 int ret = 0;
2057
2058 if (!rq->rt.overloaded)
2059 return 0;
2060
2061 next_task = pick_next_pushable_task(rq);
2062 if (!next_task)
2063 return 0;
2064
2065 retry:
2066 /*
2067 * It's possible that the next_task slipped in with a
2068 * higher priority than current. If that's the case,
2069 * just reschedule current.
2070 */
2071 if (unlikely(next_task->prio < rq->curr->prio)) {
2072 resched_curr(rq);
2073 return 0;
2074 }
2075
2076 if (is_migration_disabled(next_task)) {
2077 struct task_struct *push_task = NULL;
2078 int cpu;
2079
2080 if (!pull || rq->push_busy)
2081 return 0;
2082
2083 /*
2084 * Invoking find_lowest_rq() on anything but an RT task doesn't
2085 * make sense. Per the above priority check, curr has to
2086 * be of higher priority than next_task, so no need to
2087 * reschedule when bailing out.
2088 *
2089 * Note that the stoppers are masqueraded as SCHED_FIFO
2090 * (cf. sched_set_stop_task()), so we can't rely on rt_task().
2091 */
2092 if (rq->curr->sched_class != &rt_sched_class)
2093 return 0;
2094
2095 cpu = find_lowest_rq(rq->curr);
2096 if (cpu == -1 || cpu == rq->cpu)
2097 return 0;
2098
2099 /*
2100 * Since we found a CPU with lower priority than @next_task,
2101 * it should be running. However, we cannot migrate it
2102 * to this other CPU; instead, attempt to push the currently
2103 * running task on this CPU away.
2104 */
2105 push_task = get_push_task(rq);
2106 if (push_task) {
2107 preempt_disable();
2108 raw_spin_rq_unlock(rq);
2109 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2110 push_task, &rq->push_work);
2111 preempt_enable();
2112 raw_spin_rq_lock(rq);
2113 }
2114
2115 return 0;
2116 }
2117
2118 if (WARN_ON(next_task == rq->curr))
2119 return 0;
2120
2121 /* We might release rq lock */
2122 get_task_struct(next_task);
2123
2124 /* find_lock_lowest_rq locks the rq if found */
2125 lowest_rq = find_lock_lowest_rq(next_task, rq);
2126 if (!lowest_rq) {
2127 struct task_struct *task;
2128 /*
2129 * find_lock_lowest_rq releases rq->lock
2130 * so it is possible that next_task has migrated.
2131 *
2132 * We need to make sure that the task is still on the same
2133 * run-queue and is also still the next task eligible for
2134 * pushing.
2135 */
2136 task = pick_next_pushable_task(rq);
2137 if (task == next_task) {
2138 /*
2139 * The task hasn't migrated, and is still the next
2140 * eligible task, but we failed to find a run-queue
2141 * to push it to. Do not retry in this case, since
2142 * other CPUs will pull from us when ready.
2143 */
2144 goto out;
2145 }
2146
2147 if (!task)
2148 /* No more tasks, just exit */
2149 goto out;
2150
2151 /*
2152 * Something has shifted, try again.
2153 */
2154 put_task_struct(next_task);
2155 next_task = task;
2156 goto retry;
2157 }
2158
2159 deactivate_task(rq, next_task, 0);
2160 set_task_cpu(next_task, lowest_rq->cpu);
2161 activate_task(lowest_rq, next_task, 0);
2162 resched_curr(lowest_rq);
2163 ret = 1;
2164
2165 double_unlock_balance(rq, lowest_rq);
2166 out:
2167 put_task_struct(next_task);
2168
2169 return ret;
2170 }
2171
2172 static void push_rt_tasks(struct rq *rq)
2173 {
2174 /* push_rt_task will return true if it moved an RT */
2175 while (push_rt_task(rq, false))
2176 ;
2177 }
2178
2179 #ifdef HAVE_RT_PUSH_IPI
2180
2181 /*
2182 * When a high priority task schedules out from a CPU and a lower priority
2183 * task is scheduled in, a check is made to see if there are any RT tasks
2184 * on other CPUs that are waiting to run because a higher priority RT task
2185 * is currently running on its CPU. In this case, the CPU with multiple RT
2186 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
2187 * up that may be able to run one of its non-running queued RT tasks.
2188 *
2189 * All CPUs with overloaded RT tasks need to be notified, as there is currently
2190 * no way to know which of these CPUs has the highest priority task waiting
2191 * to run. Instead of trying to take a spinlock on each of these CPUs,
2192 * which has been shown to cause large latency on machines with many
2193 * CPUs, an IPI is sent to the CPUs to have them push off their overloaded
2194 * RT tasks that are waiting to run.
2195 *
2196 * Just sending an IPI to each of the CPUs is also an issue, as on machines
2197 * with a large CPU count this can cause an IPI storm on a CPU, especially
2198 * if it is the only CPU with multiple RT tasks queued and a large number
2199 * of CPUs are scheduling a lower priority task at the same time.
2200 *
2201 * Each root domain has its own irq work function that can iterate over
2202 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
2203 * tasks must be checked whether one or many CPUs are lowering their
2204 * priority, a single irq work iterator will try to push off the RT tasks
2205 * that are waiting to run.
2206 *
2207 * When a CPU schedules a lower priority task, it will kick off the
2208 * irq work iterator that will jump to each CPU with overloaded RT tasks.
2209 * As it only takes the first CPU that schedules a lower priority task
2210 * to start the process, the rto_start variable is incremented and if
2211 * the atomic result is one, then that CPU will try to take the rto_lock.
2212 * This prevents high contention on the lock as the process handles all
2213 * CPUs scheduling lower priority tasks.
2214 *
2215 * All CPUs that are scheduling a lower priority task will increment the
2216 * rto_loop_next variable. This makes sure that the irq work iterator
2217 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
2218 * priority task, even if the iterator is in the middle of a scan.
2219 * Incrementing rto_loop_next will cause the iterator to perform another scan.
2220 *
2221 */
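/*
 * Illustrative walk-through (hypothetical CPU numbers): CPUs 0 and 2 are RT
 * overloaded. CPU 5 schedules a lower-priority task, increments
 * rto_loop_next, wins rto_loop_start and queues rto_push_work on CPU 0.
 * CPU 0 pushes what it can and forwards the irq work to CPU 2. If CPU 7
 * also lowers its priority in the meantime, its rto_loop_next increment
 * makes the iterator rescan the rto_mask before the loop finally stops.
 */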
2222 static int rto_next_cpu(struct root_domain *rd)
2223 {
2224 int next;
2225 int cpu;
2226
2227 /*
2228 * When starting the IPI RT pushing, the rto_cpu is set to -1,
2229 * so rto_next_cpu() will simply return the first CPU found in
2230 * the rto_mask.
2231 *
2232 * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
2233 * will return the next CPU found in the rto_mask.
2234 *
2235 * If there are no more CPUs left in the rto_mask, then a check is made
2236 * against rto_loop and rto_loop_next. rto_loop is only updated with
2237 * the rto_lock held, but any CPU may increment the rto_loop_next
2238 * without any locking.
2239 */
2240 for (;;) {
2241
2242 /* When rto_cpu is -1 this acts like cpumask_first() */
2243 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
2244
2245 rd->rto_cpu = cpu;
2246
2247 if (cpu < nr_cpu_ids)
2248 return cpu;
2249
2250 rd->rto_cpu = -1;
2251
2252 /*
2253 * ACQUIRE ensures we see the @rto_mask changes
2254 * made prior to the @next value observed.
2255 *
2256 * Matches WMB in rt_set_overload().
2257 */
2258 next = atomic_read_acquire(&rd->rto_loop_next);
2259
2260 if (rd->rto_loop == next)
2261 break;
2262
2263 rd->rto_loop = next;
2264 }
2265
2266 return -1;
2267 }
2268
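/*
 * rto_loop_start acts as a simple test-and-set lock: only the CPU that
 * flips it from 0 to 1 may start (or restart) the push IPI loop.
 */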
2269 static inline bool rto_start_trylock(atomic_t *v)
2270 {
2271 return !atomic_cmpxchg_acquire(v, 0, 1);
2272 }
2273
2274 static inline void rto_start_unlock(atomic_t *v)
2275 {
2276 atomic_set_release(v, 0);
2277 }
2278
2279 static void tell_cpu_to_push(struct rq *rq)
2280 {
2281 int cpu = -1;
2282
2283 /* Keep the loop going if the IPI is currently active */
2284 atomic_inc(&rq->rd->rto_loop_next);
2285
2286 /* Only one CPU can initiate a loop at a time */
2287 if (!rto_start_trylock(&rq->rd->rto_loop_start))
2288 return;
2289
2290 raw_spin_lock(&rq->rd->rto_lock);
2291
2292 /*
2293 * The rto_cpu is updated under the lock. If it holds a valid CPU,
2294 * then the IPI is still running and will continue due to the
2295 * update to loop_next, and nothing needs to be done here.
2296 * Otherwise it is finishing up and an IPI needs to be sent.
2297 */
2298 if (rq->rd->rto_cpu < 0)
2299 cpu = rto_next_cpu(rq->rd);
2300
2301 raw_spin_unlock(&rq->rd->rto_lock);
2302
2303 rto_start_unlock(&rq->rd->rto_loop_start);
2304
2305 if (cpu >= 0) {
2306 /* Make sure the rd does not get freed while pushing */
2307 sched_get_rd(rq->rd);
2308 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2309 }
2310 }
2311
2312 /* Called from hardirq context */
2313 void rto_push_irq_work_func(struct irq_work *work)
2314 {
2315 struct root_domain *rd =
2316 container_of(work, struct root_domain, rto_push_work);
2317 struct rq *rq;
2318 int cpu;
2319
2320 rq = this_rq();
2321
2322 /*
2323 * We do not need to grab the lock to check for has_pushable_tasks.
2324 * When it gets updated, a check is made if a push is possible.
2325 */
2326 if (has_pushable_tasks(rq)) {
2327 raw_spin_rq_lock(rq);
2328 while (push_rt_task(rq, true))
2329 ;
2330 raw_spin_rq_unlock(rq);
2331 }
2332
2333 raw_spin_lock(&rd->rto_lock);
2334
2335 /* Pass the IPI to the next rt overloaded queue */
2336 cpu = rto_next_cpu(rd);
2337
2338 raw_spin_unlock(&rd->rto_lock);
2339
2340 if (cpu < 0) {
2341 sched_put_rd(rd);
2342 return;
2343 }
2344
2345 /* Try the next RT overloaded CPU */
2346 irq_work_queue_on(&rd->rto_push_work, cpu);
2347 }
2348 #endif /* HAVE_RT_PUSH_IPI */
2349
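/*
 * Try to pull a higher-priority, pushable RT task from an RT-overloaded CPU
 * onto this runqueue, or defer to the push IPI machinery when the
 * RT_PUSH_IPI scheduler feature is enabled.
 */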
2350 static void pull_rt_task(struct rq *this_rq)
2351 {
2352 int this_cpu = this_rq->cpu, cpu;
2353 bool resched = false;
2354 struct task_struct *p, *push_task;
2355 struct rq *src_rq;
2356 int rt_overload_count = rt_overloaded(this_rq);
2357
2358 if (likely(!rt_overload_count))
2359 return;
2360
2361 /*
2362 * Match the barrier from rt_set_overload(); this guarantees that if we
2363 * see overloaded we must also see the rto_mask bit.
2364 */
2365 smp_rmb();
2366
2367 /* If we are the only overloaded CPU do nothing */
2368 if (rt_overload_count == 1 &&
2369 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2370 return;
2371
2372 #ifdef HAVE_RT_PUSH_IPI
2373 if (sched_feat(RT_PUSH_IPI)) {
2374 tell_cpu_to_push(this_rq);
2375 return;
2376 }
2377 #endif
2378
2379 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2380 if (this_cpu == cpu)
2381 continue;
2382
2383 src_rq = cpu_rq(cpu);
2384
2385 /*
2386 * Don't bother taking the src_rq->lock if the next highest
2387 * task is known to be lower-priority than our current task.
2388 * This may look racy, but if this value is about to go
2389 * logically higher, the src_rq will push this task away.
2390 * And if it's going logically lower, we do not care.
2391 */
2392 if (src_rq->rt.highest_prio.next >=
2393 this_rq->rt.highest_prio.curr)
2394 continue;
2395
2396 /*
2397 * We can potentially drop this_rq's lock in
2398 * double_lock_balance, and another CPU could
2399 * alter this_rq
2400 */
2401 push_task = NULL;
2402 double_lock_balance(this_rq, src_rq);
2403
2404 /*
2405 * We can only pull a task that is pushable
2406 * on its rq, and no others.
2407 */
2408 p = pick_highest_pushable_task(src_rq, this_cpu);
2409
2410 /*
2411 * Do we have an RT task that preempts
2412 * the to-be-scheduled task?
2413 */
2414 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2415 WARN_ON(p == src_rq->curr);
2416 WARN_ON(!task_on_rq_queued(p));
2417
2418 /*
2419 * There's a chance that p is higher in priority
2420 * than what's currently running on its CPU.
2421 * This is just that p is waking up and hasn't
2422 * had a chance to schedule. We only pull
2423 * p if it is lower in priority than the
2424 * current task on the run queue
2425 */
2426 if (p->prio < src_rq->curr->prio)
2427 goto skip;
2428
2429 if (is_migration_disabled(p)) {
2430 push_task = get_push_task(src_rq);
2431 } else {
2432 deactivate_task(src_rq, p, 0);
2433 set_task_cpu(p, this_cpu);
2434 activate_task(this_rq, p, 0);
2435 resched = true;
2436 }
2437 /*
2438 * We continue with the search, just in
2439 * case there's an even higher prio task
2440 * in another runqueue. (low likelihood
2441 * but possible)
2442 */
2443 }
2444 skip:
2445 double_unlock_balance(this_rq, src_rq);
2446
2447 if (push_task) {
2448 preempt_disable();
2449 raw_spin_rq_unlock(this_rq);
2450 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2451 push_task, &src_rq->push_work);
2452 preempt_enable();
2453 raw_spin_rq_lock(this_rq);
2454 }
2455 }
2456
2457 if (resched)
2458 resched_curr(this_rq);
2459 }
2460
2461 /*
2462 * If we are not running and we are not going to reschedule soon, we should
2463 * try to push tasks away now
2464 */
2465 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2466 {
2467 bool need_to_push = !task_on_cpu(rq, p) &&
2468 !test_tsk_need_resched(rq->curr) &&
2469 p->nr_cpus_allowed > 1 &&
2470 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2471 (rq->curr->nr_cpus_allowed < 2 ||
2472 rq->curr->prio <= p->prio);
2473
2474 if (need_to_push)
2475 push_rt_tasks(rq);
2476 }
2477
2478 /* Assumes rq->lock is held */
2479 static void rq_online_rt(struct rq *rq)
2480 {
2481 if (rq->rt.overloaded)
2482 rt_set_overload(rq);
2483
2484 __enable_runtime(rq);
2485
2486 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2487 }
2488
2489 /* Assumes rq->lock is held */
2490 static void rq_offline_rt(struct rq *rq)
2491 {
2492 if (rq->rt.overloaded)
2493 rt_clear_overload(rq);
2494
2495 __disable_runtime(rq);
2496
2497 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2498 }
2499
2500 /*
2501 * When switching from the rt queue, we bring ourselves to a position
2502 * where we might want to pull RT tasks from other runqueues.
2503 */
2504 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2505 {
2506 /*
2507 * If there are other RT tasks then we will reschedule
2508 * and the scheduling of the other RT tasks will handle
2509 * the balancing. But if we are the last RT task
2510 * we may need to handle the pulling of RT tasks
2511 * now.
2512 */
2513 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2514 return;
2515
2516 rt_queue_pull_task(rq);
2517 }
2518
2519 void __init init_sched_rt_class(void)
2520 {
2521 unsigned int i;
2522
2523 for_each_possible_cpu(i) {
2524 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2525 GFP_KERNEL, cpu_to_node(i));
2526 }
2527 }
2528 #endif /* CONFIG_SMP */
2529
2530 /*
2531 * When switching a task to RT, we may overload the runqueue
2532 * with RT tasks. In this case we try to push them off to
2533 * other runqueues.
2534 */
2535 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2536 {
2537 /*
2538 * If we are running, update the avg_rt tracking, as the running time
2539 * will from now on be accounted there.
2540 */
2541 if (task_current(rq, p)) {
2542 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2543 return;
2544 }
2545
2546 /*
2547 * If we are not running, we may need to preempt the currently
2548 * running task. If that currently running task is also an RT task,
2549 * then see if we can move to another run queue.
2550 */
2551 if (task_on_rq_queued(p)) {
2552 #ifdef CONFIG_SMP
2553 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2554 rt_queue_push_tasks(rq);
2555 #endif /* CONFIG_SMP */
2556 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2557 resched_curr(rq);
2558 }
2559 }
2560
2561 /*
2562 * Priority of the task has changed. This may cause
2563 * us to initiate a push or pull.
2564 */
2565 static void
2566 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2567 {
2568 if (!task_on_rq_queued(p))
2569 return;
2570
2571 if (task_current(rq, p)) {
2572 #ifdef CONFIG_SMP
2573 /*
2574 * If our priority decreases while running, we
2575 * may need to pull tasks to this runqueue.
2576 */
2577 if (oldprio < p->prio)
2578 rt_queue_pull_task(rq);
2579
2580 /*
2581 * If there's a higher priority task waiting to run
2582 * then reschedule.
2583 */
2584 if (p->prio > rq->rt.highest_prio.curr)
2585 resched_curr(rq);
2586 #else
2587 /* For UP simply resched on drop of prio */
2588 if (oldprio < p->prio)
2589 resched_curr(rq);
2590 #endif /* CONFIG_SMP */
2591 } else {
2592 /*
2593 * This task is not running, but if its priority is
2594 * higher than that of the currently running task,
2595 * then reschedule.
2596 */
2597 if (p->prio < rq->curr->prio)
2598 resched_curr(rq);
2599 }
2600 }
2601
2602 #ifdef CONFIG_POSIX_TIMERS
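/*
 * Enforce RLIMIT_RTTIME: count, in ticks, how long this task has been
 * running as an RT task and, once the soft limit is exceeded, let the
 * POSIX CPU timer code take over and deliver the signal.
 */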
2603 static void watchdog(struct rq *rq, struct task_struct *p)
2604 {
2605 unsigned long soft, hard;
2606
2607 /* max may change after cur was read; this will be fixed on the next tick */
2608 soft = task_rlimit(p, RLIMIT_RTTIME);
2609 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2610
2611 if (soft != RLIM_INFINITY) {
2612 unsigned long next;
2613
2614 if (p->rt.watchdog_stamp != jiffies) {
2615 p->rt.timeout++;
2616 p->rt.watchdog_stamp = jiffies;
2617 }
2618
2619 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2620 if (p->rt.timeout > next) {
2621 posix_cputimers_rt_watchdog(&p->posix_cputimers,
2622 p->se.sum_exec_runtime);
2623 }
2624 }
2625 }
2626 #else
2627 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2628 #endif
2629
2630 /*
2631 * scheduler tick hitting a task of our scheduling class.
2632 *
2633 * NOTE: This function can be called remotely by the tick offload that
2634 * goes along full dynticks. Therefore no local assumption can be made
2635 * and everything must be accessed through the @rq and @curr passed in
2636 * parameters.
2637 */
2638 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2639 {
2640 struct sched_rt_entity *rt_se = &p->rt;
2641
2642 update_curr_rt(rq);
2643 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2644
2645 watchdog(rq, p);
2646
2647 /*
2648 * RR tasks need a special form of timeslice management.
2649 * FIFO tasks have no timeslices.
2650 */
2651 if (p->policy != SCHED_RR)
2652 return;
2653
2654 if (--p->rt.time_slice)
2655 return;
2656
2657 p->rt.time_slice = sched_rr_timeslice;
2658
2659 /*
2660 * Requeue to the end of queue if we (and all of our ancestors) are not
2661 * the only element on the queue
2662 */
2663 for_each_sched_rt_entity(rt_se) {
2664 if (rt_se->run_list.prev != rt_se->run_list.next) {
2665 requeue_task_rt(rq, p, 0);
2666 resched_curr(rq);
2667 return;
2668 }
2669 }
2670 }
2671
2672 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2673 {
2674 /*
2675 * Time slice is 0 for SCHED_FIFO tasks
2676 */
2677 if (task->policy == SCHED_RR)
2678 return sched_rr_timeslice;
2679 else
2680 return 0;
2681 }
2682
2683 #ifdef CONFIG_SCHED_CORE
2684 static int task_is_throttled_rt(struct task_struct *p, int cpu)
2685 {
2686 struct rt_rq *rt_rq;
2687
2688 #ifdef CONFIG_RT_GROUP_SCHED
2689 rt_rq = task_group(p)->rt_rq[cpu];
2690 #else
2691 rt_rq = &cpu_rq(cpu)->rt;
2692 #endif
2693
2694 return rt_rq_throttled(rt_rq);
2695 }
2696 #endif
2697
2698 DEFINE_SCHED_CLASS(rt) = {
2699
2700 .enqueue_task = enqueue_task_rt,
2701 .dequeue_task = dequeue_task_rt,
2702 .yield_task = yield_task_rt,
2703
2704 .wakeup_preempt = wakeup_preempt_rt,
2705
2706 .pick_next_task = pick_next_task_rt,
2707 .put_prev_task = put_prev_task_rt,
2708 .set_next_task = set_next_task_rt,
2709
2710 #ifdef CONFIG_SMP
2711 .balance = balance_rt,
2712 .pick_task = pick_task_rt,
2713 .select_task_rq = select_task_rq_rt,
2714 .set_cpus_allowed = set_cpus_allowed_common,
2715 .rq_online = rq_online_rt,
2716 .rq_offline = rq_offline_rt,
2717 .task_woken = task_woken_rt,
2718 .switched_from = switched_from_rt,
2719 .find_lock_rq = find_lock_lowest_rq,
2720 #endif
2721
2722 .task_tick = task_tick_rt,
2723
2724 .get_rr_interval = get_rr_interval_rt,
2725
2726 .prio_changed = prio_changed_rt,
2727 .switched_to = switched_to_rt,
2728
2729 .update_curr = update_curr_rt,
2730
2731 #ifdef CONFIG_SCHED_CORE
2732 .task_is_throttled = task_is_throttled_rt,
2733 #endif
2734
2735 #ifdef CONFIG_UCLAMP_TASK
2736 .uclamp_enabled = 1,
2737 #endif
2738 };
2739
2740 #ifdef CONFIG_RT_GROUP_SCHED
2741 /*
2742 * Ensure that the real time constraints are schedulable.
2743 */
2744 static DEFINE_MUTEX(rt_constraints_mutex);
2745
2746 static inline int tg_has_rt_tasks(struct task_group *tg)
2747 {
2748 struct task_struct *task;
2749 struct css_task_iter it;
2750 int ret = 0;
2751
2752 /*
2753 * Autogroups do not have RT tasks; see autogroup_create().
2754 */
2755 if (task_group_is_autogroup(tg))
2756 return 0;
2757
2758 css_task_iter_start(&tg->css, 0, &it);
2759 while (!ret && (task = css_task_iter_next(&it)))
2760 ret |= rt_task(task);
2761 css_task_iter_end(&it);
2762
2763 return ret;
2764 }
2765
2766 struct rt_schedulable_data {
2767 struct task_group *tg;
2768 u64 rt_period;
2769 u64 rt_runtime;
2770 };
2771
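/*
 * walk_tg_tree() callback: verify that @tg's (possibly updated) bandwidth
 * fits within its period and the global limit, that dropping the runtime
 * to zero does not strand existing RT tasks, and that the children's
 * combined bandwidth still fits within @tg's own.
 */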
2772 static int tg_rt_schedulable(struct task_group *tg, void *data)
2773 {
2774 struct rt_schedulable_data *d = data;
2775 struct task_group *child;
2776 unsigned long total, sum = 0;
2777 u64 period, runtime;
2778
2779 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2780 runtime = tg->rt_bandwidth.rt_runtime;
2781
2782 if (tg == d->tg) {
2783 period = d->rt_period;
2784 runtime = d->rt_runtime;
2785 }
2786
2787 /*
2788 * Cannot have more runtime than the period.
2789 */
2790 if (runtime > period && runtime != RUNTIME_INF)
2791 return -EINVAL;
2792
2793 /*
2794 * Ensure we don't starve existing RT tasks if runtime turns zero.
2795 */
2796 if (rt_bandwidth_enabled() && !runtime &&
2797 tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
2798 return -EBUSY;
2799
2800 total = to_ratio(period, runtime);
2801
2802 /*
2803 * Nobody can have more than the global setting allows.
2804 */
2805 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2806 return -EINVAL;
2807
2808 /*
2809 * The sum of our children's runtime should not exceed our own.
2810 */
2811 list_for_each_entry_rcu(child, &tg->children, siblings) {
2812 period = ktime_to_ns(child->rt_bandwidth.rt_period);
2813 runtime = child->rt_bandwidth.rt_runtime;
2814
2815 if (child == d->tg) {
2816 period = d->rt_period;
2817 runtime = d->rt_runtime;
2818 }
2819
2820 sum += to_ratio(period, runtime);
2821 }
2822
2823 if (sum > total)
2824 return -EINVAL;
2825
2826 return 0;
2827 }
2828
2829 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2830 {
2831 int ret;
2832
2833 struct rt_schedulable_data data = {
2834 .tg = tg,
2835 .rt_period = period,
2836 .rt_runtime = runtime,
2837 };
2838
2839 rcu_read_lock();
2840 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2841 rcu_read_unlock();
2842
2843 return ret;
2844 }
2845
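/*
 * Install a new period/runtime pair for @tg once __rt_schedulable() has
 * accepted it, and propagate the runtime to every per-CPU rt_rq of the
 * group.
 */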
2846 static int tg_set_rt_bandwidth(struct task_group *tg,
2847 u64 rt_period, u64 rt_runtime)
2848 {
2849 int i, err = 0;
2850
2851 /*
2852 * Disallowing the root group RT runtime is BAD; it would prevent the
2853 * kernel from creating (and/or operating) RT threads.
2854 */
2855 if (tg == &root_task_group && rt_runtime == 0)
2856 return -EINVAL;
2857
2858 /* No period doesn't make any sense. */
2859 if (rt_period == 0)
2860 return -EINVAL;
2861
2862 /*
2863 * Bound quota to defend quota against overflow during bandwidth shift.
2864 */
2865 if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2866 return -EINVAL;
2867
2868 mutex_lock(&rt_constraints_mutex);
2869 err = __rt_schedulable(tg, rt_period, rt_runtime);
2870 if (err)
2871 goto unlock;
2872
2873 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2874 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2875 tg->rt_bandwidth.rt_runtime = rt_runtime;
2876
2877 for_each_possible_cpu(i) {
2878 struct rt_rq *rt_rq = tg->rt_rq[i];
2879
2880 raw_spin_lock(&rt_rq->rt_runtime_lock);
2881 rt_rq->rt_runtime = rt_runtime;
2882 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2883 }
2884 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2885 unlock:
2886 mutex_unlock(&rt_constraints_mutex);
2887
2888 return err;
2889 }
2890
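/*
 * cgroup interface: the RT group runtime is specified in microseconds,
 * with -1 meaning "unlimited" (RUNTIME_INF); the period below is likewise
 * given in microseconds.
 */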
2891 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2892 {
2893 u64 rt_runtime, rt_period;
2894
2895 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2896 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2897 if (rt_runtime_us < 0)
2898 rt_runtime = RUNTIME_INF;
2899 else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2900 return -EINVAL;
2901
2902 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2903 }
2904
2905 long sched_group_rt_runtime(struct task_group *tg)
2906 {
2907 u64 rt_runtime_us;
2908
2909 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2910 return -1;
2911
2912 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2913 do_div(rt_runtime_us, NSEC_PER_USEC);
2914 return rt_runtime_us;
2915 }
2916
2917 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2918 {
2919 u64 rt_runtime, rt_period;
2920
2921 if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2922 return -EINVAL;
2923
2924 rt_period = rt_period_us * NSEC_PER_USEC;
2925 rt_runtime = tg->rt_bandwidth.rt_runtime;
2926
2927 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2928 }
2929
2930 long sched_group_rt_period(struct task_group *tg)
2931 {
2932 u64 rt_period_us;
2933
2934 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2935 do_div(rt_period_us, NSEC_PER_USEC);
2936 return rt_period_us;
2937 }
2938
2939 #ifdef CONFIG_SYSCTL
2940 static int sched_rt_global_constraints(void)
2941 {
2942 int ret = 0;
2943
2944 mutex_lock(&rt_constraints_mutex);
2945 ret = __rt_schedulable(NULL, 0, 0);
2946 mutex_unlock(&rt_constraints_mutex);
2947
2948 return ret;
2949 }
2950 #endif /* CONFIG_SYSCTL */
2951
2952 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2953 {
2954 /* Don't accept realtime tasks when there is no way for them to run */
2955 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2956 return 0;
2957
2958 return 1;
2959 }
2960
2961 #else /* !CONFIG_RT_GROUP_SCHED */
2962
2963 #ifdef CONFIG_SYSCTL
2964 static int sched_rt_global_constraints(void)
2965 {
2966 unsigned long flags;
2967 int i;
2968
2969 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2970 for_each_possible_cpu(i) {
2971 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2972
2973 raw_spin_lock(&rt_rq->rt_runtime_lock);
2974 rt_rq->rt_runtime = global_rt_runtime();
2975 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2976 }
2977 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2978
2979 return 0;
2980 }
2981 #endif /* CONFIG_SYSCTL */
2982 #endif /* CONFIG_RT_GROUP_SCHED */
2983
2984 #ifdef CONFIG_SYSCTL
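/*
 * Global sysctl limits: sched_rt_runtime_us must not exceed
 * sched_rt_period_us and, converted to nanoseconds, must stay below
 * max_rt_runtime, unless it is RUNTIME_INF (written as -1).
 */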
2985 static int sched_rt_global_validate(void)
2986 {
2987 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2988 ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2989 ((u64)sysctl_sched_rt_runtime *
2990 NSEC_PER_USEC > max_rt_runtime)))
2991 return -EINVAL;
2992
2993 return 0;
2994 }
2995
2996 static void sched_rt_do_global(void)
2997 {
2998 unsigned long flags;
2999
3000 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
3001 def_rt_bandwidth.rt_runtime = global_rt_runtime();
3002 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
3003 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
3004 }
3005
3006 static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
3007 size_t *lenp, loff_t *ppos)
3008 {
3009 int old_period, old_runtime;
3010 static DEFINE_MUTEX(mutex);
3011 int ret;
3012
3013 mutex_lock(&mutex);
3014 old_period = sysctl_sched_rt_period;
3015 old_runtime = sysctl_sched_rt_runtime;
3016
3017 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3018
3019 if (!ret && write) {
3020 ret = sched_rt_global_validate();
3021 if (ret)
3022 goto undo;
3023
3024 ret = sched_dl_global_validate();
3025 if (ret)
3026 goto undo;
3027
3028 ret = sched_rt_global_constraints();
3029 if (ret)
3030 goto undo;
3031
3032 sched_rt_do_global();
3033 sched_dl_do_global();
3034 }
3035 if (0) {
3036 undo:
3037 sysctl_sched_rt_period = old_period;
3038 sysctl_sched_rt_runtime = old_runtime;
3039 }
3040 mutex_unlock(&mutex);
3041
3042 return ret;
3043 }
3044
3045 static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
3046 size_t *lenp, loff_t *ppos)
3047 {
3048 int ret;
3049 static DEFINE_MUTEX(mutex);
3050
3051 mutex_lock(&mutex);
3052 ret = proc_dointvec(table, write, buffer, lenp, ppos);
3053 /*
3054 * Make sure that internally we keep jiffies.
3055 * Also, writing zero resets the timeslice to default:
3056 */
3057 if (!ret && write) {
3058 sched_rr_timeslice =
3059 sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
3060 msecs_to_jiffies(sysctl_sched_rr_timeslice);
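/*
 * Illustrative example (assuming HZ=250): writing 100 to
 * sched_rr_timeslice_ms stores 25 jiffies in sched_rr_timeslice, while
 * writing 0 or a negative value falls back to RR_TIMESLICE and re-exposes
 * that default in milliseconds.
 */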
3061
3062 if (sysctl_sched_rr_timeslice <= 0)
3063 sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
3064 }
3065 mutex_unlock(&mutex);
3066
3067 return ret;
3068 }
3069 #endif /* CONFIG_SYSCTL */
3070
3071 #ifdef CONFIG_SCHED_DEBUG
3072 void print_rt_stats(struct seq_file *m, int cpu)
3073 {
3074 rt_rq_iter_t iter;
3075 struct rt_rq *rt_rq;
3076
3077 rcu_read_lock();
3078 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
3079 print_rt_rq(m, cpu, rt_rq);
3080 rcu_read_unlock();
3081 }
3082 #endif /* CONFIG_SCHED_DEBUG */
3083